def get_storage_keys(storage_account_id):
    """Return the access keys of the storage account named by *storage_account_id*."""
    session = local_session(Session)
    storage_client = session.client('azure.mgmt.storage.StorageManagementClient')
    group = ResourceIdParser.get_resource_group(storage_account_id)
    name = ResourceIdParser.get_resource_name(storage_account_id)
    key_result = storage_client.storage_accounts.list_keys(group, name)
    return key_result.keys
def tearDown(self, *args, **kwargs):
    """Recreate the record set deleted by the test so the zone is restored."""
    super(DeleteRecordSetTest, self).tearDown(*args, **kwargs)

    deleted = self.deleted_recordset
    zone = ResourceIdParser.get_resource_name(deleted[ChildTypeInfo.parent_key])
    name = ResourceIdParser.get_resource_name(deleted['id'])
    record_type = deleted['type'].split('/')[-1]
    ttl = deleted['properties']['TTL']
    ip_address = deleted['properties']['ARecords'][0]['ipv4Address']

    DeleteRecordSetTest.client.create_or_update(
        resource_group_name=deleted['resourceGroup'],
        zone_name=zone,
        relative_record_set_name=name,
        record_type=record_type,
        parameters={
            'ttl': ttl,
            'arecords': [{'ipv4_address': ip_address}],
        },
    )
def _process_resource_set(self, resources, event=None):
    """Filter *resources* by presence/level of Azure management locks.

    A resource is kept when:
      - ``self.lock_type`` is 'Absent' and no locks exist on it, or
      - ``self.lock_type`` is 'Any' and at least one lock exists, or
      - any lock's level equals ``self.lock_type``.
    """
    client = self.manager.get_client(
        'azure.mgmt.resource.locks.ManagementLockClient')
    result = []
    for resource in resources:
        if resource.get('resourceGroup') is None:
            # No resourceGroup key -> the record is itself a resource group;
            # list locks at group scope.
            locks = [
                r.serialize(True) for r in client.management_locks.
                list_at_resource_group_level(resource['name'])
            ]
        else:
            # Child resources carry 'c7n:parent-id'; its name becomes the
            # parent-resource path segment of the lock lookup.
            # NOTE(review): if 'c7n:parent-id' is absent, get_resource_name
            # is called with None *before* the `or ''` fallback applies —
            # confirm the parser tolerates None.
            locks = [
                r.serialize(True)
                for r in client.management_locks.list_at_resource_level(
                    resource['resourceGroup'],
                    ResourceIdParser.get_namespace(resource['id']),
                    ResourceIdParser.get_resource_name(
                        resource.get('c7n:parent-id')) or '',
                    ResourceIdParser.get_resource_type(resource['id']),
                    resource['name'])
            ]

        if StringUtils.equal('Absent', self.lock_type) and not locks:
            result.append(resource)
        else:
            for lock in locks:
                if StringUtils.equal('Any', self.lock_type) or \
                        StringUtils.equal(lock['properties']['level'],
                                          self.lock_type):
                    result.append(resource)
                    break

    return result
def resource_api_version(self, resource_id):
    """ latest non-preview api version for resource """
    namespace = ResourceIdParser.get_namespace(resource_id)
    resource_type = ResourceIdParser.get_resource_type(resource_id)

    # Cache on namespace + type so identically named types under
    # different providers do not collide.
    cache_id = namespace + resource_type
    if cache_id in self._provider_cache:
        return self._provider_cache[cache_id]

    resource_client = self.client(
        'azure.mgmt.resource.ResourceManagementClient')
    provider = resource_client.providers.get(namespace)

    # The api version may be directly provided
    if not provider.resource_types and resource_client.providers.api_version:
        return resource_client.providers.api_version

    rt = next(
        (t for t in provider.resource_types
         if StringUtils.equal(t.resource_type, resource_type)), None)

    if rt and rt.api_versions:
        # Prefer the newest stable version; fall back to a preview
        # version only when nothing else is available.
        versions = [
            v for v in rt.api_versions if 'preview' not in v.lower()
        ]
        api_version = versions[0] if versions else rt.api_versions[0]
        self._provider_cache[cache_id] = api_version
        return api_version
    # NOTE: implicitly returns None when the type is unknown or has no
    # api versions.
def _process_resource(self, resource):
    """Set the configured public-access level on a blob container."""
    group = ResourceIdParser.get_resource_group(resource['id'])
    account = ResourceIdParser.get_resource_name(resource['c7n:parent-id'])
    self.client.blob_containers.update(
        group,
        account,
        resource['name'],
        public_access=self.data['value'])
def get_cosmos_data_client_for_account(account_id, account_endpoint, manager,
                                       readonly=True):
    """Build a Cosmos DB data-plane client for the account named by *account_id*."""
    group = ResourceIdParser.get_resource_group(account_id)
    name = ResourceIdParser.get_resource_name(account_id)
    master_key = CosmosDBChildResource.get_cosmos_key(
        group, name, manager.get_client(), readonly)
    return CosmosClient(url_connection=account_endpoint,
                        auth={'masterKey': master_key})
def get_storage_account_connection_string(id):
    """Return an https connection string for the storage account *id*."""
    group = ResourceIdParser.get_resource_group(id)
    account_name = ResourceIdParser.get_resource_name(id)
    storage_client = local_session(Session).client(
        'azure.mgmt.storage.StorageManagementClient')
    keys = storage_client.storage_accounts.list_keys(group, account_name)
    return 'DefaultEndpointsProtocol={};AccountName={};AccountKey={}'.format(
        'https', account_name, keys.keys[0].value)
def deploy_function_app(parameters):
    """Provision (or reuse) a function app and its supporting resources.

    If the named function app already exists, its hosting plan's sku tier is
    copied back into *parameters* and the app is returned unchanged.
    Otherwise the service plan, app insights and storage account are
    provisioned as needed and a new function app is created.
    """
    function_app_unit = FunctionAppDeploymentUnit()
    function_app_params = \
        {'name': parameters.function_app_name,
         'resource_group_name': parameters.function_app_resource_group_name}
    function_app = function_app_unit.get(function_app_params)

    if function_app:
        # retrieve the type of app service plan hosting the existing function app
        session = local_session(Session)
        web_client = session.client(
            'azure.mgmt.web.WebSiteManagementClient')
        app_id = function_app.server_farm_id
        app_name = ResourceIdParser.get_resource_name(app_id)
        app_resource_group_name = ResourceIdParser.get_resource_group(
            app_id)
        app_service_plan = web_client.app_service_plans.get(
            app_resource_group_name, app_name)

        # update the sku tier to properly reflect what is provisioned in Azure
        parameters.service_plan['sku_tier'] = app_service_plan.sku.tier
        return function_app

    sp_unit = AppServicePlanUnit()
    app_service_plan = sp_unit.provision_if_not_exists(
        parameters.service_plan)

    # if only resource_id is provided, retrieve existing app plan sku tier
    parameters.service_plan['sku_tier'] = app_service_plan.sku.tier

    ai_unit = AppInsightsUnit()
    app_insights = ai_unit.provision_if_not_exists(parameters.app_insights)

    sa_unit = StorageAccountUnit()
    storage_account_id = sa_unit.provision_if_not_exists(
        parameters.storage_account).id
    con_string = FunctionAppUtilities.get_storage_account_connection_string(
        storage_account_id)

    function_app_params.update({
        'location': app_service_plan.location,
        'app_service_plan_id': app_service_plan.id,
        'app_insights_key': app_insights.instrumentation_key,
        'is_consumption_plan':
            FunctionAppUtilities.is_consumption_plan(parameters),
        'storage_account_connection_string': con_string
    })

    return function_app_unit.provision(function_app_params)
def get_storage_account_connection_string(id):
    """Compose the connection string for the storage account identified by *id*."""
    resource_group = ResourceIdParser.get_resource_group(id)
    account = ResourceIdParser.get_resource_name(id)
    session = local_session(Session)
    storage_client = session.client('azure.mgmt.storage.StorageManagementClient')
    key_list = storage_client.storage_accounts.list_keys(resource_group, account)
    template = 'DefaultEndpointsProtocol={};AccountName={};AccountKey={}'
    return template.format('https', account, key_list.keys[0].value)
def extract_properties(options, name, properties):
    """Resolve *name* from *options* into a dict with keys from *properties*.

    A string value is treated as a resource id; otherwise each property is
    read from the settings (camelCase key), falling back to its default.
    """
    settings = options.get(name, {})

    # A bare string is shorthand for a full resource id.
    if isinstance(settings, six.string_types):
        return {
            'id': settings,
            'name': ResourceIdParser.get_resource_name(settings),
            'resource_group_name': ResourceIdParser.get_resource_group(settings),
        }

    return {key: settings.get(StringUtils.snake_to_camel(key), default)
            for key, default in properties.items()}
def get_cosmos_data_client(resources, manager, readonly=True):
    """Create a Cosmos DB data client for the account shared by *resources*."""
    parent_id = resources[0]['c7n:parent-id']
    endpoint = resources[0]['c7n:document-endpoint']

    # Fetch the account master key through the management-plane client.
    master_key = CosmosDBChildResource.get_cosmos_key(
        ResourceIdParser.get_resource_group(parent_id),
        ResourceIdParser.get_resource_name(parent_id),
        manager.get_client(),
        readonly)

    return CosmosClient(url_connection=endpoint,
                        auth={'masterKey': master_key})
def prepare_queue_storage(self, queue_resource_id, queue_name):
    """ Create a storage client using unusual ID/group reference
    as this is what we require for event subscriptions """
    client = self.session.client('azure.mgmt.storage.StorageManagementClient')
    group = ResourceIdParser.get_resource_group(queue_resource_id)
    name = ResourceIdParser.get_resource_name(queue_resource_id)
    account = client.storage_accounts.get_properties(group, name)

    Storage.create_queue_from_storage_account(account, queue_name, self.session)
    return account
def tearDown(self):
    """Remove the management lock created during the test run."""
    if self.resources:
        self.assertEqual(len(self.resources), 1)
        resource = self.resources[0]
        if resource.get('resourceGroup') is None:
            # Resource-group level lock: only the group name and lock
            # name are needed.
            self.client.management_locks.delete_at_resource_group_level(
                resource['name'], resource['lock'])
        else:
            # Child-resource lock: rebuild the full scope from the ARM id.
            # NOTE(review): when 'c7n:parent-id' is absent the parser is
            # called with None before the `or ''` fallback — confirm it
            # tolerates None.
            self.client.management_locks.delete_at_resource_level(
                resource['resourceGroup'],
                ResourceIdParser.get_namespace(resource['id']),
                ResourceIdParser.get_resource_name(
                    resource.get('c7n:parent-id')) or '',
                ResourceIdParser.get_resource_type(resource['id']),
                resource['name'],
                resource['lock'])
def _process_resource_set(self, resources, event=None):
    """Keep databases whose transparent-data-encryption status matches the filter.

    The TDE settings are fetched lazily and cached on each resource under
    properties['transparentDataEncryption'].
    """
    client = self.manager.get_client()
    matched = []
    for db in resources:
        if 'transparentDataEncryption' not in db['properties']:
            server_name = ResourceIdParser.get_resource_name(
                db[ChildTypeInfo.parent_key])
            tde = client.transparent_data_encryptions.get(
                db['resourceGroup'],
                server_name,
                db['name'],
                "current")
            db['properties']['transparentDataEncryption'] = \
                tde.serialize(True).get('properties', {})

        wanted = 'Enabled' if self.enabled else 'Disabled'
        status = db['properties']['transparentDataEncryption'].get('status')
        if StringUtils.equal(status, wanted):
            matched.append(db)
    return matched
def __init__(self, storage_id, queue_name, policy_uri,
             log_group=None, metrics=None, output_dir=None):
    """Bootstrap the self-host: sessions, event queue, policies, scheduler.

    :param storage_id: ARM id of the storage account backing the event queue
    :param queue_name: event queue name; defaults to the subscription id
    :param policy_uri: blob URI policies are loaded from
    """
    logging.basicConfig(level=logging.INFO, format='%(message)s')
    log.info("Running Azure Cloud Custodian Self-Host")

    resources.load_available()

    self.session = local_session(Session)
    self.storage_session = self.session
    # The queue storage account may live in a different subscription than
    # the default session; open a dedicated session if so.
    storage_subscription_id = ResourceIdParser.get_subscription_id(storage_id)
    if storage_subscription_id != self.session.subscription_id:
        self.storage_session = Session(subscription_id=storage_subscription_id)

    # Load configuration
    self.options = Host.build_options(output_dir, log_group, metrics)
    self.policy_storage_uri = policy_uri
    self.event_queue_id = storage_id
    self.event_queue_name = queue_name

    # Default event queue name is the subscription ID
    if not self.event_queue_name:
        self.event_queue_name = self.session.subscription_id

    # Prepare storage bits
    self.policy_blob_client = None
    self.blob_cache = {}
    self.queue_storage_account = self.prepare_queue_storage(
        self.event_queue_id, self.event_queue_name)
    self.queue_service = None

    # Register event subscription
    self.update_event_subscription()

    # Policy cache and dictionary
    self.policy_cache = tempfile.mkdtemp()
    self.policies = {}

    # Configure scheduler
    self.scheduler = BlockingScheduler(Host.get_scheduler_config())
    logging.getLogger('apscheduler.executors.default').setLevel(logging.ERROR)
    logging.getLogger('apscheduler').setLevel(logging.ERROR)

    # Schedule recurring policy updates
    self.scheduler.add_job(self.update_policies,
                           'interval',
                           seconds=policy_update_seconds,
                           id="update_policies",
                           next_run_time=datetime.now(),
                           executor='threadpool')

    # Schedule recurring queue polling
    self.scheduler.add_job(self.poll_queue,
                           'interval',
                           seconds=queue_poll_seconds,
                           id="poll_queue",
                           executor='threadpool')

    # NOTE: start() blocks — __init__ does not return until shutdown.
    self.scheduler.start()
def augment(self, resources):
    """Attach the parsed resource group name to each resource with an id."""
    for item in resources:
        if 'id' not in item:
            continue
        item['resourceGroup'] = ResourceIdParser.get_resource_group(item['id'])
    return resources
def augment(self, resources):
    """Populate 'resourceGroup' on each resource from its ARM id."""
    # TODO: temporary put here. Applicable only to ARM resources.
    # Need to move to ARMResourceManager base class
    for entry in (r for r in resources if 'id' in r):
        entry['resourceGroup'] = ResourceIdParser.get_resource_group(entry['id'])
    return resources
def test_lock_action_child_resource(self):
    """Apply a ReadOnly lock to a SQL database and verify the lock exists."""
    p = self.load_policy({
        'name': 'lock-sqldatabase',
        'resource': 'azure.sqldatabase',
        'filters': [{
            'type': 'value',
            'key': 'name',
            'value': 'cclockeddb'
        }],
        'actions': [{
            'type': 'lock',
            'lock-type': 'ReadOnly',
            'lock-name': 'dblock',
            'lock-notes': 'testNotes'
        }],
    })
    self.resources = p.run()

    self.assertEqual(len(self.resources), 1)
    self.assertEqual(self.resources[0]['name'], 'cclockeddb')

    # Look up the lock at database scope using the parent server's name.
    locks = [
        r.serialize(True)
        for r in self.client.management_locks.list_at_resource_level(
            'test_locked',
            'Microsoft.Sql/servers',
            ResourceIdParser.get_resource_name(
                self.resources[0]['c7n:parent-id']),
            'databases',
            'cclockeddb')
        if r.name == 'dblock'
    ]

    self.assertEqual(len(locks), 1)
    self.assertEqual(locks[0]['properties']['level'], 'ReadOnly')
    self.assertEqual(locks[0]['properties']['notes'], 'testNotes')

    # Remember the lock name so tearDown can delete it.
    self.resources[0]['lock'] = locks[0]['name']
def get_backup_retention_policy_context(database):
    """Return (resource_group, server_name, database_name) for retention APIs."""
    server_name = ResourceIdParser.get_resource_name(
        database[ChildTypeInfo.parent_key])
    return database['resourceGroup'], server_name, database['name']
def get_resources(self, resource_ids):
    """Fetch and serialize the resource groups referenced by *resource_ids*."""
    client = self.get_client('azure.mgmt.resource.ResourceManagementClient')
    serialized = []
    for rid in resource_ids:
        group = client.resource_groups.get(
            ResourceIdParser.get_resource_group(rid))
        serialized.append(group.serialize(True))
    return serialized
def _process_resource_set(self, resources, event=None):
    """Filter SQL databases by data-masking state (Enabled/Disabled).

    The system 'master' database is always skipped. The masking policy is
    fetched once per resource and cached under 'c7n:data-masking-policy'.
    """
    client = self.manager.get_client()
    result = []
    for resource in resources:
        database_name = resource['name']
        # 'master' has no user-configurable data-masking policy.
        if StringUtils.equal(database_name, "master"):
            continue
        if 'c7n:data-masking-policy' not in resource:
            server_id = resource[ChildTypeInfo.parent_key]
            server_name = ResourceIdParser.get_resource_name(server_id)
            dmr = client.data_masking_policies.get(
                resource['resourceGroup'],
                server_name,
                database_name)
            if dmr:
                resource['c7n:data-masking-policy'] = dmr.serialize(
                    True).get('properties', {})
            else:
                # No policy returned -> cache an empty dict so we don't
                # re-query on subsequent filter passes.
                resource['c7n:data-masking-policy'] = {}

        required_status = 'Enabled' if self.enabled else 'Disabled'
        if StringUtils.equal(
                resource['c7n:data-masking-policy'].get(
                    'dataMaskingState'),
                required_status):
            result.append(resource)

    return result
def test_get_blob_client_from_storage_account_without_sas(self):
    """A blob client can be created for an account without requesting SAS."""
    account = self.setup_account()
    group = ResourceIdParser.get_resource_group(account.id)
    client = StorageUtilities.get_blob_client_from_storage_account(
        group, account.name, self.session)
    self.assertIsNotNone(client)
def test_delete_a_record_set(self):
    """The delete action removes the matched DNS record set from its zone."""
    record_set_name = 'deleteme'
    p = self.load_policy({
        'name': 'test-delete-a-record-set',
        'resource': 'azure.recordset',
        'filters': [
            {
                'type': 'value',
                'key': 'name',
                'op': 'eq',
                'value': record_set_name
            }
        ],
        'actions': [
            {
                'type': 'delete'
            }
        ]
    })
    resources = p.run()
    self.assertEqual(len(resources), 1)
    self.assertEqual(resources[0]['name'], record_set_name)

    rs = resources[0]
    # Stash the record set so tearDown can recreate it after the test.
    self.deleted_recordset = rs
    rg = rs['resourceGroup']
    zone = ResourceIdParser.get_resource_name(rs[ChildTypeInfo.parent_key])
    self._assert_record_set_not_present(record_set_name, rg, zone)
def _process_resource(self, resource):
    """Create a management lock on the resource (or resource group)."""
    if resource.get('resourceGroup') is None:
        # Resource-group scope: lock named 'lock_<group>_<type>'.
        self.client.management_locks.create_or_update_at_resource_group_level(
            resource['name'],
            'lock_' + resource['name'] + '_' + self.lock_type,
            ManagementLockObject(level=self.lock_type))
    else:
        # Individual-resource scope: lock named
        # 'custodian_lock_<name>_<type>'.
        # NOTE(review): the prefix differs from the group-level branch
        # ('lock_' vs 'custodian_lock_') — confirm this is intentional.
        # NOTE(review): if 'c7n:parent-id' is absent the parser receives
        # None before the `or ''` fallback — confirm it tolerates None.
        self.client.management_locks.create_or_update_at_resource_level(
            resource['resourceGroup'],
            ResourceIdParser.get_namespace(resource['id']),
            ResourceIdParser.get_resource_name(
                resource.get('c7n:parent-id')) or '',
            ResourceIdParser.get_resource_type(resource['id']),
            resource['name'],
            'custodian_lock_' + resource['name'] + '_' + self.lock_type,
            ManagementLockObject(level=self.lock_type))
def test_resize_action(self, update_mock):
    """The resize action issues a database update with the requested sku/size."""
    p = self.load_policy({
        'name': 'resize-sqldatabase',
        'resource': 'azure.sqldatabase',
        'filters': [
            {
                'type': 'value',
                'key': 'name',
                'value': 'cctestdb'
            }
        ],
        'actions': [
            {
                'type': 'resize',
                'tier': 'Standard',
                'capacity': 100,
                'max_size_bytes': 21474836480
            }
        ],
    })
    self.resources = p.run()

    self.assertEqual(len(self.resources), 1)
    self.assertEqual(self.resources[0]['name'], 'cctestdb')

    # The action passes the parent server's *name* (parsed from its id).
    parent_id = ResourceIdParser.get_resource_name(self.resources[0]['c7n:parent-id'])

    expected_db_update = DatabaseUpdate(sku=Sku(capacity=100,
                                                tier='Standard',
                                                name='Standard'),
                                        max_size_bytes=21474836480)

    # Verify the mocked update call received exactly the expected args:
    # (resource_group, server_name, database_name, update payload).
    update_mock.assert_called_once()
    name, args, kwargs = update_mock.mock_calls[0]

    self.assertEqual('test_sqlserver', args[0])
    self.assertEqual(parent_id, args[1])
    self.assertEqual('cctestdb', args[2])
    self.assertEqual(expected_db_update, args[3])
def _process_resource(self, resource):
    """Create a management lock (with optional notes) on the resource."""
    lock_name = self._get_lock_name(resource)
    lock_notes = self._get_lock_notes(resource)

    if is_resource_group(resource):
        # Group-scope lock: only the group name is needed.
        self.client.management_locks.create_or_update_at_resource_group_level(
            resource['name'],
            lock_name,
            ManagementLockObject(level=self.lock_type, notes=lock_notes))
    else:
        # Resource-scope lock: rebuild the full scope from the ARM id.
        # NOTE(review): if 'c7n:parent-id' is absent the parser receives
        # None before the `or ''` fallback — confirm it tolerates None.
        self.client.management_locks.create_or_update_at_resource_level(
            resource['resourceGroup'],
            ResourceIdParser.get_namespace(resource['id']),
            ResourceIdParser.get_resource_name(
                resource.get('c7n:parent-id')) or '',
            ResourceIdParser.get_resource_type(resource['id']),
            resource['name'],
            lock_name,
            ManagementLockObject(level=self.lock_type, notes=lock_notes))
def process(self, resources, event=None):
    """Keep only resources whose full ARM type is in the allowed set."""
    kept = []
    for resource in resources:
        if 'id' not in resource:
            continue
        full_type = ResourceIdParser.get_full_type(resource['id'])
        if full_type.lower() in self.allowed_types:
            kept.append(resource)
    return kept
def _process_resource(self, database): sku = Sku(capacity=self.capacity, tier=self.tier, name=self.tier) max_size_bytes = self.max_size_bytes if not 0 else database[ 'properties']['maxSizeBytes'] self.client.databases.update( database['resourceGroup'], ResourceIdParser.get_resource_name(database['c7n:parent-id']), database['name'], DatabaseUpdate(sku=sku, max_size_bytes=max_size_bytes))
def resource_api_version(self, resource_id):
    """ latest non-preview api version for resource """
    namespace = ResourceIdParser.get_namespace(resource_id)
    resource_type = ResourceIdParser.get_resource_type(resource_id)

    # Cache on namespace + resource type: keying on the type alone lets
    # identically named types from different providers collide (matches
    # the other resource_api_version implementation in this file).
    cache_id = namespace + resource_type
    if cache_id in self._provider_cache:
        return self._provider_cache[cache_id]

    resource_client = self.client('azure.mgmt.resource.ResourceManagementClient')
    provider = resource_client.providers.get(namespace)

    rt = next((t for t in provider.resource_types
               if t.resource_type == str(resource_type).split('/')[-1]), None)
    if rt and rt.api_versions:
        # Prefer the newest stable version; fall back to a preview
        # version only when nothing else is available.
        versions = [v for v in rt.api_versions if 'preview' not in v.lower()]
        api_version = versions[0] if versions else rt.api_versions[0]
        self._provider_cache[cache_id] = api_version
        return api_version
def extract_properties(options, name, properties):
    """Resolve *name* from *options* into a dict with keys from *properties*,
    recursing into nested dict values."""
    settings = options.get(name, {})

    # A bare string is shorthand for a full resource id.
    if isinstance(settings, six.string_types):
        return {
            'id': settings,
            'name': ResourceIdParser.get_resource_name(settings),
            'resource_group_name': ResourceIdParser.get_resource_group(settings),
        }

    result = {}
    for key, default in properties.items():
        value = settings.get(StringUtils.snake_to_camel(key), default)
        if isinstance(value, dict):
            # Recurse so nested camelCase keys are normalized too.
            value = AzureFunctionMode.extract_properties(
                {'v': value}, 'v', default)
        result[key] = value
    return result
def test_resize_action(self):
    """Resize a live database via policy and verify the applied sku."""
    p = self.load_policy({
        'name': 'resize-sqldatabase',
        'resource': 'azure.sqldatabase',
        'filters': [{
            'type': 'value',
            'key': 'name',
            'value': 'cctestdb'
        }],
        'actions': [{
            'type': 'resize',
            'tier': 'Standard',
            'capacity': 100,
            'max_size_bytes': 21474836480
        }],
    })
    self.resources = p.run()

    self.assertEqual(len(self.resources), 1)
    self.assertEqual(self.resources[0]['name'], 'cctestdb')

    # Re-fetch the database and confirm the sku change took effect.
    updated_database = self.client.databases.get(
        'test_sqlserver',
        ResourceIdParser.get_resource_name(
            self.resources[0]['c7n:parent-id']),
        'cctestdb')

    self.assertEqual(updated_database.sku.capacity, 100)
    self.assertEqual(updated_database.sku.tier, 'Standard')

    # The value for max_size_bytes returned by api is stale,
    # so we can't make an assertion until it's fixed
    # self.assertEqual(database.max_size_bytes, 21474836480)

    # Revert action
    self.client.databases.update(
        'test_sqlserver',
        ResourceIdParser.get_resource_name(
            self.resources[0]['c7n:parent-id']),
        'cctestdb',
        DatabaseUpdate(sku=Sku(capacity=125, tier='Premium', name='Premium'),
                       max_size_bytes=2147483648))
def process_resource_set(self, resources):
    """Filter Cosmos DB collections by their offer (throughput) settings.

    Offers are fetched once per resource set via the data-plane client and
    joined to each collection through its '_self' link; the base value
    filter is then applied to the cached 'c7n:offer' entry.
    """
    matched = []

    try:
        # Skip if offer key is present anywhere because we already
        # queried and joined offers in a previous filter instance
        if not resources[0].get('c7n:offer'):

            # Get the data client keys
            parent_key = resources[0]['c7n:parent-id']
            key = CosmosDBChildResource.get_cosmos_key(
                ResourceIdParser.get_resource_group(parent_key),
                ResourceIdParser.get_resource_name(parent_key),
                self.manager.get_parent_manager().get_client())

            # Build a data client
            data_client = CosmosClient(
                url_connection=resources[0]['c7n:document-endpoint'],
                auth={'masterKey': key})

            # Get the offers
            offers = list(data_client.ReadOffers())

            # Match up offers to collections
            for resource in resources:
                offer = [
                    o for o in offers
                    if o['resource'] == resource['_self']
                ]
                resource['c7n:offer'] = offer

        # Pass each resource through the base filter
        for resource in resources:
            filtered_resource = super(CosmosDBOfferFilter, self).process(
                resource['c7n:offer'], event=None)

            if filtered_resource:
                matched.append(resource)

    except Exception as error:
        # Best-effort: a data-plane failure skips filtering rather than
        # aborting the policy run.
        log.warning(error)

    return matched
def deploy_function_app(parameters):
    """Provision (or reuse) a function app and its supporting resources.

    An existing function app is returned unchanged after copying its
    hosting plan's sku tier back into *parameters*; otherwise the service
    plan, app insights and storage account are provisioned as needed and
    a new function app is created.
    """
    function_app_unit = FunctionAppDeploymentUnit()
    function_app_params = \
        {'name': parameters.function_app_name,
         'resource_group_name': parameters.function_app_resource_group_name}
    function_app = function_app_unit.get(function_app_params)

    if function_app:
        # retrieve the type of app service plan hosting the existing function app
        session = local_session(Session)
        web_client = session.client('azure.mgmt.web.WebSiteManagementClient')
        app_id = function_app.server_farm_id
        app_name = ResourceIdParser.get_resource_name(app_id)
        app_resource_group_name = ResourceIdParser.get_resource_group(app_id)
        app_service_plan = web_client.app_service_plans.get(
            app_resource_group_name, app_name)

        # update the sku tier to properly reflect what is provisioned in Azure
        parameters.service_plan['sku_tier'] = app_service_plan.sku.tier
        return function_app

    sp_unit = AppServicePlanUnit()
    app_service_plan = sp_unit.provision_if_not_exists(parameters.service_plan)

    # if only resource_id is provided, retrieve existing app plan sku tier
    parameters.service_plan['sku_tier'] = app_service_plan.sku.tier

    ai_unit = AppInsightsUnit()
    app_insights = ai_unit.provision_if_not_exists(parameters.app_insights)

    sa_unit = StorageAccountUnit()
    storage_account_id = sa_unit.provision_if_not_exists(
        parameters.storage_account).id
    con_string = FunctionAppUtilities.get_storage_account_connection_string(
        storage_account_id)

    function_app_params.update(
        {'location': app_service_plan.location,
         'app_service_plan_id': app_service_plan.id,
         'app_insights_key': app_insights.instrumentation_key,
         'is_consumption_plan': FunctionAppUtilities.is_consumption_plan(parameters),
         'storage_account_connection_string': con_string})

    return function_app_unit.provision(function_app_params)
def test_get_blob_client_from_storage_account_without_sas_fails_sas_generation(self):
    """Generating a SAS from a client created without SAS support must raise."""
    with self.assertRaises(ValueError):
        account = self.setup_account()
        group = ResourceIdParser.get_resource_group(account.id)
        client = StorageUtilities.get_blob_client_from_storage_account(
            group, account.name, self.session)

        # Upload a blob, then attempt the (unsupported) SAS generation.
        client.create_container('test')
        client.create_blob_from_text('test', 'test.txt', 'My test contents.')
        client.generate_blob_shared_access_signature('test', 'test.txt')
def resource_api_version(self, resource_id):
    """ latest non-preview api version for resource """
    namespace = ResourceIdParser.get_namespace(resource_id)
    resource_type = ResourceIdParser.get_resource_type(resource_id)

    # Cache on namespace + type so identically named types under
    # different providers do not collide.
    cache_id = namespace + resource_type
    if cache_id in self._provider_cache:
        return self._provider_cache[cache_id]

    resource_client = self.client('azure.mgmt.resource.ResourceManagementClient')
    provider = resource_client.providers.get(namespace)

    rt = next((t for t in provider.resource_types
               if StringUtils.equal(t.resource_type, resource_type)), None)
    if rt and rt.api_versions:
        # Prefer the newest stable version; fall back to a preview
        # version only when nothing else is available.
        versions = [v for v in rt.api_versions if 'preview' not in v.lower()]
        api_version = versions[0] if versions else rt.api_versions[0]
        self._provider_cache[cache_id] = api_version
        return api_version
    # NOTE: implicitly returns None when the type is unknown or has no
    # api versions.
def test_get_blob_client_from_storage_account_with_sas(self):
    """A SAS token is produced when the client is created with SAS enabled."""
    account = self.setup_account()
    group = ResourceIdParser.get_resource_group(account.id)
    client = StorageUtilities.get_blob_client_from_storage_account(
        group, account.name, self.session, True)

    # Write a test blob and generate a shared access signature for it.
    client.create_container('test')
    client.create_blob_from_text('test', 'test.txt', 'My test contents.')
    sas = client.generate_blob_shared_access_signature('test', 'test.txt')
    self.assertIsNotNone(sas)
def main(input):
    """Azure Function entry point: run the custodian policy for a queue event.

    When *input* is a QueueMessage, the event payload and the target
    subscription id are extracted from it; otherwise the handler runs with
    no event context. Messages dequeued more than ``max_dequeue_count``
    times are dropped as poison messages.
    """
    logging.info("Running Azure Cloud Custodian Policy")

    context = {
        'config_file': join(dirname(__file__), 'config.json'),
        'auth_file': join(dirname(__file__), 'auth.json')
    }

    event = None
    subscription_id = None

    # isinstance instead of `type(...) is` — also accepts subclasses.
    if isinstance(input, QueueMessage):
        # Drop poison messages that keep reappearing on the queue.
        if input.dequeue_count > max_dequeue_count:
            return
        event = input.get_json()
        subscription_id = ResourceIdParser.get_subscription_id(event['subject'])

    handler.run(event, context, subscription_id)
def test_get_namespace(self):
    """The namespace segment of the sample resource id parses correctly."""
    namespace = ResourceIdParser.get_namespace(RESOURCE_ID)
    self.assertEqual(namespace, "Microsoft.Compute")
def test_get_resource_group(self):
    """The resource group segment of the sample resource id parses correctly."""
    group = ResourceIdParser.get_resource_group(RESOURCE_ID)
    self.assertEqual(group, "rgtest")
def test_resource_name(self):
    """The resource name segment of the sample resource id parses correctly."""
    name = ResourceIdParser.get_resource_name(RESOURCE_ID)
    self.assertEqual(name, "nametest")
def test_get_subscription_id(self):
    """The subscription id of the sample resource id parses correctly."""
    sub_id = ResourceIdParser.get_subscription_id(RESOURCE_ID)
    self.assertEqual(sub_id, DEFAULT_SUBSCRIPTION_ID)
def test_get_resource_type(self):
    """The resource type segment of the sample resource id parses correctly."""
    resource_type = ResourceIdParser.get_resource_type(RESOURCE_ID)
    self.assertEqual(resource_type, "virtualMachines")
def augment(self, resources):
    """Derive and store 'resourceGroup' on every resource that has an ARM id."""
    with_ids = (r for r in resources if 'id' in r)
    for resource in with_ids:
        resource['resourceGroup'] = ResourceIdParser.get_resource_group(
            resource['id'])
    return resources