def test_gcp_to_dict():
    """Serializing a factory-built IAM bucket resource keeps the project id.

    Even with no credentials supplied, to_dict() must still expose the
    'project_id' key with the value the resource was constructed with.
    """
    resource = Resource.factory(
        'gcp',
        client_kwargs=client_kwargs,
        name=test_resource_name,
        project_id=test_project,
        resource_type='storage.buckets.iam',
    )
    serialized = resource.to_dict()
    # with no creds, we should still get this key but it should be none
    assert serialized['project_id'] == test_project
def callback(pubsub_message):
    """Pub/Sub callback: evaluate one audit-log message for policy violations
    and, when enforcement is enabled, remediate each violation found.

    Args:
        pubsub_message: Pub/Sub message object exposing `.data` (JSON payload)
            and `.ack()`.

    The message is acked on every early-exit path (unparseable JSON, wrong log
    type, no recognized assets) and once at the end after all assets have been
    processed, so it is not redelivered.
    """
    try:
        log_message = json.loads(pubsub_message.data)
        log_id = log_message.get('insertId', 'unknown-id')
        # Get the timestamp from the log message
        log_time_str = log_message.get('timestamp')
        if log_time_str:
            log_timestamp = int(
                dateutil.parser.parse(log_time_str).timestamp())
        else:
            # Entry carries no timestamp; fall back to "now" so the
            # enforcement-delay math below still works
            log_timestamp = int(time.time())
    except (json.JSONDecodeError, AttributeError):
        # We can't parse the log message, nothing to do here
        logger.debug('Failure loading json, discarding message')
        pubsub_message.ack()
        return

    logger.debug({
        'log_id': log_id,
        'message': 'Received & decoded json message'
    })

    # normal activity logs have logName in this form:
    # projects/<p>/logs/cloudaudit.googleapis.com%2Factivity
    # data access logs have a logName field that looks like:
    # projects/<p>/logs/cloudaudit.googleapis.com%2Fdata_access
    #
    # try to only handle the normal activity logs
    log_name_end = log_message.get('logName', '').split('/')[-1]
    if log_name_end != 'cloudaudit.googleapis.com%2Factivity':
        logger.debug({
            'log_id': log_id,
            'message': 'Not an activity log, discarding'
        })
        pubsub_message.ack()
        return

    # Attempt to get a list of asset(s) affected by this event
    try:
        assets = StackdriverParser.get_assets(log_message)
        if len(assets) == 0:
            # We did not recognize any assets in this message
            logger.debug({
                'log_id': log_id,
                'message': 'No recognized assets in log'
            })
            pubsub_message.ack()
            return
    except Exception as e:
        # If we fail to get asset info from the message, the message must be
        # bad
        logger.debug({
            'log_id': log_id,
            'message': 'Exception while parsing message for asset details',
            'details': str(e)
        })
        pubsub_message.ack()
        return

    # One message may describe several assets; each gets its own summary log
    for asset_info in assets:
        # Start building our log message
        log = {}
        log['log_id'] = log_id
        log['asset_info'] = asset_info

        if asset_info.get('operation_type') != 'write':
            # No changes, no need to check anything
            logger.debug({
                'log_id': log_id,
                'message': 'Message is not a create/update, nothing to do'
            })
            # NOTE(review): ack here does not stop the loop — remaining
            # assets in this message are still processed afterwards
            pubsub_message.ack()
            continue

        try:
            resource = Resource.factory('gcp', asset_info, credentials=app_creds)
        except Exception as e:
            logger.debug({
                'log_id': log_id,
                'message': 'Internal failure in rpe-lib',
                'details': str(e)
            })
            pubsub_message.ack()
            continue

        logger.debug({'log_id': log_id, 'message': 'Analyzing for violations'})
        try:
            v = rpe.violations(resource)
            log['violation_count'] = len(v)
            log['remediation_count'] = 0
        except Exception as e:
            # NOTE(review): 'Execption' typo below is a runtime log string,
            # left untouched here
            logger.debug({
                'log_id': log_id,
                'message': 'Execption while checking for violations',
                'details': str(e)
            })
            continue

        if not enforce_policy:
            logger.debug({
                'log_id': log_id,
                'message': 'Enforcement is disabled, processing complete'
            })
            pubsub_message.ack()
            continue

        if enforcement_delay:
            # If the log is old, subtract that from the enforcement delay
            message_age = int(time.time()) - log_timestamp
            log['message_age'] = message_age
            delay = max(0, enforcement_delay - message_age)
            logger.debug({
                'log_id': log_id,
                'message': 'Delaying enforcement by %d seconds, message is already %d seconds old and our configured delay is %d seconds' % (delay, message_age, enforcement_delay)
            })
            time.sleep(delay)

        # Remediate every (engine, violation) pair; failures are recorded in
        # the summary log rather than aborting the remaining remediations
        for (engine, violation) in v:
            logger.debug({
                'log_id': log_id,
                'message': 'Executing remediation'
            })
            try:
                engine.remediate(resource, violation)
                log['remediation_count'] += 1
            except Exception as e:
                # Catch any other exceptions so we can acknowledge the message.
                # Otherwise they start to fill up the buffer of unacknowledged messages
                # NOTE(review): logger is invoked directly here (callable
                # logger object), unlike the logger.debug calls above
                logger({
                    'log_id': log_id,
                    'message': 'Exception while attempting remediation of {}'.format(
                        violation),
                    'details': str(e)
                })
                if 'exceptions' not in log:
                    log['exceptions'] = []
                log['exceptions'].append(str(e))

        # Emit the per-asset summary record
        logger(log)

    # Finally ack the message after we're done with all of the assets
    pubsub_message.ack()
def test_gcp_resource_factory_invalid():
    """The factory rejects an empty resource spec via an assertion."""
    empty_spec = {}
    with pytest.raises(AssertionError):
        Resource.factory('gcp', empty_spec)
def test_gcp_full_resource_name(case):
    """Each factory-built resource reports the expected full resource name."""
    resource = Resource.factory("gcp", client_kwargs=client_kwargs, **case.input)
    expected_name = case.name
    assert resource.full_resource_name() == expected_name
def test_gcp_resource_factory_bad_type():
    """An unrecognized resource_type raises ResourceException with a clear message."""
    with pytest.raises(ResourceException) as exc_info:
        Resource.factory('gcp', resource_type='fake.type')
    error_text = str(exc_info.value)
    assert 'Unknown resource type' in error_text
def test_gcp_resource_factory_no_type():
    """Omitting resource_type entirely raises ResourceException."""
    with pytest.raises(ResourceException) as exc_info:
        Resource.factory('gcp')
    error_text = str(exc_info.value)
    assert 'Resource type not specified' in error_text
def test_gcp_resource_factory(case):
    """The factory returns the expected concrete class for each known type."""
    resource = Resource.factory("gcp", client_kwargs=client_kwargs, **case.input)
    # Exact class match, not just isinstance: each type maps to one class
    assert resource.__class__ == case.cls
    assert resource.type() == case.type
    request_args = resource._get_request_args()
    assert isinstance(request_args, dict)
def callback(pubsub_message):
    """Pub/Sub callback: evaluate one audit-log message per-asset against all
    policies, log evaluation results, and optionally remediate violations.

    Args:
        pubsub_message: Pub/Sub message object exposing `.data` (JSON payload)
            and `.ack()`.

    Unlike a simple handler, this variant fetches per-project credentials,
    optionally creates a per-project Logger, and records one log entry per
    evaluated policy (violated or not) via mklogs(). The message is acked on
    every early-exit path and once at the end.
    """
    try:
        log_message = json.loads(pubsub_message.data)
        log_id = log_message.get('insertId', 'unknown-id')
        # Get the timestamp from the log message
        log_time_str = log_message.get('timestamp')
        if log_time_str:
            log_timestamp = int(
                dateutil.parser.parse(log_time_str).timestamp())
        else:
            # Entry carries no timestamp; fall back to "now" so the
            # message-age math below still works
            log_timestamp = int(time.time())
    except (json.JSONDecodeError, AttributeError):
        # We can't parse the log message, nothing to do here
        logger.debug('Failure loading json, discarding message')
        pubsub_message.ack()
        return

    logger.debug({
        'log_id': log_id,
        'message': 'Received & decoded json message'
    })

    # normal activity logs have logName in this form:
    # projects/<p>/logs/cloudaudit.googleapis.com%2Factivity
    # data access logs have a logName field that looks like:
    # projects/<p>/logs/cloudaudit.googleapis.com%2Fdata_access
    #
    # try to only handle the normal activity logs
    log_name_end = log_message.get('logName', '').split('/')[-1]
    if log_name_end != 'cloudaudit.googleapis.com%2Factivity':
        logger.debug({
            'log_id': log_id,
            'message': 'Not an activity log, discarding'
        })
        pubsub_message.ack()
        return

    # Attempt to get a list of asset(s) affected by this event
    try:
        assets = StackdriverParser.get_assets(log_message)
        if len(assets) == 0:
            # We did not recognize any assets in this message
            logger.debug({
                'log_id': log_id,
                'message': 'No recognized assets in log'
            })
            pubsub_message.ack()
            return
    except Exception as e:
        # If we fail to get asset info from the message, the message must be
        # bad
        logger.debug({
            'log_id': log_id,
            'message': 'Exception while parsing message for asset details',
            'details': str(e)
        })
        pubsub_message.ack()
        return

    # One message may describe several assets; each is evaluated independently
    for asset_info in assets:
        if asset_info.get('operation_type') != 'write':
            # No changes, no need to check anything
            logger.debug({
                'log_id': log_id,
                'message': 'Message is not a create/update, nothing to do'
            })
            # NOTE(review): ack here does not stop the loop — remaining
            # assets in this message are still processed afterwards
            pubsub_message.ack()
            continue

        try:
            project_creds = cb.get_credentials(
                project_id=asset_info['project_id'])
            if per_project_logging:
                # project_logger is only bound when per_project_logging is
                # truthy; its later use is guarded by the same flag
                project_logger = Logger(
                    app_name,
                    True,  # per-project logging is always stackdriver
                    asset_info['project_id'],
                    project_creds)
            resource = Resource.factory('gcp', asset_info, credentials=project_creds)
        except Exception as e:
            log_failure('Internal failure in rpe-lib', asset_info, log_id, e)
            pubsub_message.ack()
            continue

        try:
            fill_asset_info(asset_info, resource)
        except Exception as e:
            # We can still proceed without the additional data, but we should log that it happened
            log_failure('Failed to load additional asset metadata', asset_info, log_id, e)

        logger.debug({'log_id': log_id, 'message': 'Analyzing for violations'})

        # Fetch a list of policies and then violations. The policy
        # list is needed to log data about evaluated policies that are
        # not violated by the current asset.
        try:
            policies = rpe.policies(resource)
        except Exception as e:
            log_failure('Exception while retrieving policies', asset_info, log_id, e)
            pubsub_message.ack()
            continue

        try:
            violations = rpe.violations(resource)
        except Exception as e:
            log_failure('Exception while checking for violations', asset_info, log_id, e)
            continue

        # Prepare log messages
        message_age = int(time.time()) - log_timestamp
        logs = mklogs(log_id, asset_info, policies, violations, message_age)

        if not enforce_policy:
            logger.debug({
                'log_id': log_id,
                'message': 'Enforcement is disabled, processing complete'
            })
            pubsub_message.ack()
            continue

        if enforcement_delay:
            # If the log is old, subtract that from the enforcement delay
            delay = max(0, enforcement_delay - message_age)
            logger.debug({
                'log_id': log_id,
                'message': 'Delaying enforcement by %d seconds, message is already %d seconds old and our configured delay is %d seconds' % (delay, message_age, enforcement_delay)
            })
            time.sleep(delay)

        # Remediate each violation; failures are logged but do not abort the
        # remaining remediations for this asset
        for (engine, violated_policy) in violations:
            logger.debug({
                'log_id': log_id,
                'message': 'Executing remediation'
            })
            try:
                engine.remediate(resource, violated_policy)
                logs[violated_policy]['remediated'] = True
                logs[violated_policy]['remediated_at'] = int(time.time())

                if per_project_logging:
                    project_log = {
                        'event': 'remediation',
                        'trigger_event': asset_info,
                        'policy': violated_policy,
                    }
                    project_logger(project_log)
            except Exception as e:
                # Catch any other exceptions so we can acknowledge the message.
                # Otherwise they start to fill up the buffer of unacknowledged messages
                # NOTE(review): 'Execption' typo below is a runtime log string,
                # left untouched here
                log_failure('Execption while attempting to remediate', asset_info, log_id, e)

        # submit all of the accumulated logs
        for policy in logs:
            logger(logs[policy])

    # Finally ack the message after we're done with all of the assets
    pubsub_message.ack()