def find_inactive_findings(new_findings, findings_in_cscc):
    """Mark CSCC findings that are absent from the latest scanner run inactive.

    Any finding pulled from CSCC whose id does not appear in the latest
    scanner results is stale: its state is flipped to 'INACTIVE' and its
    event time is refreshed to "now".

    Args:
        new_findings (list): Latest violations transformed to findings, as
            [finding_id, finding] pairs.
        findings_in_cscc (list): Findings pulled from CSCC that correspond
            to the previous scanner run, as [finding_id, finding] pairs.

    Returns:
        list: [finding_id, finding] pairs whose state has been marked as
            'INACTIVE'.
    """
    # Only membership of the id matters, so a set is sufficient.
    latest_finding_ids = {finding_id for finding_id, _ in new_findings}

    inactive_findings = []
    for finding_id, stale_finding in findings_in_cscc:
        if finding_id in latest_finding_ids:
            continue
        stale_finding['state'] = 'INACTIVE'
        now = date_time.get_utc_now_datetime()
        stale_finding['event_time'] = now.strftime(
            string_formats.TIMESTAMP_TIMEZONE)
        inactive_findings.append([finding_id, stale_finding])
    return inactive_findings
def create(self, violations, scanner_index_id):
    """Save violations to the db table.

    Args:
        violations (list): A list of violation dicts.
        scanner_index_id (int): id of the `ScannerIndex` row for this
            scanner run.
    """
    # One timestamp for the whole batch so every row shares the same
    # created_at_datetime.
    created_at = date_time.get_utc_now_datetime()
    for violation in violations:
        violation_hash = _create_violation_hash(
            violation.get('full_name', ''),
            violation.get('resource_data', ''),
            violation.get('violation_data', ''),
        )
        # Use a distinct name for the ORM row to avoid shadowing the
        # source dict.
        row = Violation(
            created_at_datetime=created_at,
            full_name=violation.get('full_name'),
            resource_data=violation.get('resource_data'),
            resource_name=violation.get('resource_name'),
            resource_id=violation.get('resource_id'),
            resource_type=violation.get('resource_type'),
            rule_index=violation.get('rule_index'),
            rule_name=violation.get('rule_name'),
            scanner_index_id=scanner_index_id,
            violation_data=json.dumps(violation.get('violation_data'),
                                      sort_keys=True),
            violation_hash=violation_hash,
            violation_message=violation.get('violation_message', ''),
            violation_type=violation.get('violation_type'))
        self.session.add(row)
def find_match_rotation_period(cls, key, rotation_period, mode):
    """Check this rule's rotation period against the given crypto key.

    Whitelist mode: no violation while days since last rotation is less
    than or equal to the rotation period. Blacklist mode: a violation
    when days since last rotation exceeds the rotation period.

    Args:
        key (Resource): The resource to check for a match.
        mode (string): The mode specified in the rule.
        rotation_period (string): The cut off rotation schedule of
            crypto key specified in rule file.

    Returns:
        bool: Returns true if a match is found.
    """
    LOGGER.debug('Formatting rotation time...')
    creation_time = key.primary_version.get('createTime')
    # Trim the last 5 characters before parsing — presumably a timezone
    # suffix not covered by TIMESTAMP_MICROS; confirm against the
    # createTime format returned by the API.
    last_rotated = datetime.datetime.strptime(
        creation_time[:-5], string_formats.TIMESTAMP_MICROS)
    days_since_rotated = (
        date_time.get_utc_now_datetime() - last_rotated).days
    if mode == BLACKLIST:
        return days_since_rotated > rotation_period
    if mode == WHITELIST:
        return days_since_rotated <= rotation_period
    # Unknown mode: never a match.
    return False
def bucket_conditions_guarantee_min(conditions, min_retention):
    """Check if other conditions can guarantee minimum retention.

    Three independent checks, any one of which guarantees the minimum:
    an `age` condition at least as large, a `createdBefore` date old
    enough, or at least one newer version required.

    Args:
        conditions (dict): the condition dict of the bucket.
        min_retention (int): the value of minimum retention.

    Returns:
        bool: True: min is guaranteed even if age is too small.
    """
    age = conditions.get('age')
    if age is not None and age >= min_retention:
        return True

    # A sufficiently old createdBefore date also guarantees retention.
    if 'createdBefore' in conditions:
        cutoff = dt.get_datetime_from_string(
            conditions['createdBefore'], '%Y-%m-%d')
        if (dt.get_utc_now_datetime() - cutoff).days >= min_retention:
            return True

    # Requiring one or more newer versions is enough on its own.
    return conditions.get('numNewerVersions', 0) >= 1
def complete(self, status=IndexState.SUCCESS):
    """Mark the inventory as completed with a final inventory_status.

    Records both the final status and the completion timestamp.

    Args:
        status (str): Final inventory_status.
    """
    self.inventory_status = status
    self.completed_at_datetime = date_time.get_utc_now_datetime()
def _get_output_filename():
    """Create the output filename.

    Returns:
        str: The output filename for the violations json, stamped with
            the current UTC time.
    """
    timestamp = date_time.get_utc_now_datetime().strftime(
        string_formats.TIMESTAMP_TIMEZONE)
    return string_formats.CSCC_FINDINGS_FILENAME.format(timestamp)
def complete(self, status=IndexState.SUCCESS):
    """Mark the scanner as completed with a final scanner_status.

    Records both the final status and the completion timestamp.

    Args:
        status (str): Final scanner_status.
    """
    self.scanner_status = status
    self.completed_at_datetime = date_time.get_utc_now_datetime()
def purge(self, retention_days):
    """Purge the gcp_inventory data that's older than the retention days.

    Args:
        retention_days (string): Days of inventory tables to retain.

    Returns:
        str: Purge result.
    """
    LOGGER.info('retention_days is: %s', retention_days)

    if not retention_days:
        LOGGER.info('retention_days is not specified. Will use '
                    'configuration default.')
        retention_days = self.config.inventory_config.retention_days
    retention_days = int(retention_days)

    # Negative retention disables purging entirely.
    if retention_days < 0:
        result_message = 'Purge is disabled. Nothing will be purged.'
        LOGGER.info(result_message)
        return result_message

    cutoff_datetime = (date_time.get_utc_now_datetime()
                       - datetime.timedelta(days=retention_days))
    LOGGER.info('Cut-off datetime to start purging is: %s',
                cutoff_datetime)

    with self.config.scoped_session() as session:
        stale_indexes = (
            DataAccess.get_inventory_indexes_older_than_cutoff(
                session, cutoff_datetime))

    if not stale_indexes:
        result_message = 'No inventory to be purged.'
        LOGGER.info(result_message)
        return result_message

    purged_ids = []
    for inventory_index in stale_indexes:
        self.delete(inventory_index.id)
        purged_ids.append(str(inventory_index.id))

    result_message = (
        'Inventory data from these inventory indexes have '
        'been purged: {}').format(', '.join(purged_ids))
    LOGGER.info(result_message)
    return result_message
def create(cls):
    """Create a new inventory index row.

    The row id is the microsecond timestamp of the creation time, so it
    doubles as a unique, sortable identifier.

    Returns:
        InventoryIndex: InventoryIndex row object.
    """
    now = date_time.get_utc_now_datetime()
    return InventoryIndex(
        id=date_time.get_utc_now_microtimestamp(now),
        created_at_datetime=now,
        completed_at_datetime=None,
        inventory_status=IndexState.CREATED,
        schema_version=CURRENT_SCHEMA,
        counter=0)
def _get_output_filename(self, filename_template):
    """Create the output filename.

    Args:
        filename_template (string): template to use for the output
            filename.

    Returns:
        str: The output filename for the inventory summary file,
            combining the inventory index id and the current UTC time.
    """
    timestamp = date_time.get_utc_now_datetime().strftime(
        string_formats.TIMESTAMP_TIMEZONE_FILES)
    return filename_template.format(str(self.inventory_index_id),
                                    timestamp)
def create(cls, inv_index_id):
    """Create a new scanner index row.

    The row id is the microsecond timestamp of the creation time, so it
    doubles as a unique, sortable identifier.

    Args:
        inv_index_id (str): Id of the inventory index.

    Returns:
        object: ScannerIndex row object.
    """
    now = date_time.get_utc_now_datetime()
    return ScannerIndex(
        id=date_time.get_utc_now_microtimestamp(now),
        inventory_index_id=inv_index_id,
        created_at_datetime=now,
        scanner_status=IndexState.CREATED,
        schema_version=CURRENT_SCHEMA)
def open(self, handle=None):
    """Open the memory storage.

    Args:
        handle (str): If falsy (e.g. None), generate a new
            microsecond-timestamp handle instead of opening an existing
            one.

    Returns:
        str: inventory index handle.
    """
    # The original `if handle: handle = handle` self-assignment was a
    # no-op; the only real work is generating a fresh handle when none
    # was supplied. Truthiness semantics are preserved.
    if not handle:
        handle = date_time.get_utc_now_datetime().strftime(
            string_formats.TIMESTAMP_MICROS)
    return handle
def commit(self):
    """Commit the stored inventory.

    Computes the final index state from the recorded errors/warnings and
    writes it, together with the counter, errors, warnings and message,
    back to the InventoryIndex row.
    """
    # Errors must take precedence over warnings: a run that recorded any
    # error is a FAILURE even when warnings are also present. (The
    # original checked warnings first, which mislabelled such runs as
    # PARTIAL_SUCCESS.)
    if self.inventory_index.inventory_index_errors:
        status = IndexState.FAILURE
    elif self.inventory_index.inventory_index_warnings:
        status = IndexState.PARTIAL_SUCCESS
    else:
        status = IndexState.SUCCESS
    try:
        self.engine.execute(InventoryIndex.__table__.update().where(
            InventoryIndex.id == self.inventory_index.id).values(
                completed_at_datetime=(date_time.get_utc_now_datetime()),
                inventory_status=status,
                counter=self.inventory_index.counter,
                inventory_index_errors=(
                    self.inventory_index.inventory_index_errors),
                inventory_index_warnings=(
                    self.inventory_index.inventory_index_warnings),
                message=self.inventory_index.message))
    finally:
        # Mark the session completed even if the database update raises.
        self.session_completed = True
def find_violations(self, service_account):
    """Find service account key age violations based on the max_age.

    Args:
        service_account (ServiceAccount): ServiceAccount object.

    Returns:
        list: Returns a list of RuleViolation named tuples.
    """
    # Note: We're checking the age as of "now", the scanner run time.
    # We could consider changing this to when the key was inventoried.
    scan_time = date_time.get_utc_now_datetime()

    violations = []
    for key in service_account.keys:
        full_name = key.get('full_name')
        LOGGER.debug('Checking key rotation for %s', full_name)
        created_time = key.get('valid_after_time')
        if not self._is_more_than_max_age(created_time, scan_time):
            continue
        key_id = key.get('key_id')
        violation_reason = ('Key ID %s not rotated since %s.' %
                            (key_id, created_time))
        violations.append(RuleViolation(
            resource_type=resource_mod.ResourceType.SERVICE_ACCOUNT_KEY,
            resource_id=service_account.email,
            resource_name=service_account.email,
            service_account_name=service_account.display_name,
            full_name=full_name,
            rule_name='%s (older than %s days)' % (self.rule_name,
                                                   self.key_max_age),
            rule_index=self.rule_index,
            violation_type='SERVICE_ACCOUNT_KEY_VIOLATION',
            violation_reason=violation_reason,
            project_id=service_account.project_id,
            key_id=key_id,
            key_created_time=created_time,
            resource_data=json.dumps(key, sort_keys=True)))
    return violations
def run(self, project_policies, prechange_callback=None,
        new_result_callback=None, add_rule_callback=None):
    """Runs the enforcer over all projects passed in to the function.

    Args:
        project_policies (iterable): An iterable of
            (project_id, firewall_policy) tuples to enforce or a
            dictionary in the format {project_id: firewall_policy}.
        prechange_callback (Callable): A callback function that will get
            called if the firewall policy for a project does not match
            the expected policy, before any changes are actually
            applied. If the callback returns False then no changes will
            be made to the project. If it returns True then the changes
            will be pushed. See FirewallEnforcer.apply_firewall()
            docstring for more details.
        new_result_callback (Callable): An optional function to call
            with each new result proto message as they are returned from
            a ProjectEnforcer thread.
        add_rule_callback (Callable): A callback function that checks
            whether a firewall rule should be applied. If the callback
            returns False, that rule will not be modified.

    Returns:
        enforcer_log_pb2.EnforcerLog: The EnforcerLog proto for the last
            run, including individual results for each project, and a
            summary of all results.
    """
    if self._dry_run:
        LOGGER.info('Simulating changes')

    # Normalize a dict input into (project_id, policy) pairs.
    if isinstance(project_policies, dict):
        project_policies = list(project_policies.items())

    self.enforcement_log.Clear()
    self.enforcement_log.summary.projects_total = len(project_policies)

    wave_start = date_time.get_utc_now_datetime()
    LOGGER.info('starting enforcement wave: %s', wave_start)
    enforced_count = self._enforce_projects(project_policies,
                                            prechange_callback,
                                            new_result_callback,
                                            add_rule_callback)
    wave_end = date_time.get_utc_now_datetime()

    elapsed_seconds = (
        date_time.get_utc_now_unix_timestamp(wave_end) -
        date_time.get_utc_now_unix_timestamp(wave_start))
    LOGGER.info('finished wave in %i seconds', elapsed_seconds)

    self.enforcement_log.summary.timestamp_start_msec = (
        date_time.get_utc_now_microtimestamp(wave_start))
    self.enforcement_log.summary.timestamp_end_msec = (
        date_time.get_utc_now_microtimestamp(wave_end))

    self._summarize_results()
    if not enforced_count:
        LOGGER.warning('No projects enforced on the last run, exiting.')
    return self.enforcement_log