Example 1
    def _load_rule_definitions(self):
        """Load the rule definitions file from GCS or local filesystem.

        Returns:
            dict: The parsed dict from the rule definitions file.
        """
        return file_loader.read_and_parse_file(self.full_rules_path)
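All of these examples exercise file_loader.read_and_parse_file, which takes a single path, local or GCS, and returns the parsed file contents. A minimal usage sketch; the import path and the file path below are assumptions for illustration, not values taken from the examples:

    # Hypothetical import path and rules file; adjust both to your tree.
    from google.cloud.security.common.util import file_loader

    # The same call accepts a local path or a 'gs://bucket/rules.yaml' path,
    # per the docstring above.
    rules = file_loader.read_and_parse_file('/path/to/rules.yaml')
    print(rules)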
Example 2
def main(_):
    """Main function.

        Args:
            _ (obj): Result of the last expression evaluated in the interpreter.
    """
    if FLAGS.timestamp is not None:
        timestamp = FLAGS.timestamp
    else:
        timestamp = _get_timestamp()

    if FLAGS.config is None:
        LOGGER.error('You must specify a notification pipeline')
        sys.exit()

    notifier_configs = FLAGS.FlagValuesDict()
    configs = file_loader.read_and_parse_file(FLAGS.config)

    # get violations
    v_dao = violation_dao.ViolationDao()
    violations = {}
    for resource in RESOURCE_MAP:
        try:
            violations[resource] = v_dao.get_all_violations(
                timestamp, RESOURCE_MAP[resource])
        except db_errors.MySQLError as e:
            # Continue even when an error is raised: if there are no
            # violations, the MySQL table may not be present and the
            # query raises an error.
            LOGGER.error('get_all_violations error: %s', e)
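The loop above assumes RESOURCE_MAP maps a resource name to the violations table it is stored in. A hypothetical sketch of that shape, with table names borrowed from Example 7 below:

    # Hypothetical illustration; the real RESOURCE_MAP is defined elsewhere
    # in the module.
    RESOURCE_MAP = {
        'violations': 'violations',
        'bucket_acl_violations': 'buckets_acl_violations',
    }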
Example 3
def enforce_single_project(enforcer, project_id, policy_filename):
    """Runs the enforcer on a single project.

    Args:
      enforcer: An instance of the batch_enforcer.BatchFirewallEnforcer class.
      project_id: The project to enforce.
      policy_filename: The JSON-encoded file to read the firewall policy from.

    Returns:
      The EnforcerLog proto for the last run, including individual results for
      the enforced project, and a summary of the run.
    """
    policy = file_loader.read_and_parse_file(policy_filename)

    if not isinstance(policy, list):
        raise InvalidParsedPolicyFileError(
            'Invalid parsed policy file: found %s expected %s' %
            (type(policy), list))

    project_policies = [(project_id, policy)]

    enforcer_results = enforcer.run(project_policies)

    for result in enforcer_results.results:
        result.gce_firewall_enforcement.policy_path = policy_filename
        result.run_context = enforcer_log_pb2.ENFORCER_ONE_PROJECT

    return enforcer_results
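Because enforce_single_project rejects anything that does not parse to a list, the policy file is expected to be a JSON array of firewall rules. A minimal sketch of such a file; the rule fields are modeled on GCE firewall resources and are illustrative only:

    [
        {
            "name": "allow-internal",
            "network": "global/networks/default",
            "sourceRanges": ["10.0.0.0/8"],
            "allowed": [{"IPProtocol": "tcp", "ports": ["1-65535"]}]
        }
    ]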
Example 4
def main(argv):
    """The main entry point for Forseti Security Enforcer runner."""

    del argv

    forseti_config = FLAGS.forseti_config
    if forseti_config is None:
        LOGGER.error('Path to Forseti Security config needs to be specified.')
        sys.exit()

    try:
        configs = file_loader.read_and_parse_file(forseti_config)
    except IOError:
        LOGGER.error('Unable to open Forseti Security config file. '
                     'Please check your path and filename and try again.')
        sys.exit()
    global_configs = configs.get('global')

    enforcer = initialize_batch_enforcer(
        global_configs, FLAGS.concurrent_threads,
        FLAGS.maximum_project_writer_threads,
        FLAGS.maximum_firewall_write_operations, FLAGS.dry_run)

    if FLAGS.enforce_project and FLAGS.policy_file:
        enforcer_results = enforce_single_project(enforcer,
                                                  FLAGS.enforce_project,
                                                  FLAGS.policy_file)

        print(enforcer_results)

    else:
        print('Batch mode not implemented yet.')
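Examples 3 and 4 together form the single-project path: the runner wires its flags into enforce_single_project. A hedged invocation sketch; the script name and both paths are assumptions, while the flags appear in the code above:

    $ python enforcer_runner.py \
        --forseti_config /path/to/forseti_conf.yaml \
        --enforce_project my-project-id \
        --policy_file /path/to/policy.json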
Example 5
    def _setup_pipeline_builder(self, config_filename):
        """Build a PipelineBuilder with mocked dependencies for testing.

        Args:
            config_filename (str): The config file name, relative to BASE_PATH.

        Returns:
            PipelineBuilder: The builder under test, with _get_api mocked out.
        """
        inventory_configs = file_loader.read_and_parse_file(BASE_PATH +
                                                            config_filename)
        my_pipeline_builder = pipeline_builder.PipelineBuilder(
            FAKE_TIMESTAMP, inventory_configs, mock.MagicMock(),
            mock.MagicMock(), mock.MagicMock())
        my_pipeline_builder._get_api = mock.MagicMock()
        return my_pipeline_builder
Example 6
    def _load_rule_definitions(self):
        """Load the rule definitions file from GCS or local filesystem.

        Returns:
            dict: The parsed dict from the rule definitions file.
        """
        LOGGER.debug('Loading %r rules from %r', self, self.full_rules_path)
        rules = file_loader.read_and_parse_file(self.full_rules_path)
        LOGGER.debug('Got rules: %r', rules)
        return rules
Example 7
def main(_):
    """main function"""
    if FLAGS.timestamp is not None:
        timestamp = FLAGS.timestamp
    else:
        timestamp = _get_timestamp()

    if FLAGS.config is None:
        LOGGER.error('You must specify a notification pipeline')
        sys.exit()

    notifier_configs = FLAGS.FlagValuesDict()
    configs = file_loader.read_and_parse_file(FLAGS.config)

    # get violations
    v_dao = violation_dao.ViolationDao()
    violations = {
        'violations':
        v_dao.get_all_violations(timestamp, 'violations'),
        'bucket_acl_violations':
        v_dao.get_all_violations(timestamp, 'buckets_acl_violations')
    }
    for retrieved_v in violations:
        LOGGER.info('retrieved %d violations for resource \'%s\'',
                    len(violations[retrieved_v]), retrieved_v)

    # build notification pipelines
    pipelines = []
    for resource in configs['resources']:
        if violations.get(resource['resource']) is None:
            LOGGER.error('The resource name \'%s\' is invalid, skipping',
                         resource['resource'])
            continue
        if resource['should_notify'] is False:
            continue
        for pipeline in resource['pipelines']:
            LOGGER.info('Running \'%s\' pipeline for resource \'%s\'',
                        pipeline['name'], resource['resource'])
            chosen_pipeline = find_pipelines(pipeline['name'])
            pipelines.append(
                chosen_pipeline(resource['resource'], timestamp,
                                violations[resource['resource']],
                                notifier_configs, pipeline['configuration']))

    # run the pipelines
    for pipeline in pipelines:
        pipeline.run()
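Every key this loop reads appears in the file loaded from FLAGS.config, so its expected shape can be reconstructed. A minimal YAML sketch; the pipeline name and its configuration block are hypothetical:

    resources:
        - resource: violations
          should_notify: true
          pipelines:
              - name: email_violations          # hypothetical pipeline name
                configuration:
                    sendgrid_api_key: YOUR_KEY  # hypothetical setting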
Example 8
def main(_):
    """Run the scanners.

    Args:
        _ (list): argv, unused due to apputils.
    """
    forseti_config = FLAGS.forseti_config
    if forseti_config is None:
        LOGGER.error('Path to Forseti Security config needs to be specified.')
        sys.exit()

    try:
        configs = file_loader.read_and_parse_file(forseti_config)
    except IOError:
        LOGGER.error('Unable to open Forseti Security config file. '
                     'Please check your path and filename and try again.')
        sys.exit()
    global_configs = configs.get('global')
    scanner_configs = configs.get('scanner')

    log_util.set_logger_level_from_config(scanner_configs.get('loglevel'))

    snapshot_timestamp = _get_timestamp(global_configs)
    if not snapshot_timestamp:
        LOGGER.warning('No snapshot timestamp found. Exiting.')
        sys.exit()

    runnable_scanners = scanner_builder.ScannerBuilder(
        global_configs, scanner_configs, snapshot_timestamp).build()

    # TODO: Make resilient by letting the batch continue to run even if one
    # scanner errors out.
    for scanner in runnable_scanners:
        try:
            scanner.run()
        except Exception:  # pylint: disable=broad-except
            LOGGER.error('Error running scanner: %s',
                         scanner.__class__.__name__,
                         exc_info=True)

    LOGGER.info('Scan complete!')
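main reads two top-level sections from the Forseti config, 'global' and 'scanner', and the only scanner key used above is 'loglevel'. A minimal sketch of a matching YAML file; the 'global' contents are placeholders (Example 11 shows 'global' also carrying email settings):

    global:
        email_recipient: someone@example.com  # placeholder
    scanner:
        loglevel: info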
Example 9
def main(_):
    """Main function.

        Args:
            _ (obj): Result of the last expression evaluated in the interpreter.
    """
    notifier_flags = FLAGS.FlagValuesDict()

    forseti_config = notifier_flags.get('forseti_config')

    if forseti_config is None:
        LOGGER.error('Path to Forseti Security config needs to be specified.')
        sys.exit()

    try:
        configs = file_loader.read_and_parse_file(forseti_config)
    except IOError:
        LOGGER.error('Unable to open Forseti Security config file. '
                     'Please check your path and filename and try again.')
        sys.exit()
    global_configs = configs.get('global')
    notifier_configs = configs.get('notifier')

    timestamp = notifier_configs.get('timestamp')
    if timestamp is None:
        timestamp = _get_timestamp(global_configs)

    # get violations
    v_dao = violation_dao.ViolationDao(global_configs)
    violations_as_dict = v_dao.get_all_violations(timestamp)

    for i in violations_as_dict:
        i['created_at_datetime'] = (
            i.get('created_at_datetime').strftime('%Y-%m-%dT%H:%M:%SZ'))

    violations = {}
    try:
        violations = violation_dao.map_by_resource(violations_as_dict)
    except db_errors.MySQLError as e:
        # Continue even when an error is raised: if there are no
        # violations, the MySQL table may not be present and the
        # query raises an error.
        LOGGER.error('map_by_resource error: %s', e)
Example 10
    def _build_dependency_tree(self):
        """Build the dependency tree with all the pipeline nodes.

        Returns:
            PipelineNode: The top-level starting point of the pipeline
                dependency tree. All other nodes in the tree are
                children of this root.

                Example:
                root.resource_name = 'organizations'
                root.enabled = True
                root.parent = None
                root.children = (pipeline_node1, pipeline_node2, ...)
        """
        # First pass: map every pipeline to its own node, regardless of
        # whether it should run.
        map_of_all_pipeline_nodes = {}

        config = file_loader.read_and_parse_file(self.config_path)
        configured_pipelines = config.get('pipelines', [])

        for entry in configured_pipelines:
            map_of_all_pipeline_nodes[entry.get('resource')] = PipelineNode(
                entry.get('resource'), entry.get('enabled'))

        # Second pass: build the dependency tree by setting the parent
        # correctly on each node.
        for entry in configured_pipelines:
            parent_name = (pipeline_requirements_map.REQUIREMENTS_MAP.get(
                entry.get('resource')).get('depends_on'))
            if parent_name is not None:
                parent_node = map_of_all_pipeline_nodes[parent_name]
                map_of_all_pipeline_nodes[entry.get('resource')].parent = (
                    parent_node)

        # Assume root is organizations.
        return map_of_all_pipeline_nodes.get('organizations')
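_build_dependency_tree reads two keys per entry from the config's 'pipelines' list, 'resource' and 'enabled', and assumes an 'organizations' entry as the root. A minimal sketch of a matching config; the 'projects' entry is illustrative and would need a REQUIREMENTS_MAP entry naming its dependency:

    pipelines:
        - resource: organizations
          enabled: true
        - resource: projects      # illustrative child pipeline
          enabled: true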
Example 11
def main(_):
    """Runs the Inventory Loader.

    Args:
        _ (list): argv, unused due to apputils.
    """
    del _
    inventory_flags = FLAGS.FlagValuesDict()

    if inventory_flags.get('list_resources'):
        inventory_util.list_resource_pipelines()
        sys.exit()

    forseti_config = inventory_flags.get('forseti_config')
    if forseti_config is None:
        LOGGER.error('Path to Forseti Security config needs to be specified.')
        sys.exit()

    try:
        configs = file_loader.read_and_parse_file(forseti_config)
    except IOError:
        LOGGER.error('Unable to open Forseti Security config file. '
                     'Please check your path and filename and try again.')
        sys.exit()
    global_configs = configs.get('global')
    inventory_configs = configs.get('inventory')

    log_util.set_logger_level_from_config(inventory_configs.get('loglevel'))

    dao_map = _create_dao_map(global_configs)

    cycle_time, cycle_timestamp = _start_snapshot_cycle(dao_map.get('dao'))

    pipeline_builder = builder.PipelineBuilder(
        cycle_timestamp,
        inventory_configs,
        global_configs,
        api_map.API_MAP,
        dao_map)
    pipelines = pipeline_builder.build()

    run_statuses = _run_pipelines(pipelines)

    if all(run_statuses):
        snapshot_cycle_status = 'SUCCESS'
    elif any(run_statuses):
        snapshot_cycle_status = 'PARTIAL_SUCCESS'
    else:
        snapshot_cycle_status = 'FAILURE'

    _complete_snapshot_cycle(dao_map.get('dao'), cycle_timestamp,
                             snapshot_cycle_status)

    if global_configs.get('email_recipient') is not None:
        payload = {
            'email_sender': global_configs.get('email_sender'),
            'email_recipient': global_configs.get('email_recipient'),
            'sendgrid_api_key': global_configs.get('sendgrid_api_key'),
            'cycle_time': cycle_time,
            'cycle_timestamp': cycle_timestamp,
            'snapshot_cycle_status': snapshot_cycle_status,
            'pipelines': pipelines
        }
        message = {
            'status': 'inventory_done',
            'payload': payload
        }
        notifier.process(message)