Example #1
def setup_new_project(config, starting_step, output_yaml_path):
    """Run the full process for initalizing a single new project.

  Args:
    config (ProjectConfig): The config of a single project to setup.
    starting_step (int): The step number (indexed from 1) in _SETUP_STEPS to
      begin from.
    output_yaml_path (str): Path to write the resulting root config as YAML.

  Returns:
    A boolean, true if the project was deployed successfully, false otherwise.
  """
    steps = _SETUP_STEPS + config.extra_steps

    total_steps = len(steps)
    for step_num in range(starting_step, total_steps + 1):
        logging.info('Step %s/%s', step_num, total_steps)
        try:
            steps[step_num - 1](config)
        except subprocess.CalledProcessError as e:
            logging.error('Setup failed on step %s: %s', step_num, e)
            logging.error(
                'To continue the script, sync the input file with the output file at '
                '--output_yaml_path and re-run the script with additional flags: '
                '--resume_from_project=%s --resume_from_step=%s',
                config.project['project_id'], step_num)
            return False
        utils.write_yaml_file(config.root, output_yaml_path)

    logging.info('Setup completed successfully.')
    return True
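# A minimal, hedged sketch of the structure setup_new_project relies on:
# _SETUP_STEPS is assumed to be a list of callables that each take the project
# config, and config.extra_steps extends that list. The step functions and the
# commented-out invocation below are hypothetical placeholders, not the real
# setup steps.
import logging

def _create_project(config):
    # Hypothetical step: each step receives the full ProjectConfig.
    logging.info('Creating project %s', config.project['project_id'])

def _enable_services(config):
    # Hypothetical step.
    logging.info('Enabling services for %s', config.project['project_id'])

_SETUP_STEPS = [_create_project, _enable_services]

# setup_new_project(config, starting_step=1, output_yaml_path='out.yaml')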
Example #2
def _write_rules(deployment_config, directory):
  """Write a rules yaml file for each generator to the given directory."""
  project_configs, global_config = get_all_project_configs(deployment_config)
  for generator in SCANNER_RULE_GENERATORS:
    config_file_name = generator.config_file_name()
    logging.info('Generating rules for %s', config_file_name)
    rules = generator.generate_rules(project_configs, global_config)
    path = os.path.join(directory, config_file_name)
    utils.write_yaml_file(rules, path)
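# A rough sketch of the interface each entry in SCANNER_RULE_GENERATORS is
# assumed to expose, inferred from how _write_rules uses it: a
# config_file_name() method and a generate_rules(project_configs,
# global_config) method returning a YAML-serializable object. The class below
# is a hypothetical illustration, not one of the real generators.
class ExampleRuleGenerator(object):

  def config_file_name(self):
    # Hypothetical file name for the generated rules.
    return 'example_scanner_rules.yaml'

  def generate_rules(self, project_configs, global_config):
    # Return a dict that utils.write_yaml_file can serialize.
    return {'rules': [{'name': 'example_rule', 'mode': 'whitelist'}]}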
Example #3
def write_generated_fields(config):
    """Write gen fields to --generated_fields_path if set, else --project_yaml."""
    if FLAGS.generated_fields_path:
        utils.write_yaml_file(config.generated_fields,
                              FLAGS.generated_fields_path)
    else:
        config.root['generated_fields'] = config.generated_fields
        field_generation.rewrite_generated_fields_back(FLAGS.project_yaml,
                                                       config.root)
def move_generated_fields_out_of_projects(input_yaml_path):
    """Move generated_fields out of projects."""
    overall = utils.load_config(input_yaml_path)
    if GENERATED_FIELDS_NAME in overall:
        if is_old_generated_fields_format_exist(overall):
            raise utils.InvalidConfigError(
                'Generated fields conflict between new and old format.')
        return False
    convert_old_generated_fields_to_new(overall)
    if GENERATED_FIELDS_NAME in overall:
        if utils.wait_for_yes_no(
                'Move generated_fields out of projects [y/N]?'):
            utils.write_yaml_file(overall, input_yaml_path)
        return True
    return False
def setup_project(config, starting_step, output_yaml_path):
    """Run the full process for initalizing a single new project.

  Note: for projects that have already been deployed, only the updatable steps
  will be run.

  Args:
    config (ProjectConfig): The config of a single project to setup.
    starting_step (int): The step number (indexed from 1) in _SETUP_STEPS to
      begin from.
    output_yaml_path (str): Path to write the resulting root config as YAML.

  Returns:
    A boolean, true if the project was deployed successfully, false otherwise.
  """
    steps = _SETUP_STEPS + config.extra_steps
    deployed = is_deployed(config.project)

    if deployed and not FLAGS.allow_updates:
        logging.fatal('must pass --allow_updates to support updates.')

    total_steps = len(steps)
    for step_num in range(starting_step, total_steps + 1):
        logging.info('Step %s/%s', step_num, total_steps)
        step = steps[step_num - 1]

        if deployed and not step.updatable:
            logging.info('Step %s/%s is not updatable, skipping', step_num,
                         total_steps)
            continue
        try:
            step.func(config)
        except subprocess.CalledProcessError as e:
            logging.error('Setup failed on step %s: %s', step_num, e)
            logging.error(
                'To continue the script, sync the input file with the output file at '
                '--output_yaml_path and re-run the script with additional flags: '
                '--resume_from_project=%s --resume_from_step=%s',
                config.project['project_id'], step_num)
            return False
        utils.write_yaml_file(config.root, output_yaml_path)

    logging.info('Setup completed successfully.')
    return True
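# This variant of setup_project assumes each entry in _SETUP_STEPS carries a
# func callable and an updatable flag. A minimal sketch of that shape, using a
# namedtuple (an assumption; the real code may define the step type
# differently):
import collections

Step = collections.namedtuple('Step', ['func', 'updatable'])

# _SETUP_STEPS = [
#     Step(func=_create_project, updatable=False),    # hypothetical step
#     Step(func=_deploy_resources, updatable=True),   # hypothetical step
# ]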
def write_generated_fields(config):
    """Write gen fields to --generated_fields_path."""
    utils.write_yaml_file(config.generated_fields, FLAGS.generated_fields_path)
def setup_project(config, output_yaml_path, output_cleanup_path):
  """Run the full process for initalizing a single new project.

  Note: for projects that have already been deployed, only the updatable steps
  will be run.

  Args:
    config (ProjectConfig): The config of a single project to setup.
    output_yaml_path (str): Path to write the resulting root config as YAML.
    output_cleanup_path (str): Path to output cleanup shell script.

  Returns:
    A boolean, true if the project was deployed successfully, false otherwise.
  """
  steps = _SETUP_STEPS + config.extra_steps

  starting_step = config.project.get(_GENERATED_FIELDS_NAME, {}).get(
      'failed_step', 1)

  deployed = is_deployed(config.project)

  total_steps = len(steps)
  for step_num in range(starting_step, total_steps + 1):
    step = steps[step_num - 1]
    logging.info('%s: step %d/%d (%s)', config.project['project_id'], step_num,
                 total_steps, step.description)

    if deployed and not step.updatable:
      logging.info('Step %d is not updatable, skipping', step_num)
      continue

    try:
      output = step.func(config)
      if output and output.cleanup_commands:
        with open(output_cleanup_path, 'a') as f:
          for cmd in output.cleanup_commands:
            f.write('# {}\n'.format(cmd))
    except Exception as e:  # pylint: disable=broad-except
      logging.error('%s: setup failed on step %s: %s',
                    config.project['project_id'], step_num, e)
      logging.error(
          'Failure information has been written to --output_yaml_path. '
          'Please ensure the config at --project_yaml is updated with any '
          'changes from the config at --output_yaml_path and re-run the script '
          '(Note: only applicable if --output_yaml_path != --project_yaml).')

      # Only record the failed step if the project was undeployed; an update
      # can always start from the beginning.
      if not deployed:
        generated_fields = config.project.get('generated_fields')
        if not generated_fields:
          generated_fields = {}
          config.project['generated_fields'] = generated_fields
        generated_fields['failed_step'] = step_num
        utils.write_yaml_file(config.root, output_yaml_path)

      return False

    utils.write_yaml_file(config.root, output_yaml_path)

  # If this deployment was resuming from a previous failure, remove the
  # failed step now that it is done.
  config.project.get(_GENERATED_FIELDS_NAME, {}).pop('failed_step', None)
  utils.write_yaml_file(config.root, output_yaml_path)
  logging.info('Setup completed successfully.')

  return True
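# The variant above additionally assumes each step has a description and that
# step.func may return an object exposing cleanup_commands. A hedged sketch of
# those shapes (the names below are assumptions, not the real types):
import collections

Step = collections.namedtuple('Step', ['func', 'updatable', 'description'])
StepOutput = collections.namedtuple('StepOutput', ['cleanup_commands'])

def _example_step(config):
  # Hypothetical step that asks the caller to record a cleanup command.
  del config  # Unused in this sketch.
  return StepOutput(cleanup_commands=['delete the example resource'])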
def main(argv):
  del argv  # Unused.

  input_yaml_path = utils.normalize_path(FLAGS.project_yaml)
  output_yaml_path = utils.normalize_path(FLAGS.output_yaml_path)
  output_rules_path = (utils.normalize_path(FLAGS.output_rules_path)
                       if FLAGS.output_rules_path else None)

  # Output YAML will rearrange fields and remove comments, so do a basic check
  # against accidental overwriting.
  if input_yaml_path == output_yaml_path:
    logging.error('output_yaml_path cannot overwrite project_yaml.')
    return

  # Read and parse the project configuration YAML file.
  root_config = utils.resolve_env_vars(utils.read_yaml_file(input_yaml_path))
  if not root_config:
    logging.error('Error loading project YAML.')
    return

  logging.info('Validating project YAML against schema.')
  try:
    utils.validate_config_yaml(root_config)
  except jsonschema.exceptions.ValidationError as e:
    logging.error('Error in YAML config: %s', e)
    return

  overall = root_config['overall']
  audit_logs_project = root_config.get('audit_logs_project')

  projects = []
  # Always deploy the remote audit logs project first (if present).
  if audit_logs_project:
    projects.append(ProjectConfig(overall=overall,
                                  project=audit_logs_project,
                                  audit_logs_project=None))

  forseti_config = root_config.get('forseti', {})

  if forseti_config:
    forseti_project_config = ProjectConfig(
        overall=overall,
        project=forseti_config.get('project'),
        audit_logs_project=audit_logs_project)
    projects.append(forseti_project_config)

  for project_config in root_config.get('projects', []):
    projects.append(ProjectConfig(overall=overall,
                                  project=project_config,
                                  audit_logs_project=audit_logs_project))

  validate_project_configs(overall, projects)

  # If resuming setup from a particular project, skip to that project.
  if FLAGS.resume_from_project:
    while (projects and
           projects[0].project['project_id'] != FLAGS.resume_from_project):
      skipped = projects.pop(0)
      logging.info('Skipping project %s', skipped.project['project_id'])
    if not projects:
      logging.error('Project not found: %s', FLAGS.resume_from_project)

  if projects:
    starting_step = max(1, FLAGS.resume_from_step)
    for config in projects:
      logging.info('Setting up project %s', config.project['project_id'])
      if not setup_new_project(config, starting_step, output_yaml_path):
        # Don't attempt to deploy additional projects if one project failed.
        return

      # Fill in unset generated fields for the project and save it.
      add_generated_fields(config.project)
      utils.write_yaml_file(root_config, output_yaml_path)
      starting_step = 1
  else:
    logging.error('No projects to deploy.')

  # TODO: allow for forseti installation to be retried.
  if forseti_config:
    forseti_project_id = forseti_config['project']['project_id']
    forseti_service_account = forseti_config.get('generated_fields',
                                                 {}).get('service_account')

    # If the forseti block does not have generated fields from a previous
    # deployment, consider Forseti to be undeployed.
    # TODO: add more checks of a previous deployment.
    if 'generated_fields' not in forseti_config:
      forseti.install(forseti_config)

      forseti_service_account = forseti.get_server_service_account(
          forseti_project_id)

      forseti_config['generated_fields'] = {
          'service_account': forseti_service_account,
          'server_bucket': forseti.get_server_bucket(forseti_project_id),
      }
      utils.write_yaml_file(root_config, output_yaml_path)

    for project in projects:
      project_id = project.project['project_id']
      forseti.grant_access(project_id,
                           forseti_service_account)
    rule_generator.run(root_config, output_path=output_rules_path)
def rewrite_generated_fields_back(project_yaml, output_yaml_path, new_config):
    """Write config file to output_yaml_path with new generated_fields."""
    cfg_content = update_generated_fields(project_yaml, new_config)
    utils.write_yaml_file(cfg_content, output_yaml_path)