Code example #1
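A small helper that reads a Kubernetes manifest file from disk and hands the raw YAML to parse_resources_yaml (defined elsewhere in the same module), returning the parsed resources as a list.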
def load_resources_yaml(filename):
    """Load kubernetes resources from yaml file and parses
  into structured format.

  Args:
    filename: A str, the name of the manifest file.

  Returns:
    A list of structured kubernetes resources"""

    log.info("Reading " + filename)
    with open(filename, "r", encoding='utf-8') as stream:
        content = stream.read()
        return parse_resources_yaml(content)
Code example #2
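dump walks a list of parsed resources, stamps ownership metadata (via set_resource_ownership) onto every resource whose kind is in included_kinds, and writes the whole set back out as a multi-document YAML stream.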
def dump(outfile, resources, included_kinds, app_name, app_uid,
         app_api_version):
    to_be_dumped = []
    for resource in resources:
        if included_kinds is None or resource["kind"] in included_kinds:
            log.info("Application '{:s}' owns '{:s}/{:s}'", app_name,
                     resource["kind"], resource["metadata"]["name"])
            resource = copy.deepcopy(resource)
            set_resource_ownership(app_uid=app_uid,
                                   app_name=app_name,
                                   app_api_version=app_api_version,
                                   resource=resource)
        to_be_dumped.append(resource)
    yaml.safe_dump_all(to_be_dumped,
                       outfile,
                       default_flow_style=False,
                       indent=2)
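For orientation, here is a minimal sketch of how the two helpers above could be wired together, assuming both are available in the current module; the file name, resource kinds, and application identifiers are made-up example values.

import sys

# Made-up example values; load_resources_yaml and dump are the helpers shown above.
resources = load_resources_yaml("manifests.yaml")
dump(sys.stdout,
     resources,
     included_kinds=["Deployment", "Service"],
     app_name="example-app",
     app_uid="00000000-0000-0000-0000-000000000000",
     app_api_version="app.k8s.io/v1beta1")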
Code example #3
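A nested helper that decides which owner, if any, a resource should get: cluster-scoped kinds fall back to the namespace (or stay unowned), deployer-owned resources are attached to the deployer ServiceAccount, and everything else in included_kinds is owned by the Application.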
    def maybe_assign_ownership(resource):
        if resource["kind"] in _CLUSTER_SCOPED_KINDS:
            # Cluster-scoped resources cannot be owned by a namespaced resource:
            # https://kubernetes.io/docs/concepts/workloads/controllers/garbage-collection/#owners-and-dependents
            # Set the namespace as owner if provided, otherwise leave unowned.
            if namespace and namespace_uid:
                log.info("Namespace '{:s}' owns '{:s}/{:s}'", namespace,
                         resource["kind"], resource["metadata"]["name"])
                resource = copy.deepcopy(resource)
                set_namespace_resource_ownership(namespace_uid=namespace_uid,
                                                 namespace_name=namespace,
                                                 resource=resource)
            else:
                log.info(
                    "Application '{:s}' does not own cluster-scoped '{:s}/{:s}'",
                    app_name, resource["kind"], resource["metadata"]["name"])
            # Either way, do not fall through to the namespaced owners below.
            return resource

        # Deployer-owned resources should not be owned by the Application, as
        # they should be deleted with the deployer service account (not the app).
        if deployer_name and deployer_uid and should_be_deployer_owned(
                resource):
            log.info("ServiceAccount '{:s}' owns '{:s}/{:s}'", deployer_name,
                     resource["kind"], resource["metadata"]["name"])
            resource = copy.deepcopy(resource)
            set_service_account_resource_ownership(account_uid=deployer_uid,
                                                   account_name=deployer_name,
                                                   resource=resource)
        elif included_kinds is None or resource["kind"] in included_kinds:
            log.info("Application '{:s}' owns '{:s}/{:s}'", app_name,
                     resource["kind"], resource["metadata"]["name"])
            resource = copy.deepcopy(resource)
            set_app_resource_ownership(app_uid=app_uid,
                                       app_name=app_name,
                                       app_api_version=app_api_version,
                                       resource=resource)

        return resource
Code example #4
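process assembles the manifests needed to launch a deployer: it provisions reporting secrets from storage, plus service accounts, storage classes, passwords, and TLS certificates for any schema properties the user did not supply, then emits either a KALM-managed deployment or a plain deployer job.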
def process(schema, values, deployer_image, deployer_entrypoint, version_repo,
            image_pull_secret, deployer_service_account_name):
  props = {}
  manifests = []
  app_name = get_name(schema, values)
  namespace = get_namespace(schema, values)

  # Inject DEPLOYER_IMAGE property values if not already present.
  values = inject_deployer_image_properties(values, schema, deployer_image)

  # Handle provisioning of reporting secrets from storage if a URI
  # is provided.
  for key, value in values.items():
    if key not in schema.properties:
      continue
    if not schema.properties[key].reporting_secret:
      continue
    if '://' in value:
      value, storage_manifests = provision_from_storage(
          key, value, app_name=app_name, namespace=namespace)
      values[key] = value
      manifests += storage_manifests

  for prop in schema.properties.values():
    if prop.name in values:
      # The value has been explicitly specified. Skip.
      continue
    if prop.service_account:
      value, sa_manifests = provision_service_account(
          schema,
          prop,
          app_name=app_name,
          namespace=namespace,
          image_pull_secret=image_pull_secret)
      props[prop.name] = value
      manifests += sa_manifests
    elif prop.storage_class:
      value, sc_manifests = provision_storage_class(
          schema, prop, app_name=app_name, namespace=namespace)
      props[prop.name] = value
      manifests += sc_manifests
    elif prop.xtype == config_helper.XTYPE_ISTIO_ENABLED:
      # TODO: Really populate this value.
      props[prop.name] = False
    elif prop.xtype == config_helper.XTYPE_INGRESS_AVAILABLE:
      # TODO(#360): Really populate this value.
      props[prop.name] = True
    elif prop.password:
      props[prop.name] = property_generator.generate_password(prop.password)
    elif prop.tls_certificate:
      props[prop.name] = property_generator.generate_tls_certificate()

  # Merge input and provisioned properties.
  app_params = dict(list(values.items()) + list(props.items()))

  use_kalm = False
  if (schema.is_v2() and
      schema.x_google_marketplace.managed_updates.kalm_supported):
    if version_repo:
      use_kalm = True
      log.info('Using KALM for deployment')
    else:
      log.warn('The deployer supports KALM but no --version-repo specified. '
               'Falling back to provisioning the deployer job only.')

  if use_kalm:
    manifests += provision_kalm(
        schema,
        version_repo=version_repo,
        app_name=app_name,
        namespace=namespace,
        deployer_image=deployer_image,
        image_pull_secret=image_pull_secret,
        app_params=app_params,
        deployer_service_account_name=deployer_service_account_name)
  else:
    manifests += provision_deployer(
        schema,
        app_name=app_name,
        namespace=namespace,
        deployer_image=deployer_image,
        deployer_entrypoint=deployer_entrypoint,
        image_pull_secret=image_pull_secret,
        app_params=app_params,
        deployer_service_account_name=deployer_service_account_name)
  return manifests
Code example #5
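A smoke-test driver: it applies a tester manifest with kubectl apply, then polls each Pod in the manifest until its status phase reaches Succeeded or Failed (or the timeout elapses), printing the tester's logs in every case.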
def main():
    parser = ArgumentParser(description=_PROG_HELP)
    parser.add_argument('--namespace')
    parser.add_argument('--manifest')
    parser.add_argument('--timeout', type=int, default=300)
    args = parser.parse_args()

    try:
        Command('''
        kubectl apply
        --namespace="{}"
        --filename="{}"
        '''.format(args.namespace, args.manifest),
                print_call=True)
    except CommandException as ex:
        log.error("{} Failed to apply tester job. Reason: {}", LOG_SMOKE_TEST,
                  ex.message)
        return

    resources = load_resources_yaml(args.manifest)

    for resource_def in resources:
        full_name = "{}/{}".format(resource_def['kind'],
                                   deep_get(resource_def, 'metadata', 'name'))

        if resource_def['kind'] != 'Pod':
            log.info("Skip '{}'", full_name)
            continue

        start_time = time.time()
        poll_interval = 4
        tester_timeout = args.timeout

        while True:
            try:
                resource = Command('''
          kubectl get "{}"
          --namespace="{}"
          -o=json
          '''.format(full_name, args.namespace),
                                   print_call=True).json()
            except CommandException as ex:
                log.info(str(ex))
                log.info("retrying")
                time.sleep(poll_interval)
                continue

            result = deep_get(resource, 'status', 'phase')

            if result == "Failed":
                print_tester_logs(full_name, args.namespace)
                log.error("{} Tester '{}' failed.", LOG_SMOKE_TEST, full_name)
                break

            if result == "Succeeded":
                print_tester_logs(full_name, args.namespace)
                log.info("{} Tester '{}' succeeded.", LOG_SMOKE_TEST,
                         full_name)
                break

            if time.time() - start_time > tester_timeout:
                print_tester_logs(full_name, args.namespace)
                log.error("{} Tester '{}' timeout.", LOG_SMOKE_TEST, full_name)

            time.sleep(poll_interval)