Example #1
0
    def __init__(self, credentials_descriptor):
        """Build a k8s credentialer from its serialized descriptor.

        Raises a schema validation error if the descriptor is malformed.
        """
        validate(credentials_descriptor, "yac/schema/stacks/k8s/credentialer.json")

        # pull each field out of the descriptor, falling back to a
        # sensible default when the field is absent
        self.namespace = search("namespace", credentials_descriptor, "")
        self.clusters = search("clusters", credentials_descriptor, ["nonprod","prod"])

        # optional per-cluster tokens; when supplied there should be
        # one token per cluster
        self.tokens = search("tokens", credentials_descriptor, [])

        self.secrets = Secrets(search('"Secrets"', credentials_descriptor, {}))

        # inputs drive any user prompts needed to complete the credentials
        self.inputs = Inputs(search("Inputs", credentials_descriptor, {}))

        # integration tests can redirect file writes to a root directory
        # other than the user's home via this setting
        self.rootdir = search("rootdir", credentials_descriptor, "")
    def __init__(self, serialized_artifact):
        """Build a container-image maker from its serialized form.

        Raises a schema validation error if the artifact is malformed.
        """
        validate(serialized_artifact, "yac/schema/makers/container_image.json")

        self.name = search("name", serialized_artifact, "")
        self.description = search("description", serialized_artifact, "")
        self.image = search("image", serialized_artifact)

        # registry the built images get pushed to (artifactory by default)
        self.registry = search('registry', serialized_artifact, ARTIFACTORY_URL)

        # inputs drive any user prompts needed for the build
        self.inputs = Inputs(search("Inputs", serialized_artifact, {}))

        self.secrets = Secrets(search('"Secrets"', serialized_artifact, {}))

        # general-purpose docker client, used for most operations
        self.client = docker.DockerClient('tcp://%s:%s'%(BUILDER_HOST,
                                                         BUILDER_PORT))

        # "low-level" client for builds that stream per-layer details
        # to stdout
        # TODO: figure out why auth isn't working from inside a container
        # with this one
        self.api_client = docker.APIClient('tcp://%s:%s'%(BUILDER_HOST,
                                                         BUILDER_PORT))
Example #3
0
    def test_secrets(self):
        """Two keepass-backed secrets should both load into params."""

        secret_descriptors = {
            "param-key-1": {
                "comment": "branch 1, child 1, entry 1",
                "source": "keepass",
                "lookup": {
                    "path": "Branch 1/B1-C1/B1-C1-E1",
                    "field": "password"
                }
            },
            "param-key-2": {
                "comment": "branch 2, child 2, entry 1",
                "source": "keepass",
                "lookup": {
                    "path": "Branch 2/B2-C2/B2-C2-E1",
                    "field": "password"
                }
            }
        }

        vault_descriptors = [{
            "type": "keepass",
            "name": "keepass",
            "configs": {
                "vault-path": "yac/tests/vault/vectors/test_vault.kdbx",
                "vault-pwd-path": TestCase.pwd_path
            }
        }]

        params = Params({})
        vaults = SecretVaults(vault_descriptors)
        secrets = Secrets(secret_descriptors)

        # resolve both secrets from the test vault into params
        secrets.load(params, vaults)

        print(secrets.get_errors())

        # each param key should now hold the value stored in the vault
        both_loaded = (params.get("param-key-1") == 'b1-c1-e1-secret'
                       and params.get("param-key-2") == 'b2-c2-e1-secret')

        self.assertTrue(both_loaded)
Example #4
0
    def __init__(self, serialized_artifact):
        """Build an AMI maker from its serialized form.

        Raises a schema validation error if the artifact is malformed.
        """
        validate(serialized_artifact, "yac/schema/makers/ami.json")

        self.name = search('name', serialized_artifact)
        self.description = search('description', serialized_artifact)

        # aws profile aliasing the account the image gets built in
        self.profile = search('profile', serialized_artifact)

        # the packer file that drives the build
        self.packer_file = search('"packer-file"', serialized_artifact)

        # optional directory of extra files to include in the build
        self.packable_dir = search('"packer-dir"', serialized_artifact, "")

        self.secrets = Secrets(search('"Secrets"', serialized_artifact, {}))

        # inputs drive any user prompts needed for the build
        self.inputs = Inputs(search("Inputs", serialized_artifact, {}))
Example #5
0
    def test_schema_good(self):
        """A well-formed secrets descriptor should pass schema validation."""

        serialized_obj = {
            "wlwxgittest-token": {
                "comment":
                "api token for the wlwxgittest user. wrapped service alias in quotes due to jsonpath cannot do lookup for paths with a hyphen",
                "source": "main",
                "lookup": {
                    "yac-join": [
                        ".",
                        [
                            "wlwxgittest", "tokens", {
                                "yac-join": [
                                    "",
                                    ["\"", {
                                        "yac-ref": "service-alias"
                                    }, "\""]
                                ]
                            }, {
                                "yac-ref": "env"
                            }
                        ]
                    ]
                }
            }
        }

        # constructing Secrets triggers schema validation; a good
        # descriptor should not raise
        validation_success = True
        try:
            Secrets(serialized_obj)
        except ValidationError:
            validation_success = False
            print("validation failed")

        self.assertTrue(validation_success)
Example #6
0
class NordstromK8sCredentialer():
    """Creates kubectl credentials for Nordstrom k8s clusters.

    Renders a ~/.kube/config file (plus root CA certs and, on developer
    desktops, the kubelogin helper app) so kubectl can authenticate
    against the nonprod and prod clusters.
    """

    def __init__(self,
                 credentials_descriptor):
        # raises a schema validation error if required fields are missing
        validate(credentials_descriptor, "yac/schema/stacks/k8s/credentialer.json")

        self.namespace = search("namespace",
                               credentials_descriptor,"")

        self.clusters = search("clusters",
                               credentials_descriptor,
                               ["nonprod","prod"])

        # if tokens are input there should be one per cluster
        self.tokens = search("tokens",
                               credentials_descriptor,
                               [])

        self.secrets = Secrets(search('"Secrets"',
                               credentials_descriptor,{}))

        # initialize the inputs (for driving user prompts)
        self.inputs = Inputs(search("Inputs",
                                    credentials_descriptor,{}))

        # for integration testing it is useful to write files to a
        # root directory other than user's home
        self.rootdir = search("rootdir",
                              credentials_descriptor,"")

    def create(self,
               params,
               vaults,
               overwrite_bool):
        """Create the k8s credentials files.

        Args:
            params: service params; inputs and secrets get loaded into it
            vaults: secret vaults used to resolve creds-specific secrets
            overwrite_bool: recreate ~/.kube/config even if created today

        Returns:
            err: error string, empty on success
        """

        self.params = params

        # process creds-specific inputs and load results into params
        self.inputs.load(self.params)

        # load any creds-specific secrets into params
        self.secrets.load(self.params,vaults)

        # determine if credentialer is being instantiated on a developer
        # desktop
        self.is_desktop = self.running_on_desktop()

        err = ""
        if overwrite_bool or not self.created_today(".kube/config"):

            # make sure certs are in place
            err = self.install_root_ca_certs()

            # install ~/.kubeloginrc.yaml file
            # note: file is only needed when running on a desktop
            if not err and self.is_desktop:
                self.install_kube_login()

            # create ~/.kube/config file
            if not err:
                self.install_kube_config(params)

            if not err and not self.tokens:
                # explain how to create tokens for the target clusters
                self.token_help()

        else:
            print("k8s ~/.kube/config file has fresh tokens so won't be updated ...")

        return err

    def token_help(self):
        """Show the kubelogin command for each cluster, then wait for the user."""

        for cluster in self.clusters:

            self.show_kubelogin_help(cluster)

        input("press <enter> when you've completed the kubelogin for all clusters ... >> ")

    def install_root_ca_certs(self):
        """Download the root CA cert for each cluster under ~/.kube/certs.

        Returns an error string ("" on success); stops at the first failure.
        """

        err = self.download_cert(PROD1_CERT_URL,".kube/certs/prod1.pem")

        # stop after the first error
        if not err:
            err = self.download_cert(PROD2_CERT_URL,".kube/certs/prod2.pem")

        if not err:

            # desktops can reach the private endpoint; pipelines must use
            # the public one
            if self.is_desktop:
                err = self.download_cert(NONPROD_PRIVATE_CERT_URL,".kube/certs/nonprod.pem")
            else:
                err = self.download_cert(NONPROD_PUBLIC_CERT_URL,".kube/certs/nonprod.pem")

        return err


    def download_cert(self, url, home_rel_path):
        """Download a cert from url and write it to home_rel_path (home-relative).

        Returns an error string ("" on success).
        """

        err = ""
        r = requests.get(url, allow_redirects=True)

        if r.status_code == 200:
            self.write_home_file(r.content.decode("utf-8"),home_rel_path)
        else:
            err = "could not download root CA cert from %s"%url

        return err

    def install_kube_config(self, params):
        """Render the .kubeconfig.yaml template and write it to ~/.kube/config."""

        # use .kubeconfig.yaml as a template
        with open(os.path.join(get_root_path(),
            "lib/stacks/k8s/configs/.kubeconfig.yaml"), 'r') as config_file:
            file_contents = config_file.read()

        # render mustaches in the file ...

        # first worry about how to render the 'token' token in the file

        # use placeholder values (o.w. apply_stemplate will raise TemplateError).
        # the user will need to use kubelogin to overwrite
        stock_tokens = ["via kubelogin", "via kubelogin", "via kubelogin"]

        if not self.tokens:
            # no tokens are provided via servicefile. this is a typical pattern
            # for servicefiles that are meant to be run from a developer desktop.
            tokens = stock_tokens

        else:
            # copy so the slice assignment below doesn't mutate the
            # stock placeholder list
            tmp_tokens = list(stock_tokens)

            # make sure there is one token per cluster
            tmp_tokens[0:len(self.tokens)] = self.tokens

            # tokens were specified in the servicefile
            # these will typically include secrets that are referenced
            # via a yac-ref intrinstric, so render intrinsics in the tokens
            tokens = apply_intrinsics(tmp_tokens, params)

        # build the params for each variable in the file
        local_params = Params({})

        # set variables for each of the cluster tokens
        cluster_keys = ["nonprod-token","prod1-token","prod2-token"]

        for i,token in enumerate(tokens):
            local_params.set(cluster_keys[i],token)

        # the namespace params supports intrinsics (so that it can be set via an input)
        namespace = apply_intrinsics(self.namespace, params)

        # set namespace variable for template rendering
        local_params.set("namespace", namespace)

        if self.is_desktop:
            # use the private api to avoid the limitations of the public
            # api endpoint, per:
            #  * https://gitlab.nordstrom.com/k8s/platform-bootstrap/wikis/Onboard-to-AWS-Kubernetes-Clusters
            local_params.set("nonprod-api-url",NONPROD_PRIVATE_API)
        else:
            # pipelines must use the public to avoid v2 account peering
            # contraints
            local_params.set("nonprod-api-url",NONPROD_PUBLIC_API)

        # do the actual mustache rendering
        rendered_file_contents = apply_stemplate(file_contents,local_params)

        # take backup of any existing .kube/config files
        self.backup_existing(".kube/config")

        # write file
        self.write_home_file(rendered_file_contents,".kube/config")

    def install_kube_login(self):
        """Install the .kubeloginrc.yaml file and the kubelogin app under home."""

        # get the contents of the .kube/config file
        file_contents = get_file_contents('yac/lib/stacks/k8s/configs/.kubeloginrc.yaml')

        # write file
        self.write_home_file(file_contents,".kubeloginrc.yaml")

        # copy the kubelogin app under the user's home dir
        kubelogin_dest = self.get_kubelogin_path()

        if os.path.exists(kubelogin_dest):
            # remove existing installatiaon of kubelogin
            os.remove(kubelogin_dest)

        print("installing: %s"%kubelogin_dest)

        shutil.copyfile('yac/lib/stacks/k8s/configs/kubelogin',
                        kubelogin_dest)
        # owner read+execute only
        os.chmod(kubelogin_dest, stat.S_IREAD | stat.S_IEXEC )

    def backup_existing(self,home_rel_path):
        """Rename an existing home-relative file to a timestamped backup."""

        full_home_path = get_home_path(home_rel_path)

        if os.path.exists(full_home_path):
            # rename existing file
            timestamp = "{:%Y-%m-%d.%H.%M.%S}".format(dt.datetime.now())
            backup_filename = "%s.%s"%(full_home_path,timestamp)
            print("backing up existing ~/.kube/config file to: %s"%backup_filename)
            os.rename(full_home_path,
                      backup_filename)

    def write_home_file(self, content, home_rel_path):
        """Write content to a path relative to the user's home directory.

        Creates any missing parent directories.
        NOTE(review): self.rootdir is honored only in get_current_context;
        confirm whether writes should use it too.
        """

        full_home_path = get_home_path(home_rel_path)

        if not os.path.exists(os.path.dirname(full_home_path)):
            os.makedirs(os.path.dirname(full_home_path))

        print("writing: %s"%(full_home_path))

        # use a context manager so the handle is closed promptly
        # (previously the open file object was left to the garbage collector)
        with open(full_home_path, 'w') as home_file:
            home_file.write(content)

    def show_kubelogin_help(self, cluster):
        """Print the kubelogin command for generating creds for a cluster."""

        # form the kubelogin command
        kubelogin_path = self.get_kubelogin_path()
        kubelogin_cmd = "%s login %s"%(kubelogin_path,cluster)

        print("run the following command in a separate terminal to generate credentials for the %s cluster:"%cluster)

        print("$ {0}".format( kubelogin_cmd ))

    def created_today(self, home_rel_path):
        """Return True if the file at home_rel_path was created today."""

        created_today=False

        full_home_path = get_home_path(home_rel_path)

        if os.path.exists(full_home_path):

            today = dt.datetime.now().date()

            filetime = dt.datetime.fromtimestamp(os.path.getctime(full_home_path))

            if filetime.date() == today:
                created_today = True

        return created_today

    def running_on_desktop(self):
        # returns true if these credentials are being created on a developer desktop
        #
        # the distinction is important for this k8s credentialer as it determines which
        # k8s api endpoint to use. the private endpoint is the most fully featured (esp for
        # kubectl commands) but is only accessible from clients joined to the nordstrom domain.
        # the public endpoint works for most use cases AND is availble from build servers
        # running in most environments (k8s clusters, aws v2 accounts, etc).
        return self.params.get('desktop')

    def get_current_context(self):
        """Return the name of the active kube context ("" if none)."""

        context_name = ""

        full_home_path = get_home_path(".kube/config", self.rootdir)

        if os.path.exists(full_home_path):

            kubernetes.config.load_kube_config()

            contexts, active_context = kubernetes.config.list_kube_config_contexts()

            if ('name' in active_context ):
                context_name = active_context['name']

        return context_name

    def get_kubelogin_path(self):
        """Return the home-relative install path for the kubelogin app."""

        return  get_home_path(".kube/kubelogin")
Example #7
0
class AMI():
    """Builds AMIs (amazon machine images) using packer."""

    def __init__(self, serialized_artifact):

        # raises a schema validation error if required fields are missing
        validate(serialized_artifact, "yac/schema/makers/ami.json")

        self.name = search('name', serialized_artifact)

        self.description = search('description', serialized_artifact)

        # the aws profile aliasing the account to build in
        self.profile = search('profile', serialized_artifact)

        # path to the packer file
        self.packer_file = search('"packer-file"', serialized_artifact)

        # directory containing files that should be included in the build
        self.packable_dir = search('"packer-dir"', serialized_artifact, "")

        self.secrets = Secrets(search('"Secrets"', serialized_artifact, {}))

        # initialize the inputs (for driving user prompts)
        self.inputs = Inputs(search("Inputs", serialized_artifact, {}))

    def get_name(self):
        return self.name

    def get_description(self):
        return self.description

    def make(self, params, vaults, dry_run_bool=False):
        """Render the packer files and (unless dry_run_bool) run the build.

        Returns a falsy value on success, or an error (string, or a
        non-zero packer exit code) on failure.
        """

        err = ""

        self.params = params

        # process inputs and load results into params
        self.inputs.load(self.params)

        # load secrets into params
        self.secrets.load(self.params, vaults)

        # if the packer dir wasn't specified, assume the same dir
        # that the packerfile is in
        self.set_packable_dir()

        # render variables in all files, putting the rendered files in the
        # std dump path so they can be viewed after the yac container stops
        dump_path = get_dump_path(params.get("service-alias"))

        build_path = os.path.join(dump_path, "packer")

        apply_templates_in_dir(self.packable_dir, self.params, build_path)

        # form the packer build command
        packer_file_name = os.path.basename(self.packer_file)
        packer_cmd = "packer build %s" % (packer_file_name)

        if dry_run_bool:
            print("see rendered packer files under %s" % build_path)

        else:
            # the full file path to the rendered packer file
            packer_path = os.path.join(build_path, packer_file_name)

            # get the AMI name and the overwrite (force_deregister) param
            with open(packer_path, 'r') as f:
                packer_dict = json.load(f)

            ami_name = jmespath.search("builders[0].ami_name", packer_dict)
            override_param = jmespath.search("builders[0].force_deregister",
                                             packer_dict)

            if (self.ami_exists(params, ami_name) and override_param is False):
                print(
                    "AMI already exists and packer file includes instructions to NOT overwrite"
                )

            else:
                print("build command:\n{0}".format(packer_cmd))
                # get the current working dir
                curr_dir = os.getcwd()

                # cd to the tmp dir
                os.chdir(build_path)

                # the subprocess command expects the command to be split into an array
                # on whitespace boundaries.
                packer_cmd_array = packer_cmd.split(" ")

                try:

                    last_line = ""
                    process = subprocess.Popen(packer_cmd_array,
                                               stdout=subprocess.PIPE)

                    # stream stdout a char at a time so packer progress renders
                    # live. note the bytes sentinel: stdout is a binary stream,
                    # so EOF is b'' (the original compared against the str ''
                    # and relied on a truthiness check to exit the loop)
                    for c_bytes in iter(lambda: process.stdout.read(1), b''):

                        c = c_bytes.decode("utf-8")

                        # write this char to stdout
                        sys.stdout.write(c)

                        # troll for ami ids in the packer output by converting
                        # individual chars into packer output lines and using a
                        # regex to find ami ids in each line
                        if c != '\n':
                            last_line = last_line + c

                        else:
                            # cache ami info that appears for future reference
                            # in front end builds
                            ami_id = self._find_ami_id(last_line)

                            if ami_id:
                                self.ami_id = ami_id

                            # reset the line
                            last_line = c

                    # surface a non-zero packer exit code as the error
                    returncode = process.wait()
                    if returncode:
                        err = returncode

                except Exception as e:

                    err = str(e)

                # cd back to the original dir
                os.chdir(curr_dir)

        return err

    def _find_ami_id(self, packer_output_str):
        """Return the ami id embedded in a line of packer output, or ""."""

        ami_id = ""

        # use regex to look for the ami id in the packer output
        # second group matches "ami-" followed by 8+ alphanumerics
        regex_result = re.search("(us-west-2: )(ami-[a-zA-Z0-9]{8,})",
                                 packer_output_str)

        # if it exists, second group holds the ami id
        if (regex_result and regex_result.group(2)):

            ami_id = regex_result.group(2)

        return ami_id

    def set_packable_dir(self):
        """Default the packable dir to the directory containing the packer file."""

        if not self.packable_dir:

            packer_file_full_path = os.path.join(
                self.params.get('servicefile-path'), self.packer_file)

            self.packable_dir = os.path.dirname(packer_file_full_path)

    def ami_exists(self, params, ami_name):
        """Return True if an AMI with this exact name exists in the build account."""

        session, err = get_session(params)

        if err:
            # can't determine existence; report the session error and treat
            # the AMI as absent. (previously the error *string* was returned
            # here - truthy, so make() misread it as "AMI exists" and
            # silently skipped the build)
            print("could not create aws session: %s" % err)
            return False

        client = session.client('ec2')

        response = client.describe_images(Filters=[{
            'Name': 'name',
            'Values': [ami_name]
        }])

        return "Images" in response and len(response["Images"]) == 1
Example #8
0
    def __init__(self,
                 serialized_service,
                 service_path,
                 alias="",
                 params_path="",
                 kvps=""):
        """Build a service from its serialized (dict) form.

        Validation raises if required servicefile fields are missing.
        """
        validate(serialized_service, "yac/schema/service.json")

        self.path = service_path
        self.kvps_str = kvps
        self.params_file_path = params_path

        self.description = Description(search('Description', serialized_service, {}), alias)

        self.vaults = SecretVaults(search('"Vaults"', serialized_service, []))

        # other services this service pulls in wholesale
        self.includes = search("includes", serialized_service, {})

        # static params from the servicefile; file- and kvp-sourced params
        # get layered in later
        self.params = Params(search('"Params"', serialized_service, {}))

        # will eventually hold all params (statics+secrets+inputs)
        self.all_params = {}

        inputs_cacher = InputsCacher(search('InputsCache', serialized_service, {}))

        # inputs drive user prompts; cached answers come via the cacher
        self.inputs = Inputs(search('Inputs', serialized_service, {}), inputs_cacher)

        self.secrets = Secrets(search('"Secrets"', serialized_service, {}))

        # the stack backing this service
        self.stack = Stack(search('Stack', serialized_service, {}))

        self.tasks = Tasks(search('Tasks', serialized_service, {}))

        # integration tests associated with this service
        self.tests = IntegrationTests(search('"IntegrationTests"', serialized_service, {}))

        # artifacts associated with this service
        self.artifacts = Artifacts(search('Artifacts', serialized_service, []))

        # credentialers associated with this service
        self.credentialers = Credentialers(search("Credentialers", serialized_service, []))

        # pipeline associated with this service
        self.pipeline = Pipeline(search('Pipeline', serialized_service, {}))

        # merge in the objects from each included service
        self.load_includes()

        # retain the full serialized form to support serialize()
        self.serialized_service = serialized_service
Example #9
0
class Service():
    """A yac service: description, params, secrets, stack, tasks, tests,
    artifacts, credentialers, and pipeline, built from a serialized
    servicefile. Included services get merged in via load_includes()."""

    def __init__(self,
                 serialized_service,
                 service_path,
                 alias="",
                 params_path="",
                 kvps=""):

        # first validate. this should throw an exception if
        # required fields aren't present
        validate(serialized_service, "yac/schema/service.json")

        self.path = service_path
        self.kvps_str = kvps
        self.params_file_path = params_path

        self.description = Description(search('Description',
                                       serialized_service,{}),
                                       alias)

        self.vaults = SecretVaults(search('"Vaults"',
                                      serialized_service,[]))

        # a service can references other services that it includes
        self.includes = search("includes",
                             serialized_service,{})

        # initialize stack params (key/value pairs and maps), including static params specified
        # in the serialized service, params from an external file,
        # params specified in a key-value pair string (kvps),
        self.params = Params(search('"Params"',
                                    serialized_service, {}))

        # initialize the dictionary that will hold all params (statics+secrets+inputs)
        self.all_params = {}

        inputs_cacher = InputsCacher(search('InputsCache',
                                    serialized_service,{}))

        self.inputs = Inputs(search('Inputs',
                                    serialized_service,{}),
                             inputs_cacher)

        self.secrets = Secrets(search('"Secrets"',
                                      serialized_service,{}))

        # initialize the stack associated with this service
        self.stack = Stack(search('Stack',
                                  serialized_service, {}))

        self.tasks = Tasks(search('Tasks',
                                    serialized_service,{}))

        # initialize the tests associated with this service
        self.tests = IntegrationTests(search('"IntegrationTests"',
                                      serialized_service,{}))

        # initialize the artifacts associated with this service
        self.artifacts = Artifacts(search('Artifacts',
                                           serialized_service,[]))

        # initialize the credentialers associated with this service
        self.credentialers = Credentialers(search("Credentialers",serialized_service,[]))

        # initialize the pipeline associated with this service
        self.pipeline = Pipeline(search('Pipeline',
                                     serialized_service,{}))

        # load the objects from each included service
        self.load_includes()

        # save a copy of the full serialized version of the
        # service to support the serialize() method
        self.serialized_service = serialized_service

    def add(self,
            service):
        """Merge the mergeable fields from another service into this one."""

        if service.params:

            self.params.add(service.params)

        if service.secrets:

            self.secrets.add(service.secrets)

        if service.vaults:

            self.vaults.add(service.vaults)

        if service.stack.impl:

            # there can be only one stack
            if self.stack.impl:
                self.stack.add(service.stack)
            else:
                self.stack = service.stack

        if service.tasks:

            self.tasks.add(service.tasks)

        if service.inputs:

            self.inputs.add(service.inputs)

        if service.tests:

            self.tests.add(service.tests)

        if service.artifacts:

            self.artifacts.add(service.artifacts)

        if service.credentialers:

            self.credentialers.add(service.credentialers)

        if service.pipeline and service.pipeline.get_stages():

            # there can be only one pipeline per service
            self.pipeline = service.pipeline

    def add_params_via_kvps(self,kvp_str):
        # load key-value pairs via a kvp string formatted as:
        # <key1>:<value1>,<key2>:<val2>,etc
        self.params.load_kvps(kvp_str)

    def load_includes(self):
        """Load and merge the objects from each included service.

        Exits the process if an included service cannot be loaded.
        """

        # for each included service specified
        for service_key in self.includes:

            sub_service_path = self.includes[service_key]["value"]

            # load the included service ...
            this_sub, err = get_service(sub_service_path,
                                   servicefile_path=self.path)

            # add to this service
            if not err:
                print("including '%s' service ..."%(service_key))
                self.add(this_sub)
            else:
                print("included service '%s' could not be loaded from %s"%(service_key,
                                                                           sub_service_path))
                print("error: %s"%err)
                print("exiting ...")
                # (a second, unreachable exit(0) previously followed here)
                exit(0)

    def get_meta_params(self):
        """Return Params holding metadata about this service."""

        service_metadata = Params({})
        service_metadata.set("service-default-alias",self.description.default_alias, "service default alias")
        service_metadata.set("service-alias",self.description.alias, "service alias")
        service_metadata.set("service-name",self.description.name, "service name")

        service_metadata.set("servicefile-path",self.path, "path to the servicefile")

        # add service summary and repo
        service_metadata.set('service-summary',self.description.summary, "service summary")
        service_metadata.set('service-repo',self.description.repo, "repo containing this service")

        # add the command that was run against this service
        service_metadata.set("yac-command",sys.argv[0], 'the yac command being run')

        return service_metadata

    def get_params(self):
        """Return the service's static params, layered with metadata,
        env variables, kvps, and file-sourced params."""

        # add params describing the service itself
        self.params.add(self.get_meta_params())

        # load any params from yac-supported env variables
        self.params.load_from_env_variables()

        # load kvps (typically used for injecting inputs in pipelines or
        # overriding an individual param setpoint)
        self.params.load_kvps(self.kvps_str)

        # load params from file
        self.params.load_from_file(self.params_file_path)

        return self.params

    def get_all_params(self,
                       context="",
                       dry_run_bool=False,
                       credentialer_names=None):
        """Return the full set of params: statics + inputs + secrets.

        credentialer_names is currently unused; the default was changed
        from a mutable [] to None (backward-compatible, since the body
        never reads it).
        """

        # Take a copy of params
        self.all_params = self.get_params()

        # process inputs and load results into params
        self.inputs.load(self.all_params)

        # load secrets into params
        self.secrets.load(self.all_params,
                          self.vaults)

        return self.all_params

    def get_description(self):

        return self.description

    def get_artifacts(self):

        return self.artifacts

    def get_stack(self):

        return self.stack

    def get_tests(self):

        return self.tests

    def get_tasks(self):

        return self.tasks

    def get_vaults(self):

        return self.vaults

    def get_deployer(self):
        # NOTE(review): self.deployer is never assigned in this class -
        # confirm a subclass or mixin provides it before calling
        return self.deployer()

    def get_inputs(self):

        return self.inputs

    def get_secrets(self):

        return self.secrets

    def get_pipeline(self):

        return self.pipeline

    def get_credentialers(self):

        return self.credentialers

    def get_serialized_pipeline(self):

        return self.serialized_pipeline

    def deploy_boot_files(self, dry_run_bool=False):
        # NOTE(review): self.boot_files is never assigned in this class -
        # confirm a subclass or mixin provides it before calling
        self.boot_files.deploy(self.params, dry_run_bool)

    def serialize(self):
        """Return the original serialized (dict) form of this service."""
        return self.serialized_service

    def __str__(self):
        # each piece formats its own value; previously the vaults
        # substitution was applied to the whole concatenated string, which
        # breaks if any earlier rendered piece contains a literal '%'
        ret = ("description:\n %s\n"%self.description +
               "params:\n %s\n"%self.params +
               "secrets:\n %s\n"%self.secrets +
               "stack:\n %s\n"%self.stack +
               "vaults: \n %s\n"%self.vaults)
        return ret
class ContainerImage():

    # for building docker container images
    def __init__(self,
                 serialized_artifact):
        """Initialize from a serialized artifact descriptor.

        Validates the descriptor against the container_image schema,
        pulls out image / registry / inputs / secrets fields, and
        creates the docker clients used for builds and pushes.
        """
        validate(serialized_artifact, "yac/schema/makers/container_image.json")

        # identity and docs
        self.name = search("name", serialized_artifact, "")
        self.description = search("description", serialized_artifact, "")

        # image build descriptor (no default - required)
        self.image = search("image", serialized_artifact)

        # registry the image gets pushed to; defaults to artifactory
        self.registry = search('registry',
                               serialized_artifact,
                               ARTIFACTORY_URL)

        # inputs drive user prompts; secrets are resolved from vaults
        self.inputs = Inputs(search("Inputs",
                                    serialized_artifact, {}))
        self.secrets = Secrets(search('"Secrets"',
                                      serialized_artifact, {}))

        # both clients talk to the same builder endpoint
        endpoint = 'tcp://%s:%s'%(BUILDER_HOST,
                                  BUILDER_PORT)

        # client for most operations
        self.client = docker.DockerClient(endpoint)

        # client for "low-level" build operations (e.g. builds that send
        # the details on each layer built to stdout )
        # TODO: figure out why auth isn't working from inside a container
        # with this one
        self.api_client = docker.APIClient(endpoint)

    def get_name(self):
        return self.name

    def get_description(self):
        return self.description

    def make(self,
             params,
             vaults,
             dry_run_bool=False):

        self.params = params

        # process inputs and load results into params
        self.inputs.load(self.params)

        # load secrets into parmas
        self.secrets.load(self.params,
                          vaults)

        # build the image
        err = self.build(dry_run_bool)

        if not err:

            # login to container registry specified in the servicefile
            err = self.login()

            if not err:

                # push the image to the registry
                err = self.push(dry_run_bool)

        return err

    def login(self):
        """Log in to the container registry named in the servicefile.

        Returns:
            err: "" on success, or the docker APIError on failure.
        """
        err = ""

        # render intrinsics in the registry descriptor
        registry = apply_intrinsics(self.registry,
                                    self.params)
        try:
            # NOTE(review): despite the printed message, this uses the
            # regular DockerClient (self.client), not the low-level
            # api client
            print("login using api client")
            response = self.client.login(username=registry['username'],
                                         password=registry['password'],
                                         registry=registry['host'])
            print(response)

        except docker.errors.APIError as ae:
            err = ae

        return err

    def build(self,
              dry_run_bool=False):
        """Render the docker build context and build the container image.

        Applies intrinsics to the image descriptor, renders templates in
        the dockerfile and its sibling files into the standard dump path,
        then builds the image via the docker client (unless dry-running).

        Args:
            dry_run_bool: if True, render files and print what would be
                built without actually building.

        Returns:
            err: "" on success, otherwise a string describing the failure.
        """
        err = ""

        # render intrinsics in the image
        rendered_image = apply_intrinsics(self.image,
                                          self.params)

        # put the rendered files in the std dump path so they
        #  can be viewed after the yac container stops
        # (bug fix: previously read a non-existent local 'params' -
        #  the alias lives on self.params)
        dump_path = get_dump_path(self.params.get("service-alias"))
        build_path = os.path.join(dump_path,"docker")

        # path to the docker file (relative to servicefile)
        self.dockerfile_path = search('"dockerfile"',rendered_image,"")

        servicefile_path = self.params.get('servicefile-path')

        dockerfile_full_path = os.path.join(servicefile_path,self.dockerfile_path)

        if not os.path.exists(dockerfile_full_path):
            return "dockerfile at %s does not exist"%dockerfile_full_path

        # render variables in the docker file
        apply_templates_in_file(dockerfile_full_path,
                                self.params,
                                build_path)

        # assume files are in the same location as the dockerfile
        apply_templates_in_dir(os.path.dirname(dockerfile_full_path),
                               self.params,
                               build_path)

        self.image_name = search("name",rendered_image,"")
        self.image_label = search("label",rendered_image,"")

        # build args
        self.build_args = search('"build-args"',rendered_image,[])

        if dry_run_bool:

            print("see rendered files under %s"%build_path)
            print("(dry-run) building image %s:%s ..."%(self.image_name,
                                                        self.image_label))

        else:
            try:

                print("building image %s:%s ..."%(self.image_name,
                                                  self.image_label))
                # build the image
                self.image,log = self.client.images.build(tag="%s:%s"%(self.image_name,
                                                               self.image_label),
                                                  path=build_path,
                                                  buildargs=self.build_args)

                # echo the build log; docker streams dicts that may or
                # may not carry a 'stream' key
                for line in log:

                    if 'stream' in line:
                        print(line['stream'])
                    else:
                        print(line)

            except docker.errors.APIError as ae:
                err = "APIError: %s"%ae

            except docker.errors.BuildError as be:
                err = "BuildError: %s"%be

            except TypeError as te:
                err = "TypeError: %s"%te

        return err

    def push(self,
             dry_run_bool=False):

        if dry_run_bool:
            print("(dry-run) pushing image %s:%s ..."%(self.image_name,self.image_label))

        else:
            print("pushing image %s:%s ..."%(self.image_name,self.image_label))

            # push the image to registry
            response = self.client.images.push(repository=self.image_name,
                                               tag=self.image_label)

            print(response)