Example #1
    def __init__(self, credentials_descriptor):
        """ Generates aws credentials for nordstrom users.

        Args:
            credentials_descriptor: A dictionary containing serialized credentialer,
                               satisfying the yac/schema/aws/credentialer.json schema

        Raises:
            ValidationError: if a inputs fails schema validation

        """

        validate(credentials_descriptor,
                 "yac/schema/stacks/aws/credentialer.json")

        self.accounts = search("accounts", credentials_descriptor, [])

        self.region = search("region", credentials_descriptor, [])

        # if urls not provided, use defaults
        self.token_endpoint_url = search('"token-endpoint-url"',
                                         credentials_descriptor,
                                         TOKEN_ENDPOINT_URL)

        self.role_endpoint_url = search('"role-endpoint-url"',
                                        credentials_descriptor,
                                        ROLE_ENDPOINT_URL)

        # initialize the inputs (for driving user prompts)
        self.inputs = Inputs(search("Inputs", credentials_descriptor, {}))
Example #2
class Task():
    def __init__(self, serialized_task):

        self.name = search("name", serialized_task, "")
        self.description = search("description", serialized_task, "")
        self.module = search("module", serialized_task, "")
        self.inputs = Inputs(search("Inputs", serialized_task, {}))

    def run(self, params):

        err = ""
        self.params = params

        servicefile_path = self.params.get('servicefile-path')

        # process task inputs and load results into params
        self.inputs.load(self.params)

        task_module, err = get_module(self.module, servicefile_path)

        # run the handler method in the task module
        if not err:
            if hasattr(task_module, 'task_handler'):
                err = task_module.task_handler(self.params)
            else:
                err = ("task module %s does not have a " +
                       "'test_setup' function" % self.module)

        return err
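Since run() looks for a task_handler attribute on the loaded module, a task module needs to expose a function by that name. A minimal sketch of such a module, assuming the convention visible above: the handler receives the params object and returns an error string that is empty on success. The file name and the param key are illustrative.

# my_task.py -- hypothetical module referenced by the task's "module" key
def task_handler(params):
    # params is the same object passed to Task.run(); an empty return value
    # signals success, any other string is treated as an error message
    service_name = params.get("service-name")
    print("running task for %s" % service_name)
    return ""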
Example #3
    def __init__(self,
                 credentials_descriptor):

        validate(credentials_descriptor, "yac/schema/stacks/k8s/credentialer.json")

        self.namespace = search("namespace",
                               credentials_descriptor,"")

        self.clusters = search("clusters",
                               credentials_descriptor,
                               ["nonprod","prod"])

        # if tokens are input there should be one per cluster
        self.tokens = search("tokens",
                               credentials_descriptor,
                               [])

        self.secrets = Secrets(search('"Secrets"',
                               credentials_descriptor,{}))

        # initialize the inputs (for driving user prompts)
        self.inputs = Inputs(search("Inputs",
                                    credentials_descriptor,{}))

        # for integration testing it is useful to write files to a
        # root directory other than the user's home
        self.rootdir = search("rootdir",
                              credentials_descriptor,"")
Example #4
    def __init__(self,
                 serialized_artifact):

        validate(serialized_artifact, "yac/schema/makers/container_image.json")

        self.name = search("name",serialized_artifact,"")
        self.description = search("description",serialized_artifact,"")

        self.image = search("image",serialized_artifact)

        # the registry where the images should be pushed
        # defaults to artifactory
        self.registry = search('registry',
                                serialized_artifact,
                                ARTIFACTORY_URL)

        # initialize the inputs (for driving user prompts)
        self.inputs = Inputs(search("Inputs",
                                    serialized_artifact,{}))

        self.secrets = Secrets(search('"Secrets"',
                                    serialized_artifact,{}))

        # client for most operations
        self.client = docker.DockerClient('tcp://%s:%s'%(BUILDER_HOST,
                                                         BUILDER_PORT))

        # client for "low-level" build operations (e.g. builds that send
        # the details on each layer built to stdout )
        # TODO: figure out why auth isn't working from inside a container
        # with this one
        self.api_client = docker.APIClient('tcp://%s:%s'%(BUILDER_HOST,
                                                         BUILDER_PORT))
Example #5
    def test_params_cache_path(self):

        # remove any existing cache file
        if os.path.exists(TestCase.cache_full_path):
            os.remove(TestCase.cache_full_path)

        test_parameters = {"service-name": {"value": TestCase.name}}

        # load params
        params = Params(test_parameters)

        # create an input w/ only one available setpoint
        serialized_inputs_cacher = {
            "enabled": True,
            "path": TestCase.cache_rel_path,
        }

        inputs_cacher = InputsCacher(serialized_inputs_cacher)

        serialized_inputs = [{
            "key": "env",
            "title": "Environment",
            "type": "string",
            "help": "The environment to build stack for",
            "required": True,
            "options": ["dev"]
        }]

        inputs = Inputs(serialized_inputs, inputs_cacher)

        # inject the correct response to inputs prompt into stdin
        sys.stdin = io.StringIO('dev')

        # load inputs into params
        inputs.load(params)

        # verify the params file was created
        self.assertTrue(os.path.exists(TestCase.cache_full_path))

        # verify the env param is in the file and set properly
        params_from_file = get_params_from_file(TestCase.cache_full_path)
        self.assertTrue(params_from_file.get('env') == 'dev')
Example #6
File: ami.py  Project: thomas-b-jackson/yac
    def __init__(self, serialized_artifact):

        validate(serialized_artifact, "yac/schema/makers/ami.json")

        self.name = search('name', serialized_artifact)

        self.description = search('description', serialized_artifact)

        # the aws profile aliasing the account to build in
        self.profile = search('profile', serialized_artifact)

        # path to the packer file
        self.packer_file = search('"packer-file"', serialized_artifact)

        # directory containing files that should be included in the build
        self.packable_dir = search('"packer-dir"', serialized_artifact, "")

        self.secrets = Secrets(search('"Secrets"', serialized_artifact, {}))

        # initialize the inputs (for driving user prompts)
        self.inputs = Inputs(search("Inputs", serialized_artifact, {}))
Example #7
    def test_params_load(self):

        # load params
        params = Params({})

        serialized_inputs = [{
            "key": "env",
            "title": "Environment",
            "type": "string",
            "help": "The environment to build stack for",
            "required": True,
            "options": ["dev"]
        }]

        inputs = Inputs(serialized_inputs)

        # inject the correct response to inputs prompt into stdin
        sys.stdin = io.StringIO('dev')

        # load inputs into params
        inputs.load(params)

        self.assertTrue(params.get('env') == 'dev')
Example #8
    def __init__(self, serialized_task):

        self.name = search("name", serialized_task, "")
        self.description = search("description", serialized_task, "")
        self.module = search("module", serialized_task, "")
        self.inputs = Inputs(search("Inputs", serialized_task, {}))
Example #9
class NordstromK8sCredentialer():

    def __init__(self,
                 credentials_descriptor):

        validate(credentials_descriptor, "yac/schema/stacks/k8s/credentialer.json")

        self.namespace = search("namespace",
                               credentials_descriptor,"")

        self.clusters = search("clusters",
                               credentials_descriptor,
                               ["nonprod","prod"])

        # if tokens are input there should be one per cluster
        self.tokens = search("tokens",
                               credentials_descriptor,
                               [])

        self.secrets = Secrets(search('"Secrets"',
                               credentials_descriptor,{}))

        # initialize the inputs (for driving user prompts)
        self.inputs = Inputs(search("Inputs",
                                    credentials_descriptor,{}))

        # for integration testing it is useful to write files to a
        # root directory other than the user's home
        self.rootdir = search("rootdir",
                              credentials_descriptor,"")

    def create(self,
               params,
               vaults,
               overwrite_bool):

        self.params = params

        # process creds-specific inputs and load results into params
        self.inputs.load(self.params)

        # load any creds-specific secrets into params
        self.secrets.load(self.params,vaults)

        # determine if credentialer is being instantiated on a developer
        # desktop
        self.is_desktop = self.running_on_desktop()

        err = ""
        if overwrite_bool or not self.created_today(".kube/config"):

            # make sure certs are in place
            err = self.install_root_ca_certs()

            # install ~/.kubeloginrc.yaml file
            # note: file is only needed when running on a desktop
            if not err and self.is_desktop:
                self.install_kube_login()

            # create ~/.kube/config file
            if not err:
                self.install_kube_config(params)

            if not err and not self.tokens:
                # explain how to create tokens for the target clusters
                self.token_help()

        else:
            print("k8s ~/.kube/config file has fresh tokens so won't be updated ...")

        return err

    def token_help(self):

        for cluster in self.clusters:

            self.show_kubelogin_help(cluster)

        input("press <enter> when you've completed the kubelogin for all clusters ... >> ")

    def install_root_ca_certs(self):

        # install each cert
        err = self.download_cert(PROD1_CERT_URL,".kube/certs/prod1.pem")

        # stop after the first error
        if not err:
            err = self.download_cert(PROD2_CERT_URL,".kube/certs/prod2.pem")

        if not err:

            if self.is_desktop:
                err = self.download_cert(NONPROD_PRIVATE_CERT_URL,".kube/certs/nonprod.pem")
            else:
                err = self.download_cert(NONPROD_PUBLIC_CERT_URL,".kube/certs/nonprod.pem")

        return err


    def download_cert(self, url, home_rel_path):

        err = ""
        r = requests.get(url, allow_redirects=True)

        if r.status_code == 200:
            self.write_home_file(r.content.decode("utf-8"),home_rel_path)
        else:
            err = "could not download root CA cert from %s"%url

        return err

    def install_kube_config(self, params):

        # use .kubeconfig.yaml as a template
        with open(os.path.join(get_root_path(),
            "lib/stacks/k8s/configs/.kubeconfig.yaml"), 'r') as config_file:
            file_contents = config_file.read()

        # render mustaches in the file ...

        # first work out how to render the cluster 'token' values in the file

        # use placeholder values (otherwise apply_stemplate will raise a TemplateError).
        # the user will need to use kubelogin to overwrite them
        stock_tokens = ["via kubelogin", "via kubelogin", "via kubelogin"]

        if not self.tokens:
            # no tokens are provided via servicefile. this is a typical pattern
            # for servicefiles that are meant to be run from a developer desktop.
            tokens = stock_tokens

        else:
            tmp_tokens = stock_tokens

            # make sure there is one token per cluster
            tmp_tokens[0:len(self.tokens)] = self.tokens

            # tokens were specified in the servicefile
            # these will typically include secrets that are referenced
            # via a yac-ref intrinsic, so render intrinsics in the tokens
            tokens = apply_intrinsics(tmp_tokens, params)

        # build the params for each variable in the file
        local_params = Params({})

        # set variables for each of the cluster tokens
        cluster_keys = ["nonprod-token","prod1-token","prod2-token"]

        for i,token in enumerate(tokens):
            local_params.set(cluster_keys[i],token)

        # the namespace params supports intrinsics (so that it can be set via an input)
        namespace = apply_intrinsics(self.namespace, params)

        # set namespace variable for template rendering
        local_params.set("namespace", namespace)

        if self.is_desktop:
            # use the private api to avoid the limitations of the public
            # api endpoint, per:
            #  * https://gitlab.nordstrom.com/k8s/platform-bootstrap/wikis/Onboard-to-AWS-Kubernetes-Clusters
            local_params.set("nonprod-api-url",NONPROD_PRIVATE_API)
        else:
            # pipelines must use the public api to avoid v2 account peering
            # constraints
            local_params.set("nonprod-api-url",NONPROD_PUBLIC_API)

        # do the actual mustache rendering
        rendered_file_contents = apply_stemplate(file_contents,local_params)

        # take backup of any existing .kube/config files
        self.backup_existing(".kube/config")

        # write file
        self.write_home_file(rendered_file_contents,".kube/config")

    def install_kube_login(self):

        # get the contents of the .kube/config file
        file_contents = get_file_contents('yac/lib/stacks/k8s/configs/.kubeloginrc.yaml')

        # write file
        self.write_home_file(file_contents,".kubeloginrc.yaml")

        # copy the kubelogin app under the user's home dir
        kubelogin_dest = self.get_kubelogin_path()

        if os.path.exists(kubelogin_dest):
            # remove existing installation of kubelogin
            os.remove(kubelogin_dest)

        print("installing: %s"%kubelogin_dest)

        shutil.copyfile('yac/lib/stacks/k8s/configs/kubelogin',
                        kubelogin_dest)
        os.chmod(kubelogin_dest, stat.S_IREAD | stat.S_IEXEC )

    def backup_existing(self,home_rel_path):

        full_home_path = get_home_path(home_rel_path)

        if os.path.exists(full_home_path):
            # rename existing file
            timestamp = "{:%Y-%m-%d.%H.%M.%S}".format(dt.datetime.now())
            backup_filename = "%s.%s"%(full_home_path,timestamp)
            print("backing up existing ~/.kube/config file to: %s"%backup_filename)
            os.rename(full_home_path,
                      backup_filename)

    def write_home_file(self, content, home_rel_path):

        full_home_path = get_home_path(home_rel_path)

        if not os.path.exists(os.path.dirname(full_home_path)):
            os.makedirs(os.path.dirname(full_home_path))

        print("writing: %s"%(full_home_path))

        with open(full_home_path, 'w') as home_file:
            home_file.write(content)

    def show_kubelogin_help(self, cluster):

        # form the kubelogin command
        kubelogin_path = self.get_kubelogin_path()
        kubelogin_cmd = "%s login %s"%(kubelogin_path,cluster)

        print("run the following command in a separate terminal to generate credentials for the %s cluster:"%cluster)

        print("$ {0}".format( kubelogin_cmd ))

    def created_today(self, home_rel_path):

        # returns true if the file at home_rel_path was created today
        created_today=False

        full_home_path = get_home_path(home_rel_path)

        if os.path.exists(full_home_path):

            today = dt.datetime.now().date()

            filetime = dt.datetime.fromtimestamp(os.path.getctime(full_home_path))

            if filetime.date() == today:
                created_today = True

        return created_today

    def running_on_desktop(self):
        # returns true if these credentials are being created on a developer desktop
        #
        # the distinction is important for this k8s credentialer as it determines which
        # k8s api endpoint to use. the private endpoint is the most fully featured (esp for
        # kubectl commands) but is only accessible from clients joined to the nordstrom domain.
        # the public endpoint works for most use cases AND is available from build servers
        # running in most environments (k8s clusters, aws v2 accounts, etc).
        return self.params.get('desktop')

    def get_current_context(self):

        context_name = ""

        full_home_path = get_home_path(".kube/config", self.rootdir)

        if os.path.exists(full_home_path):

            kubernetes.config.load_kube_config()

            contexts, active_context = kubernetes.config.list_kube_config_contexts()

            if ('name' in active_context ):
                context_name = active_context['name']

        return context_name

    def get_kubelogin_path(self):

        return  get_home_path(".kube/kubelogin")
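A minimal usage sketch of the class above. It assumes the class and a Params helper are already importable (import paths aren't shown in these examples), that the descriptor satisfies the schema, and that an empty vaults list is acceptable to Secrets.load(); the {"value": ...} params layout follows the pattern used in the tests in Example #5.

# usage sketch -- imports, descriptor contents, and vault handling are assumptions
creds = NordstromK8sCredentialer({"namespace": "my-namespace"})

# 'desktop' controls which api endpoint ends up in ~/.kube/config
params = Params({"desktop": {"value": True}})

err = creds.create(params, vaults=[], overwrite_bool=False)
if err:
    print("k8s credential setup failed: %s" % err)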
Example #10
File: ami.py  Project: thomas-b-jackson/yac
class AMI():
    # for building AMIs (amazon machine images) using packer

    def __init__(self, serialized_artifact):

        validate(serialized_artifact, "yac/schema/makers/ami.json")

        self.name = search('name', serialized_artifact)

        self.description = search('description', serialized_artifact)

        # the aws profile aliasing the account to build in
        self.profile = search('profile', serialized_artifact)

        # path to the packer file
        self.packer_file = search('"packer-file"', serialized_artifact)

        # directory containing files that should be included in the build
        self.packable_dir = search('"packer-dir"', serialized_artifact, "")

        self.secrets = Secrets(search('"Secrets"', serialized_artifact, {}))

        # initialize the inputs (for driving user prompts)
        self.inputs = Inputs(search("Inputs", serialized_artifact, {}))

    def get_name(self):
        return self.name

    def get_description(self):
        return self.description

    def make(self, params, vaults, dry_run_bool=False):

        err = ""

        self.params = params

        # process inputs and load results into params
        self.inputs.load(self.params)

        # load secrets into params
        self.secrets.load(self.params, vaults)

        # if the packer dir wasn't specified, assume the same dir
        # that the packerfile is in
        self.set_packable_dir()

        # render variables in all files

        # put the rendered files in the std dump path so they
        #  can be viewed after the yac container stops
        dump_path = get_dump_path(params.get("service-alias"))

        build_path = os.path.join(dump_path, "packer")

        # print(self.params)

        apply_templates_in_dir(self.packable_dir, self.params, build_path)

        # form the packer build command
        packer_file_name = os.path.basename(self.packer_file)
        packer_cmd = "packer build %s" % (packer_file_name)

        if dry_run_bool:
            print("see rendered packer files under %s" % build_path)

        else:
            # the full file path to the rendered packer file
            packer_path = os.path.join(build_path, packer_file_name)

            # get the ami_name and the force_deregister override parameter
            with open(packer_path, 'r') as f:
                packer_dict = json.load(f)

            ami_name = jmespath.search("builders[0].ami_name", packer_dict)
            override_param = jmespath.search("builders[0].force_deregister",
                                             packer_dict)

            if (self.ami_exists(params, ami_name) and override_param is False):
                print(
                    "AMI already exists and packer file includes instructions to NOT overwrite"
                )

            else:
                print("build command:\n{0}".format(packer_cmd))
                # get the current working dir
                curr_dir = os.getcwd()

                # cd to the tmp dir
                os.chdir(build_path)

                # the subprocess command expects the command to be split into an array
                # on whitespace boundaries.
                packer_cmd_array = packer_cmd.split(" ")

                try:

                    last_line = ""
                    process = subprocess.Popen(packer_cmd_array,
                                               stdout=subprocess.PIPE)
                    for c_bytes in iter(lambda: process.stdout.read(1), ''):

                        c = c_bytes.decode("utf-8")
                        process.poll()
                        # write this char to stdout
                        if c:
                            sys.stdout.write(c)

                            # troll for ami ids in the packer output by converting individual
                            # chars into packer output lines and using a regex to find ami ids
                            # in each line
                            if c != '\n':
                                last_line = last_line + c

                            else:
                                # cache any ami ids that appear for future reference
                                # in front end builds
                                ami_id = self._find_ami_id(last_line)

                                if ami_id:
                                    self.ami_id = ami_id

                                # reset the line
                                last_line = c

                        else:
                            err = process.returncode
                            break

                except Exception as e:

                    err = str(e)

                # cd back to the original dir
                os.chdir(curr_dir)

        return err

    # find ami id from a line of packer output
    def _find_ami_id(self, packer_output_str):

        ami_id = ""

        # use regex to look for the ami id in the packer output
        # second group matches any letter (case-insensitive) or digit
        regex_result = re.search("(us-west-2: )(ami-[a-zA-Z0-9]{8,})",
                                 packer_output_str)

        # if it exists, second group holds the ami id
        if (regex_result and regex_result.group(2)):

            ami_id = regex_result.group(2)

        return ami_id

    def set_packable_dir(self):

        if not self.packable_dir:

            packer_file_full_path = os.path.join(
                self.params.get('servicefile-path'), self.packer_file)

            self.packable_dir = os.path.dirname(packer_file_full_path)

    def ami_exists(self, params, ami_name):

        ami_exists = False

        session, err = get_session(params)
        if not err:
            client = session.client('ec2')

            response = client.describe_images(Filters=[{
                'Name': 'name',
                'Values': [ami_name]
            }])
        else:
            return err

        if "Images" in response and len(response["Images"]) == 1:

            ami_exists = True

        return ami_exists
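A minimal usage sketch of AMI.make() in dry-run mode, which only renders the packer templates under the dump path and skips the actual packer build. The import of the class and Params, the descriptor contents, and the empty vaults list are assumptions; "service-alias" and "servicefile-path" are the params the method reads above.

# usage sketch -- descriptor and params values are placeholders
ami_maker = AMI({
    "name": "my-ami",
    "description": "base ami built with packer",
    "profile": "nonprod",
    "packer-file": "packer/ami.json"
})

params = Params({"service-alias": {"value": "myservice"},
                 "servicefile-path": {"value": "."}})

# dry run: render templates only, skip the actual 'packer build'
err = ami_maker.make(params, vaults=[], dry_run_bool=True)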
Example #11
    def __init__(self,
                 serialized_service,
                 service_path,
                 alias="",
                 params_path="",
                 kvps=""):

        # first validate. this should throw an exception if
        # required fields aren't present
        validate(serialized_service, "yac/schema/service.json")

        self.path = service_path
        self.kvps_str = kvps
        self.params_file_path = params_path

        self.description = Description(search('Description',
                                       serialized_service,{}),
                                       alias)

        self.vaults = SecretVaults(search('"Vaults"',
                                      serialized_service,[]))

        # a service can reference other services that it includes
        self.includes = search("includes",
                             serialized_service,{})

        # initialize stack params (key/value pairs and maps), including static params specified
        # in the serialized service, params from an external file,
        # and params specified in a key-value pair string (kvps)
        self.params = Params(search('"Params"',
                                    serialized_service, {}))

        # initialize the dictionary that will hold all params (statics+secrets+inputs)
        self.all_params = {}

        inputs_cacher = InputsCacher(search('InputsCache',
                                    serialized_service,{}))

        self.inputs = Inputs(search('Inputs',
                                    serialized_service,{}),
                             inputs_cacher)

        self.secrets = Secrets(search('"Secrets"',
                                      serialized_service,{}))

        # initialize the stack associated with this service
        self.stack = Stack(search('Stack',
                                  serialized_service, {}))

        self.tasks = Tasks(search('Tasks',
                                    serialized_service,{}))

        # initialize the tests associated with this service
        self.tests = IntegrationTests(search('"IntegrationTests"',
                                      serialized_service,{}))

        # initialize the artifacts associated with this service
        self.artifacts = Artifacts(search('Artifacts',
                                           serialized_service,[]))

        # initialize the credentialer associated with this service
        self.credentialers = Credentialers(search("Credentialers",serialized_service,[]))

        # initialize the pipeline associated with this service
        self.pipeline = Pipeline(search('Pipeline',
                                     serialized_service,{}))

        # load the objects from each included service
        self.load_includes()

        # save a copy of the full serialized version of the
        # service to support the serialize() method
        self.serialized_service = serialized_service
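A rough sketch of the top-level shape of a serialized service, limited to the keys searched above. Which of these are actually required, and the structure inside each, is defined by yac/schema/service.json, so treat every value here as a placeholder.

# illustrative servicefile dict only -- inner structures are placeholders
serialized_service = {
    "Description": {"name": "my-service"},   # Description's own fields are assumptions
    "Params": {},
    "Inputs": {},
    "InputsCache": {},
    "Secrets": {},
    "Vaults": [],
    "Stack": {},
    "Tasks": {},
    "IntegrationTests": {},
    "Artifacts": [],
    "Credentialers": [],
    "Pipeline": {},
    "includes": {}
}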
Example #12
class Service():

    def __init__(self,
                 serialized_service,
                 service_path,
                 alias="",
                 params_path="",
                 kvps=""):

        # first validate. this should throw an exception if
        # required fields aren't present
        validate(serialized_service, "yac/schema/service.json")

        self.path = service_path
        self.kvps_str = kvps
        self.params_file_path = params_path

        self.description = Description(search('Description',
                                       serialized_service,{}),
                                       alias)

        self.vaults = SecretVaults(search('"Vaults"',
                                      serialized_service,[]))

        # a service can reference other services that it includes
        self.includes = search("includes",
                             serialized_service,{})

        # initialize stack params (key/value pairs and maps), including static params specified
        # in the serialized service, params from an external file,
        # and params specified in a key-value pair string (kvps)
        self.params = Params(search('"Params"',
                                    serialized_service, {}))

        # initialize the dictionary that will hold all params (statics+secrets+inputs)
        self.all_params = {}

        inputs_cacher = InputsCacher(search('InputsCache',
                                    serialized_service,{}))

        self.inputs = Inputs(search('Inputs',
                                    serialized_service,{}),
                             inputs_cacher)

        self.secrets = Secrets(search('"Secrets"',
                                      serialized_service,{}))

        # initialize the stack associated with this service
        self.stack = Stack(search('Stack',
                                  serialized_service, {}))

        self.tasks = Tasks(search('Tasks',
                                    serialized_service,{}))

        # initialize the tests associated with this service
        self.tests = IntegrationTests(search('"IntegrationTests"',
                                      serialized_service,{}))

        # initialize the artifacts associated with this service
        self.artifacts = Artifacts(search('Artifacts',
                                           serialized_service,[]))

        # initialize the credentialer associated with this service
        self.credentialers = Credentialers(search("Credentialers",serialized_service,[]))

        # initialize the pipeline associated with this service
        self.pipeline = Pipeline(search('Pipeline',
                                     serialized_service,{}))

        # load the objects from each included service
        self.load_includes()

        # save a copy of the full serialized version of the
        # service to support the serialize() method
        self.serialized_service = serialized_service

    # add mergeable fields from another service into this service
    def add(self,
            service):

        if service.params:

            self.params.add(service.params)

        if service.secrets:

            self.secrets.add(service.secrets)

        if service.vaults:

            self.vaults.add(service.vaults)

        if service.stack.impl:

            # there can be only one stack
            if self.stack.impl:
                self.stack.add(service.stack)
            else:
                self.stack = service.stack

        if service.tasks:

            self.tasks.add(service.tasks)

        if service.inputs:

            self.inputs.add(service.inputs)

        if service.tests:

            self.tests.add(service.tests)

        if service.artifacts:

            self.artifacts.add(service.artifacts)

        if service.credentialers:

            self.credentialers.add(service.credentialers)

        if service.pipeline and service.pipeline.get_stages():

            # there can be only one pipeline per service
            self.pipeline = service.pipeline

    def add_params_via_kvps(self,kvp_str):
        # load key-value pairs via a kvp string formatted as:
        # <key1>:<value1>,<key2>:<val2>,etc
        self.params.load_kvps(kvp_str)

    def load_includes(self):
        # load objects from each included service

        # for each included service specified
        for service_key in self.includes:

            sub_service_path = self.includes[service_key]["value"]

            # load the included service ...
            this_sub, err = get_service(sub_service_path,
                                   servicefile_path=self.path)

            # add to this service
            if not err:
                print("including '%s' service ..."%(service_key))
                self.add(this_sub)
            else:
                print("included service '%s' could not be loaded from %s"%(service_key,
                                                                           sub_service_path))
                print("error: %s"%err)
                print("exiting ...")
                exit(0)

    def get_meta_params(self):
        # get meta data about this service

        service_metadata = Params({})
        service_metadata.set("service-default-alias",self.description.default_alias, "service default alias")
        service_metadata.set("service-alias",self.description.alias, "service alias")
        service_metadata.set("service-name",self.description.name, "service name")

        service_metadata.set("servicefile-path",self.path, "path to the servicefile")

        # add service summary and repo
        service_metadata.set('service-summary',self.description.summary, "service summary")
        service_metadata.set('service-repo',self.description.repo, "repo containing this service")

        # add the command that was run against this service
        service_metadata.set("yac-command",sys.argv[0], 'the yac command being run')

        return service_metadata

    def get_params(self):

        # add params describing the service itself
        self.params.add(self.get_meta_params())

        # load any params from yac-supported env variables
        self.params.load_from_env_variables()

        # load kvps (typically used for injecting inputs in pipelines or overriding
        #   an individual param setpoint)
        self.params.load_kvps(self.kvps_str)

        # load params from file
        self.params.load_from_file(self.params_file_path)

        return self.params

    def get_all_params(self,
                       context="",
                       dry_run_bool=False,
                       credentialer_names=[]):

        # Take a copy of params
        self.all_params = self.get_params()

        # process inputs and load results into params
        self.inputs.load(self.all_params)

        # load secrets into params
        self.secrets.load(self.all_params,
                          self.vaults)

        return self.all_params

    def get_description(self):

        return self.description

    def get_artifacts(self):

        return self.artifacts

    def get_stack(self):

        return self.stack

    def get_tests(self):

        return self.tests

    def get_tasks(self):

        return self.tasks

    def get_vaults(self):

        return self.vaults

    def get_deployer(self):

        return self.deployer()

    def get_inputs(self):

        return self.inputs

    def get_secrets(self):

        return self.secrets

    def get_pipeline(self):

        return self.pipeline

    def get_credentialers(self):

        return self.credentialers

    def get_serialized_pipeline(self):

        return self.serialized_pipeline

    def deploy_boot_files(self, dry_run_bool=False):

        self.boot_files.deploy(self.params, dry_run_bool)

    def serialize(self):
        return self.serialized_service

    def __str__(self):
        ret = ("description:\n %s\n"%self.description +
               "params:\n %s\n"%self.params +
               "secrets:\n %s\n"%self.secrets +
               "stack:\n %s\n"%self.stack +
               "vaults: \n %s\n")%self.vaults
        return ret
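A minimal usage sketch of the class above, assuming Service is importable and serialized_service is a dict like the one sketched after Example #11 that passes schema validation.

# usage sketch -- import paths and servicefile contents are assumptions
service = Service(serialized_service,
                  service_path=".",
                  alias="myservice")

# statics + env variables + kvps + file params, then inputs and secrets on top
all_params = service.get_all_params()

stack = service.get_stack()
tests = service.get_tests()
credentialers = service.get_credentialers()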
Example #13
class NordstromAWSCredentialer():
    def __init__(self, credentials_descriptor):
        """ Generates aws credentials for nordstrom users.

        Args:
            credentials_descriptor: A dictionary containing serialized credentialer,
                               satisfying the yac/schema/aws/credentialer.json schema

        Raises:
            ValidationError: if a inputs fails schema validation

        """

        validate(credentials_descriptor,
                 "yac/schema/stacks/aws/credentialer.json")

        self.accounts = search("accounts", credentials_descriptor, [])

        self.region = search("region", credentials_descriptor, [])

        # if urls not provided, use defaults
        self.token_endpoint_url = search('"token-endpoint-url"',
                                         credentials_descriptor,
                                         TOKEN_ENDPOINT_URL)

        self.role_endpoint_url = search('"role-endpoint-url"',
                                        credentials_descriptor,
                                        ROLE_ENDPOINT_URL)

        # initialize the inputs (for driving user prompts)
        self.inputs = Inputs(search("Inputs", credentials_descriptor, {}))

    def create(self, params, vaults, overwrite_bool):

        # Writes credentials to a file for each configured account
        #
        # returns:
        #   err: string containing error message for failures

        err = ""
        self.params = params

        # process creds-specific inputs and load results into params
        self.inputs.load(self.params)

        if not self.is_desktop():
            # if this is being run on a build server, aws access permissions should be
            # conferred via an iam role rather than via credentials
            print(
                "aws credentials not created ... assuming access provided via iam role"
            )
            print(
                "(note: if running on desktop, indicate by exporting the env variable DESKTOP=true)"
            )
            return err

        token_file_path = get_credentials_path()

        token_file_dir = os.path.dirname(token_file_path)

        # make sure directory exists
        if not os.path.exists(token_file_dir):
            os.makedirs(token_file_dir)

        # if the token file doesn't exist or has expired, or if
        # the file should be overwritten regardless of its status ...
        if overwrite_bool or self.needs_updating(token_file_path):

            # write credentials into file
            file_ = open(token_file_path, 'w')

            # accounts support intrinsics. render before proceeding
            accounts = apply_intrinsics(self.accounts, self.params)

            for account in accounts:

                account_name = account['name']

                # create access tokens via lan credentials
                aws_access, err = self.get_session_tokens(account_name)

                if not err:

                    print("generating credentials for aws account: %s" %
                          account_name)

                    # note that the token is included twice due to an inconsistency in aws
                    # cli versus boto SDK.
                    # aws cli uses aws_session_token while boto uses aws_security_token
                    credentials = "aws_access_key_id = " + aws_access['AccessKey'] + "\n" + \
                                  "aws_secret_access_key = " + aws_access['SecretAccessKey'] + "\n" + \
                                  "aws_session_token = " + aws_access['SessionToken'] + "\n" + \
                                  "aws_security_token = " + aws_access['SessionToken'] + "\n\n"

                    if 'alias' in account:
                        file_.write("[%s]\n" % account['alias'] + credentials)
                        print(
                            "warning: the 'alias' in nordstrom aws credentials is deprecated. use 'profile' instead"
                        )
                    else:
                        file_.write("[%s]\n" % account['profile'] +
                                    credentials)

                    if account['default']:
                        file_.write("[default]\n" + credentials)

            file_.close()

        return err

    def get_session_tokens(self, account_name):
        # Creates a session token given an account
        #
        # args:
        #   account_name: aws account name
        #
        # files:
        #   .lanid: file containing lanid of aws user
        #   .lanpwd: file containing lanpwd of aws user
        #
        # returns:
        #    token: session token
        #    err: any errors encountered

        lanid = self.get_lan_id()
        pwd = self.get_lan_pwd()

        token = {}
        err = ""

        # get principal/role pair for the account input
        role, err = self.get_role(lanid, pwd, account_name)

        if not err:

            header = {}
            header['content-type'] = 'application/json'
            header['accept'] = 'application/json'

            lan_auth = "*****@*****.**" % lanid

            # silence insecure request warnings
            requests.packages.urllib3.disable_warnings(InsecureRequestWarning)

            response = requests.post(self.token_endpoint_url,
                                     auth=(lan_auth, pwd),
                                     headers=header,
                                     data=json.dumps(role),
                                     verify=False)

            if response.status_code == 200:

                token = response.json()

            else:
                err = "tokens not received!. Status: %s" % response.status_code

        return token, err

    def get_role(self, lanid, pwd, role_str):
        # Return role given a lanid and password and account
        #
        # returns:
        #   role: dictionary containing role
        #   err: string with any errors encountered

        role = {}
        err = ""

        header = {}
        header['content-type'] = 'application/json'
        header['accept'] = 'application/json'

        lan_auth = "*****@*****.**" % lanid

        # silence insecure request warnings
        requests.packages.urllib3.disable_warnings(InsecureRequestWarning)

        response = requests.get(self.role_endpoint_url,
                                auth=(lan_auth, pwd),
                                verify=False)

        if response.status_code == 200:

            pairs = response.json()

            # find the pair matching the requested role
            for pair in pairs:

                if role_str in pair['Role']:

                    role = pair
        else:

            err = ("Could not retrieve aws accounts for " +
                   "user '%s'. " % lan_auth +
                   "Status code: %s\n" % response.status_code)

        return role, err

    def get_lan_pwd(self):

        lan_pwd = ""

        # default location for lan pwd file
        pwd_cache_path = os.path.join(get_cache_path(), '.lanpwd')

        if os.path.exists(pwd_cache_path):

            print("reading cached password from %s ..." % pwd_cache_path)

            with open(pwd_cache_path, 'r') as myfile:
                # read pwd from file and strip off any whitespace chars
                lan_pwd = myfile.read().strip()

        else:

            lan_pwd = getpass.getpass("Please input your lan pwd >> ")

            # cache the pwd for future use
            pwd_cache_dir = os.path.dirname(pwd_cache_path)

            # make sure directory exists
            if not os.path.exists(pwd_cache_dir):
                os.makedirs(pwd_cache_dir)

            with open(pwd_cache_path, 'w') as myfile:
                myfile.write(lan_pwd)

        return lan_pwd

    def get_lan_id(self, disallow_caching=False):

        lan_id = ""

        # default location for the lan id file
        id_cache_path = os.path.join(get_cache_path(), '.lanid')

        if os.path.exists(id_cache_path):
            with open(id_cache_path, 'r') as myfile:

                # strip any carriage returns
                lan_id = myfile.read().strip()
        else:

            lan_id = getpass.getuser()

        return lan_id

    def needs_updating(self, creds_file_path):
        # returns true if either:
        #   * the token file does not exist, or
        #   * the token file is empty, or
        #   * the token file was not created during the current hour
        needs_updating = True

        if os.path.exists(creds_file_path):

            # make sure file isn't empty
            if os.stat(creds_file_path).st_size > 2:

                filetime = dt.datetime.fromtimestamp(
                    os.path.getctime(creds_file_path))

                if (filetime.day == dt.datetime.now().day
                        and filetime.hour == dt.datetime.now().hour):
                    # was created on this day in this hour, so
                    # does not need updating
                    print(
                        "aws credentials are still fresh so won't be updated ..."
                    )
                    needs_updating = False

            elif os.stat(creds_file_path).st_size <= 2:
                print("aws credentials is empty so will be updated ...")
                needs_updating = True

        return needs_updating

    def creds_exist(self):
        return os.path.exists(get_credentials_path())

    def is_desktop(self):

        # the desktop param is set to true if yac was run from a developer desktop
        return self.params.get('desktop')
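A minimal usage sketch of create() above. Imports, descriptor contents, and the empty vaults list are assumptions; note that create() only writes the credentials file when the 'desktop' param is truthy, otherwise it assumes access is granted through an iam role.

# usage sketch -- descriptor and params values are placeholders
creds = NordstromAWSCredentialer({
    "accounts": [{"name": "my-aws-account", "profile": "nonprod", "default": True}],
    "region": "us-west-2"
})

params = Params({"desktop": {"value": True}})

# overwrite_bool=True forces a refresh even if the token file is still fresh
err = creds.create(params, vaults=[], overwrite_bool=True)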
Example #14
class ContainerImage():

    # for building docker container images
    def __init__(self,
                 serialized_artifact):

        validate(serialized_artifact, "yac/schema/makers/container_image.json")

        self.name = search("name",serialized_artifact,"")
        self.description = search("description",serialized_artifact,"")

        self.image = search("image",serialized_artifact)

        # the registry where the images should be pushed
        # defaults to artifactory
        self.registry = search('registry',
                                serialized_artifact,
                                ARTIFACTORY_URL)

        # initialize the inputs (for driving user prompts)
        self.inputs = Inputs(search("Inputs",
                                    serialized_artifact,{}))

        self.secrets = Secrets(search('"Secrets"',
                                    serialized_artifact,{}))

        # client for most operations
        self.client = docker.DockerClient('tcp://%s:%s'%(BUILDER_HOST,
                                                         BUILDER_PORT))

        # client for "low-level" build operations (e.g. builds that send
        # the details on each layer built to stdout )
        # TODO: figure out why auth isn't working from inside a container
        # with this one
        self.api_client = docker.APIClient('tcp://%s:%s'%(BUILDER_HOST,
                                                         BUILDER_PORT))

    def get_name(self):
        return self.name

    def get_description(self):
        return self.description

    def make(self,
             params,
             vaults,
             dry_run_bool=False):

        self.params = params

        # process inputs and load results into params
        self.inputs.load(self.params)

        # load secrets into params
        self.secrets.load(self.params,
                          vaults)

        # build the image
        err = self.build(dry_run_bool)

        if not err:

            # login to container registry specified in the servicefile
            err = self.login()

            if not err:

                # push the image to the registry
                err = self.push(dry_run_bool)

        return err

    def login(self):

        err = ""
        # render intrinsics in the registry
        rendered_registry = apply_intrinsics(self.registry,
                                             self.params)
        try:

            print("login using api client")
            response = self.client.login(username = rendered_registry['username'],
                              password = rendered_registry['password'],
                              registry = rendered_registry['host'])

            print(response)

        except docker.errors.APIError as ae:
            err = ae

        return err

    def build(self,
              dry_run_bool=False):

        err = ""
        build_success = False

        # render intrinsics in the image
        rendered_image = apply_intrinsics(self.image,
                                          self.params)

        # put the rendered files in the std dump path so they
        #  can be viewed after the yac container stops
        dump_path = get_dump_path(self.params.get("service-alias"))
        build_path = os.path.join(dump_path,"docker")

        # path to the docker file (relative to servicefile)
        self.dockerfile_path = search('"dockerfile"',rendered_image,"")

        servicefile_path = self.params.get('servicefile-path')

        dockerfile_full_path = os.path.join(servicefile_path,self.dockerfile_path)

        if os.path.exists(dockerfile_full_path):

            # render variables in the docker file
            apply_templates_in_file(dockerfile_full_path,
                                    self.params,
                                    build_path)

            # assume files are in the same location as the dockerfile
            apply_templates_in_dir(os.path.dirname(dockerfile_full_path),
                                   self.params,
                                   build_path)

            self.image_name =  search("name",rendered_image,"")
            self.image_label = search("label",rendered_image,"")

            # build args
            self.build_args = search('"build-args"',rendered_image,[])

            if dry_run_bool:

                print("see rendered files under %s"%build_path)
                print("(dry-run) building image %s:%s ..."%(self.image_name,
                                                            self.image_label))

            else:
                try:

                    print("building image %s:%s ..."%(self.image_name,
                                                      self.image_label))
                    # build the image
                    self.image,log = self.client.images.build(tag="%s:%s"%(self.image_name,
                                                                   self.image_label),
                                                      path=build_path,
                                                      buildargs=self.build_args)

                    for line in log:

                        if 'stream' in line:
                            print(line['stream'])
                        else:
                            print(line)


                except docker.errors.APIError as ae:
                    err = "APIError: %s"%ae

                except docker.errors.BuildError as be:
                    err = "BuildError: %s"%be

                except TypeError as te:
                    err = "TypeError: %s"%te
        else:
            err = "dockerfile at %s does not exist"%dockerfile_full_path

        return err

    def push(self,
             dry_run_bool=False):

        if dry_run_bool:
            print("(dry-run) pushing image %s:%s ..."%(self.image_name,self.image_label))

        else:
            print("pushing image %s:%s ..."%(self.image_name,self.image_label))

            # push the image to registry
            response = self.client.images.push(repository=self.image_name,
                                               tag=self.image_label)

            print(response)
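A minimal usage sketch of the class above in dry-run mode, which renders the dockerfile and its sibling files but skips the actual image build and push (note that the registry login above is not gated by the dry-run flag). The image and registry key names come from build() and login() above; the values, the imports, and the empty vaults list are assumptions.

# usage sketch -- descriptor and params values are placeholders
image_maker = ContainerImage({
    "name": "my-image-artifact",
    "image": {
        "name": "myorg/myservice",
        "label": "latest",
        "dockerfile": "docker/Dockerfile",
        "build-args": {}
    },
    "registry": {
        "host": "registry.example.com",
        "username": "builder",
        "password": "not-a-real-password"
    },
    "Inputs": {},
    "Secrets": {}
})

params = Params({"service-alias": {"value": "myservice"},
                 "servicefile-path": {"value": "."}})

err = image_maker.make(params, vaults=[], dry_run_bool=True)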