def run(self, params, context="", test_names=None, group_names=None,
        setup_only=False, cleanup_only=False, test_only=False):
    """Run tests and test groups.

    Args:
        params: Params instance
        context: string describing build context
        test_names: run only the tests with these names (default: all)
        group_names: run only the test groups with these names (default: all)
        setup_only: boolean - perform setup only
        cleanup_only: boolean - perform cleanup only
        test_only: boolean - skip setup and cleanup

    Returns:
        err: string with error message if setup or cleanup failed
    """
    # bug fix: avoid mutable default arguments (a shared list would
    # persist across calls); None is the conventional sentinel
    test_names = test_names if test_names is not None else []
    group_names = group_names if group_names is not None else []

    self.params = params

    # render intrinsics in tests
    rendered_tests = apply_intrinsics(self.tests, self.params)

    # render intrinsics in the target map
    self.target_map = apply_intrinsics(self.raw_target_map, self.params)

    tests_to_run, err = self._get_tests(test_names, rendered_tests)

    if not err:
        for test in tests_to_run:
            err = self._run_test(test, setup_only, cleanup_only,
                                 test_only, context)
            if err:
                # bug fix: stop on the first failure so the error is not
                # clobbered by a later, passing test
                break

    if not err:
        # render intrinsics in test groups
        rendered_test_groups = apply_intrinsics(self.test_groups, self.params)

        tests_groups_to_run, err = self._get_test_groups(group_names,
                                                         rendered_test_groups)

        if not err:
            for group in tests_groups_to_run:
                err = self._run_group(group, setup_only, cleanup_only,
                                      test_only, context)
                if err:
                    break

    return err
def run( self ):
    """Run an artillery load test and record its aggregate results.

    Renders variables, shells out to the ``artillery`` CLI, streams its
    stdout through to our stdout, then loads the results file and saves
    the 'aggregate' section under this test's name (deduplicating the
    name on collision). Finally registers the results file and runs
    assertions.
    """
    # render variables in the artillery variables
    # NOTE(review): the return value is discarded — presumably
    # apply_intrinsics mutates self.variables in place; confirm.
    apply_intrinsics(self.variables,self.params)

    variables_str = ""
    if self.variables:
        # render variables into a string
        # note: need to make sure no whitespace chars are included
        variables_str = "-v '%s'"%json.dumps(self.variables,separators=(',', ':'))

    results_path = self.get_results_path()

    # form the artillery command
    artillery_cmd = "artillery run %s %s -k -t %s -o %s"%( self.config_path,
                                                           variables_str,
                                                           self.target,
                                                           results_path )

    print("artillery command:\n{0}".format( artillery_cmd ))

    # stream artillery's stdout line-by-line until EOF (empty bytes)
    process = subprocess.Popen( artillery_cmd, shell=True, stdout=subprocess.PIPE )
    while True:
        line = process.stdout.readline()
        if line != b'':
            # fd 1 == our stdout; write the raw bytes through unmodified
            os.write(1, line)
        else:
            break

    # load results
    results_dict = load_dictionary( results_path )

    if self.test_name in self.artillery_aggregates:
        # avoid clobbering an earlier test's results with the same name;
        # NOTE(review): a random 1-10 suffix can still collide — confirm
        # this is acceptable.
        print("name collision: %s is already included in test results"%self.test_name)
        self.test_name = "%s-%s"%(self.test_name,random.randint(1,10))
        print("saving results as %s"%self.test_name)

    # initiate results for this test
    self.artillery_aggregates[self.test_name] = {}

    # save the aggregates
    if 'aggregate' in results_dict:
        self.artillery_aggregates[self.test_name] = results_dict['aggregate']

    # register results file
    self.test_results.append_results_file( results_path )

    # run assertions
    self.assert_results()
def test_no_yac_fxn(self):
    """A yac-calc naming a missing script, or an unknown stock calc,
    must raise IntrinsicsError."""
    params = {
        "service-alias": {
            "type": "string",
            "value": "myservice"
        },
        "env": {
            "type": "string",
            "value": "dev"
        }
    }

    # calc script path that does not exist on disk
    missing_file_dict = {
        "myparam": {
            "comment": "testing",
            "value": {
                "yac-calc": ["yac/tests/intrinsics/vectors/nonexistant.py"]
            }
        }
    }

    # stock calc name that is not registered
    missing_stock_dict = {
        "myparam": {
            "comment": "testing",
            "value": {
                "yac-calc": ["no-existant-stock"]
            }
        }
    }

    saw_file_error = False
    try:
        apply_intrinsics(missing_file_dict, Params(params))
    except IntrinsicsError:
        saw_file_error = True

    saw_stock_error = False
    try:
        apply_intrinsics(missing_stock_dict, Params(params))
    except IntrinsicsError:
        saw_stock_error = True

    self.assertTrue(saw_file_error)
    self.assertTrue(saw_stock_error)
def load(self, params, vaults=None):
    """Load secrets from the vaults into the params object passed in.

    Args:
        params: Params instance the secrets are written into.
        vaults: optional vault registry; when provided it replaces
            self.vaults and is initialized against params.
    """
    if vaults:
        self.vaults = vaults
        # initialize vaults
        vaults.initialize(params)

    # load secrets into the params object passed in
    rendered_values = apply_intrinsics(self.values, params)

    # the secret key is also the param key
    for param_key in list(rendered_values.keys()):

        # each entry names a vault ('source') and a path in it ('lookup')
        secret_value = self.vaults.get(
            rendered_values[param_key]['source'],
            rendered_values[param_key]['lookup'])

        if secret_value:
            comment = rendered_values[param_key]['comment']
            # load secret into a param
            params.set(param_key, secret_value, comment)
        else:
            # lookup failed
            msg = ("secret for '%s' at path '%s' " %
                   (param_key, rendered_values[param_key]['lookup']) +
                   "does not exist in the '%s' vault" %
                   rendered_values[param_key]['source'])
            self.load_failure(msg)
def test_reference_error(self):
    """A yac-ref to a param that does not exist raises IntrinsicsError."""
    params = {
        "suffix" : {
            "type" : "string",
            "value": ""
        },
        "s3_path": {
            "type" : "boolean",
            "value": "/sets/jira/dev"
        }
    }

    # note the deliberate typo: "suffi" instead of "suffix"
    test_dict = {
        "volumesFrom": {
            "comment": "testing",
            "value": {"yac-ref": "suffi"}
        }
    }

    raised = False
    try:
        apply_intrinsics(test_dict, Params(params))
    except IntrinsicsError:
        raised = True

    print(json.dumps(test_dict,indent=2))
    self.assertTrue(raised)
def test_yac_fxn(self):
    """A yac-fxn value is replaced by the value returned from the
    referenced script (fxn.py returns the service alias)."""
    raw_params = {
        "service-alias": {
            "type": "string",
            "value": "myservice"
        },
        "env": {
            "type": "string",
            "value": "dev"
        }
    }

    test_dict = {
        "myparam": {
            "comment": "testing",
            "value": {
                "yac-fxn": "yac/tests/intrinsics/vectors/fxn.py"
            }
        }
    }

    # run test
    rendered = apply_intrinsics(test_dict, Params(raw_params))

    # fxn.py returns the service alias, so that's what should land here
    self.assertTrue(rendered['myparam']['value'] == "myservice")
def test_map(self):
    """A yac-ref to a map param resolves via the map's 'lookup' key."""
    params = {
        "user-name": {
            "value": "henry-grantham"
        },
        "neighborhood-map": {
            "lookup": "user-name",
            "value": {
                "tom-jackson": "phinney",
                "henry-grantham": "capital-hill"
            }
        }
    }

    test_dict = {
        "InstancePort": {"Ref": "WebServerPort"},
        "LoadBalancerName": {"yac-ref": "neighborhood-map"}
    }

    # render, then flatten to a string for a simple containment check
    rendered_str = json.dumps(apply_intrinsics(test_dict, Params(params)))
    print(rendered_str)

    # user-name is henry-grantham, whose neighborhood is capital-hill
    self.assertTrue("capital-hill" in rendered_str)
def test_map_name_error(self):
    """A yac-ref to a misspelled map param name raises IntrinsicsError."""
    params = {
        "user-name": {
            "value": "henry-grantham"
        },
        "neighborhood-map": {
            "lookup": "user-name",
            "value": {
                "tom-jackson": "phinney",
                "henry-grantham": "capital-hill"
            }
        }
    }

    test_dict = {
        "InstancePort": {"Ref": "WebServerPort"},
        # "neighborhood-maps" (plural) is not a defined param
        "LoadBalancerName": {"yac-ref": "neighborhood-maps"}
    }

    # verify we get an error
    raised = False
    try:
        apply_intrinsics(test_dict, Params(params))
    except IntrinsicsError:
        raised = True

    self.assertTrue(raised)
def test_null_join(self):
    """yac-join skips empty elements, so no doubled delimiter appears."""
    params = {
        "suffix" : {
            "type" : "string",
            "value": ""
        },
        "s3_path": {
            "type" : "boolean",
            "value": "/sets/jira/dev"
        }
    }

    test_dict = {
        "volumesFrom": {
            "comment": "testing",
            "value": {"yac-join" : [ "/", [
                {"yac-ref": "s3_path"},
                {"yac-ref": "suffix"},   # renders to the empty string
                "backups.json"
            ]]}
        }
    }

    # run test
    rendered = apply_intrinsics(test_dict, Params(params))

    self.assertTrue(rendered['volumesFrom']['value'] == "/sets/jira/dev/backups.json")
def test_naming_convention(self):
    """yac-name honors the naming-convention param (keys + delimiter)."""
    params = {
        "service-alias": {
            "value": "jira"
        },
        "env": {
            "value": "prod"
        },
        "naming-convention": {
            "comment": "name resources using the alias followed by the environment",
            "value": {
                "param-keys": ['service-alias', 'env'],
                "delimiter": "."
            }
        }
    }

    test_dict = {
        "Type": "AWS::AutoScaling::AutoScalingGroup",
        "Name": {"yac-name": "asg"}
    }

    rendered = apply_intrinsics(test_dict, Params(params))

    # alias + env + resource tag, joined with the "." delimiter
    self.assertTrue(rendered['Name'] == 'jira.prod.asg')
def changes(self, params, context): self.params = params # initialize a session with boto3 self.session, err = get_session(self.params) self.name = get_stack_name(self.params) # apply intrinsics to the stack template rendered_stack_template = apply_intrinsics(self.serialize(), self.params) stack_exits_bool = self.exists() # determine if we are building or updating this stack action = UPDATING if stack_exits_bool else BUILDING stack_state = self.get_stack_state() # print stack template to a string stack_template_str = json.dumps(rendered_stack_template) service_name = self.params.get('service-name') service_alias = self.params.get('service-alias') if action == UPDATING: analyze_changes = input( "(BETA!) Analyze changes associated with this stack update? (y/n)> " ) if analyze_changes and analyze_changes == 'y': # get stack params stack_params = self.cf_param_map.get_params_array( self.params, self.session, self.name, True) # Get the optional "staging" location where the stack template can be staged. # The location is only used if the template string exceeds Amazon API's character limit. template_staging_path = self.params.get( 'template-staging-s3-path', "") change_arn, change_error = self.analyze_changes( template_string=stack_template_str, stack_params=stack_params, template_staging_path=template_staging_path) if not change_error: print( "Changes associated with this update can be viewed via the cloudformation console" ) print( "See the 'proposed-changes' change set via the 'Change Sets' tab on the %s stack" % self.name) else: print("Change set creation failed with error: %s" % change_error)
def build(self, params, deploy_mode_bool=False, context=""):
    """Build (create or apply) each kubernetes resource in the resource array.

    Each rendered resource is written to a file and handed to kubectl,
    using 'apply' when the resource already exists and 'create' otherwise.
    Processing aborts on the first failure.

    Args:
        params: Params instance
        deploy_mode_bool: unused here; kept for interface parity with dryrun
        context: kubectl context string

    Returns:
        err: "" on success, error message on the first failure
    """
    self.params = params

    # render intrinsics in resources
    rendered_resources = apply_intrinsics(self.resource_array, params)

    err = ""
    for resource in rendered_resources:

        resource_name = ""
        kind = ""

        if 'metadata' in resource and 'name' in resource['metadata']:
            resource_name = resource['metadata']['name']
            print("building resource %s"%resource_name)
        else:
            # bug fix: the original printed and aborted here without
            # setting err, so the failure was reported as success
            err = "resource lacks a 'name' attribute. aborting"
            print(err)
            break

        if 'kind' in resource:
            kind = resource['kind']
        else:
            err = "resource lacks a 'kind' attribute. aborting"
            break

        resource_exists,err = self.resource_exists(resource_name,kind)
        if err:
            # robustness: could not determine resource state; don't guess
            break

        if resource_exists:
            action = "apply"
        else:
            action = "create"

        # write the rendered resource to a file kubectl can consume
        resource_file_path = dump_dictionary(resource,
                                             self.params.get("servicefile-path"),
                                             "%s.json"%kind.lower())

        contextual_action_str = self.get_context_str(action, context)

        kubectl_command = "kubectl %s -f %s"%(contextual_action_str, resource_file_path)

        output, err = self.run_kubectl(kubectl_command)

        if not err:
            print("%s '%s' created"%(kind,resource_name))
        else:
            err = "build of '%s' resource '%s' failed with error: %s. aborting"%(kind,resource_name,err)
            break

    return err
def cost(self, params, context): self.params = params # initialize a session with boto3 self.session, err = get_session(params) self.name = get_stack_name(self.params) # apply intrinsics to the stack template rendered_stack_template = apply_intrinsics(self.serialize(), self.params) stack_exits_bool = self.exists() # determine if we are building or updating this stack action = UPDATING if stack_exits_bool else BUILDING stack_state = self.get_stack_state() # print stack template to a string stack_template_str = json.dumps(rendered_stack_template) service_name = self.params.get('service-name') service_alias = self.params.get('service-alias') estimate_cost = input( "Estimate cost associate with stack resources? (y/n)> ") if estimate_cost and estimate_cost == 'y': # get stack params stack_params = self.cf_param_map.get_params_array( self.params, self.session, self.name, True) # Get the optional "staging" location where the stack template can be staged. # The location is only used if the template string exceeds Amazon API's character limit. template_staging_path = self.params.get('template-staging-s3-path', "") cost_response, cost_error = self.cost_stack( template_string=stack_template_str, stack_params=stack_params, template_staging_path=template_staging_path) if not cost_error: print( "Cost of the resources for this service can be viewed here: %s" % (cost_response)) else: print("Costing failed: %s" % cost_error)
def test_include_yaml(self):
    """yac-include pulls in a yaml file and renders the refs inside it."""
    params = {"user-name": {"value": "henry-grantham"}}

    test_dict = [{
        "yac-include": "yac/tests/intrinsics/vectors/include.yaml"
    }]

    # run test
    rendered = apply_intrinsics(test_dict, Params(params))

    # the included file references user-name under its "key" field
    self.assertTrue("henry-grantham" in rendered[0]["key"])
def test_list_of_lists(self):
    """yac-ref intrinsics nested inside Fn::Join lists get rendered."""
    params = {
        "ssl-cert" : {
            "type" : "string",
            "value": "godzilla"
        },
        "s3_path": {
            "type" : "boolean",
            "value": "/sets/jira/dev"
        }
    }

    test_dict = {"Listeners": [
        {
            "InstancePort": {"Ref": "WebServerPort"},
            "SSLCertificateId": {
                "Fn::Join": ["", [
                    "arn:aws:iam::",
                    {"Ref": "AWS::AccountId"},
                    ":server-certificate",
                    "/",
                    {"yac-ref": "ssl-cert"}
                ]]
            },
            "LoadBalancerPort": "443",
            "Protocol": "HTTPS",
            "InstanceProtocol": "HTTPS"
        }
    ]}

    # render, then flatten to a string for a containment check
    rendered_str = json.dumps(apply_intrinsics(test_dict, Params(params)))

    self.assertTrue("godzilla" in rendered_str)
def dryrun(self, params, deploy_mode_bool=False, context=""):
    """Preview what build() would do per resource, without changing anything.

    Returns a non-empty error string if a resource is malformed or its
    current state cannot be inspected; "" otherwise.
    """
    err = ""
    self.params = params

    # render intrinsics in resources
    rendered_resources = apply_intrinsics(self.resource_array, params)

    for resource in rendered_resources:

        # every resource must carry metadata.name ...
        if 'metadata' in resource and 'name' in resource['metadata']:
            resource_name = resource['metadata']['name']
            print("building resource %s"%resource_name)
        else:
            err = "resource lacks a name. aborting"
            break

        # ... and a kind
        if 'kind' in resource:
            kind = resource['kind']
        else:
            err = "resource lacks a kind attribute. aborting"
            break

        resource_exists,err = self.resource_exists(resource_name,kind)

        if err:
            err = "Exception when attempting to inspect the %s %s resource: %s\n"%(resource_name, kind, err)
            break

        if resource_exists:
            print("'%s' resource '%s' will be updated"%(kind,resource_name))
        else:
            print("'%s' resource '%s' will be created as it does not yet exist"%(kind,resource_name))

    if not err:
        self.show_rendered_templates(rendered_resources, 'resources', deploy_mode_bool)

    return err
def delete(self, context, params):
    """Delete every rendered secret that currently exists in the namespace.

    Deletion continues past individual failures; the last failure's
    message is returned ("" when everything succeeded).
    """
    # load context
    err = load_context(context)
    if err:
        # do not proceed if context could not be loaded
        return err

    # get the namespace from the current context
    self.namespace, err = get_current_namespace()
    if err:
        # do not proceed if namespace not found
        return err

    self.api = self.get_core_api()

    # render intrinsics in secrets
    rendered_secrets = apply_intrinsics(self.resource_array, params)

    # save a hash of the rendered secrets into the params for downstream
    # consumption
    set_secrets_hash(rendered_secrets, params)

    for this_secret in rendered_secrets:

        name = this_secret['metadata']['name']

        exists, err = self.secret_exists(name)

        if exists and not err:
            try:
                print("deleting secret: %s"%name)
                self.api.delete_namespaced_secret(name,
                                                  self.namespace,
                                                  kubernetes.client.V1DeleteOptions())
            except ApiException as e:
                err = "\n%s"%(json.dumps(json.loads(e.body),indent=2))

    return err
def delete(self, context, params):
    """Delete every rendered deployment that exists in the namespace.

    Stops at the first deletion failure and returns its message
    ("" when everything succeeded).
    """
    # load context
    err = load_context(context)
    if err:
        # do not proceed if context could not be loaded
        return err

    # get the namespace from the current context
    self.namespace, err = get_current_namespace()
    if err:
        # do not proceed if namespace not found
        return err

    self.api = self.get_apps_api()

    # render intrinsics in deployments
    rendered_deployments = apply_intrinsics(self.resource_array, params)

    for this_deployment in rendered_deployments:

        name = this_deployment['metadata']['name']

        exists, err = self.deployment_exists(name)

        if exists and not err:
            try:
                print("deleting deployment: %s in namespace: %s" %
                      (name, self.namespace))
                # orphan the replica sets/pods rather than cascading
                self.api.delete_namespaced_deployment(
                    name,
                    self.namespace,
                    kubernetes.client.V1DeleteOptions(),
                    propagation_policy='Orphan')
            except ApiException as e:
                err = "\n%s" % (json.dumps(json.loads(e.body), indent=2))
                break

    return err
def test_include_nonexistant(self):
    """yac-include of a missing file raises IntrinsicsError."""
    params = {"user-name": {"value": "henry-grantham"}}

    test_dict = [{
        "yac-include": "yac/tests/intrinsics/vectors/include.nonexistant"
    }]

    # test that an error is raised
    raised = False
    try:
        apply_intrinsics(test_dict, Params(params))
    except IntrinsicsError as e:
        print(e)
        raised = True

    self.assertTrue(raised)
def login(self):
    """Log in to the docker registry via the api client.

    Returns "" on success, or the docker APIError on failure.
    """
    err = ""

    # render intrinsics in the registry
    creds = apply_intrinsics(self.registry, self.params)

    try:
        print("login using api client")
        response = self.client.login(username = creds['username'],
                                     password = creds['password'],
                                     registry = creds['host'])
        print(response)
    except docker.errors.APIError as ae:
        err = ae

    return err
def dryrun(self, params, deploy_mode_bool=False, context=""): self.params = params # initialize a session with boto3 self.session, err = get_session(params) self.name = get_stack_name(self.params) # deploy any boot files specified by the service self.boot_files.deploy(self.params, context, dry_run_bool=True) # apply intrinsics to the stack template rendered_stack_template = apply_intrinsics(self.serialize(), self.params) stack_exits_bool = self.exists() # determine if we are building or updating this stack action = UPDATING if stack_exits_bool else BUILDING stack_state = self.get_stack_state() service_name = self.params.get('service-name') service_alias = self.params.get('service-alias') servicefile_path = self.params.get('servicefile-path') print("%s (dry-run) the %s service aliased as '%s'" % (action, service_name, service_alias)) print("Stack state is currently: %s." % stack_state) print("Service stack will be named: %s" % self.name) # show the rendered templates self.show_rendered_templates(rendered_stack_template, deploy_mode_bool, service_alias) stack_params = self.cf_param_map.get_params_array( self.params, self.session, self.name) if stack_params: print("Param mapping:\n%s" % stack_params) print("Sanity check the params above.")
def _get_logger_configs(logs_full_path, log_level="INFO"):
    """Build logger configs with the log level and log path rendered in.

    Args:
        logs_full_path: full path of the deploy log file; its parent
            directory is created if missing.
        log_level: logging level name to render into the configs.

    Returns:
        dict: logger configs loaded from config/logger.conf with the
        'log-level' and 'deploy-log-path' references rendered.
    """
    logs_path = os.path.dirname(logs_full_path)

    # bug fix: exist_ok avoids the check-then-create race of the original
    # os.path.exists() guard; the truthiness check avoids makedirs("")
    # raising when logs_full_path has no directory component
    if logs_path:
        os.makedirs(logs_path, exist_ok=True)

    with open(os.path.join(get_root_path(), "config/logger.conf"),
              'r') as config_file:
        logger_configs_with_refs = json.load(config_file)

    key_values = Params({})
    key_values.set("log-level", log_level)
    key_values.set("deploy-log-path", logs_full_path)

    # render log level and log path into logger configs
    return apply_intrinsics(logger_configs_with_refs, key_values)
def get_path(self, params):
    """Return the path of the params cache file, or "" when caching is disabled.

    The configured path may be a plain string, or a dict of intrinsics
    rendered against params (so dynamic params like 'env' can pivot the
    cache location). When caching is enabled but no path is configured,
    a default under the service-alias home dump path is used.

    Args:
        params: Params instance (read only when a path must be derived).

    Returns:
        str: cache file path, or "" when caching is disabled.
    """
    # default cache path
    cache_path = ""

    # guard clause: nothing to compute when caching is off
    if not self.enabled:
        return cache_path

    if not self.path:
        # no path configured: default to <home-dump-path>/params.json
        cache_path_base = get_home_dump_path(params.get('service-alias'))
        cache_path = os.path.join(cache_path_base, "params.json")

    # idiom fix: isinstance() instead of type() comparison
    elif isinstance(self.path, dict):
        # render intrinsics in the cache path
        # note: this allows dynamic params like 'env' to be used as a pivot
        # on the location of the cache file
        cache_path = apply_intrinsics(self.path, params)

    elif isinstance(self.path, str):
        cache_path = self.path

    return cache_path
def initialize(self, params):
    """Load the vault dictionary from file and mark this vault ready.

    Args:
        params: Params instance used to render the vault password and
            resolve the servicefile path.

    Returns:
        err: "" on success, message when the vault file can't be loaded.
    """
    err = ""

    servicefile_path = params.get("servicefile-path", "")

    # apply intrinsics in the vault password
    # NOTE(review): vault_pwd is never used below — either dead code, or
    # apply_intrinsics is being called for a side effect; confirm.
    vault_pwd = apply_intrinsics(self.vault_pwd, params)

    self.vault, err = load_dict_from_file(self.vault_path,
                                          servicefile_path)

    if not err:
        self.initialized = True
        self.ready = True
    else:
        # NOTE(review): the loader's own error message is discarded and
        # replaced with this generic one.
        err = "vault at %s does not exist" % self.vault_path
        print(err)

    return err
def test_join(self):
    """Nested yac-join intrinsics render inside-out."""
    params = {
        "image-name": {
            "type": "string",
            "value": "godzilla"
        },
        "image-label": {
            "type": "boolean",
            "value": "1.0"
        }
    }

    # builds <registry>/<image-name>:<image-label>
    test_dict = {
        "yac-join": [":", [
            {"yac-join": ["", [
                "gitlab-registry.nordstrom.com/",
                {"yac-ref": "image-name"}
            ]]},
            {"yac-ref": "image-label"}
        ]]
    }

    # run test
    rendered_str = json.dumps(apply_intrinsics(test_dict, Params(params)))
    print(rendered_str)

    self.assertTrue("godzilla" in rendered_str)
def delete(self, context, params):
    """Delete every rendered service that currently exists in the namespace.

    Deletion continues past individual failures; the last failure is
    returned ("" when everything succeeded).
    """
    # load context
    err = load_context(context)
    if err:
        # do not proceed if context could not be loaded
        return err

    # get the namespace from the current context
    self.namespace, err = get_current_namespace()
    if err:
        # do not proceed if namespace not found
        return err

    self.api = self.get_core_api()

    # render intrinsics in the services
    rendered_svcs = apply_intrinsics(self.resource_array, params)

    for this_service in rendered_svcs:

        name = this_service['metadata']['name']

        exists, err = self._service_exists(name)

        if exists and not err:
            try:
                print("deleting service: %s in namespace: %s" %
                      (name, self.namespace))
                self.api.delete_namespaced_service(name,
                                                   self.namespace,
                                                   kubernetes.client.V1DeleteOptions())
            except ApiException as e:
                err = e

    return err
def initialize(self, params): servicefile_path = params.get("servicefile-path", "") # apply intrinsics in the vault password vault_pwd = apply_intrinsics(self.vault_pwd, params) full_path = os.path.join(servicefile_path, self.vault_path) if os.path.exists(full_path): if vault_pwd: try: self.kp = PyKeePass(full_path, password=vault_pwd) self.ready = True self.initialized = True except IOError as e: print(e) else: self.ready = False self.initialized = False
def test_default_name(self):
    """Without a naming-convention param, yac-name falls back to
    <service-alias>-<tag>."""
    params = {
        "service-alias": {
            "value": "jira"
        },
        "availability-zones": {
            "value": ["us-west-2a"]
        }
    }

    test_dict = {
        "Type": "AWS::AutoScaling::AutoScalingGroup",
        "Name": {"yac-name": "asg"}
    }

    rendered = apply_intrinsics(test_dict, Params(params))

    self.assertTrue(rendered['Name'] == 'jira-asg')
def cost(self, params, context=""):
    """Estimate a cost figure for the deployments in resource_array.

    Cost is derived from replica counts plus the memory and cpu
    requests of each deployment. Returns 0 when there are no resources.

    Args:
        params: Params instance used to render the deployments.
        context: unused here; kept for interface parity.

    Returns:
        numeric total cost.
    """
    total_cost = 0

    if self.resource_array:

        # render intrinsics in deployments
        rendered_deployments = apply_intrinsics(self.resource_array, params)

        # cost lookup for supported memory request sizes
        mem_cost_map = {"129M": 3.96}
        per_cpu_cost = 15.72

        # one requests dict and one replica count per deployment
        resources = search("[*].spec.template.spec.resources.requests",
                           rendered_deployments, [])
        pod_counts = search("[*].spec.replicas", rendered_deployments)

        mem_cost = 0
        cpu_cost = 0

        print(
            "deployment cost is based on number of pods and memory and cpu requests for each"
        )

        # NOTE(review): mem_cost and cpu_cost are not reset per resource,
        # so a resource missing 'memory' or 'cpu' inherits the previous
        # resource's value — confirm whether this carry-over is intended.
        for i, resource in enumerate(resources):

            pod_count = float(pod_counts[i])

            if 'memory' in resource and resource['memory'] in mem_cost_map:
                mem_cost = mem_cost_map[resource['memory']]

            if 'cpu' in resource:
                cpu_cost = float(resource['cpu']) * per_cpu_cost
                total_cost = total_cost + 6.05 * (pod_count *
                                                  (mem_cost + cpu_cost))
            else:
                # no cpu request: scale by pod count alone
                total_cost = total_cost + 6.05 * (pod_count)

    return total_cost
def _deploy_dirs(self, context, dry_run_bool):
    """Render each configured boot directory and deploy it to its destination.

    For each directory: render its intrinsics, template its files into a
    staging area under the dump path, then sync the staged tree to the
    destination — an s3 url, or a local directory (mostly for testing).

    Args:
        context: deploy context passed through to the s3 sync.
        dry_run_bool: when True, s3 syncs run in dry-run mode and local
            copies are skipped; staged files are left for inspection.
    """
    # NOTE(review): servicefile_path is never used below — candidate
    # for removal.
    servicefile_path = self.params.get("servicefile-path")

    dump_path = get_dump_path(self.params.get("service-alias"))

    for this_idir in self.directories:

        # render intrinsics in the file dictionary
        this_dir = apply_intrinsics(this_idir, self.params)

        # render files under the dump path
        rendered_dir_path = os.path.join(dump_path, this_dir['src'])

        # render file variables in the source directory
        apply_templates_in_dir(this_dir['src'], self.params,
                               rendered_dir_path, True)

        # if destination is s3 bucket
        if self._is_s3_destination(this_dir['dest']):

            # sync directories to s3 destination
            self.sync_dir_to_s3(context, rendered_dir_path, this_dir['dest'],
                                dry_run_bool)

        # if destination is another local dir (mostly used for testing)
        elif not dry_run_bool:

            # clear destination dir if it exists
            if os.path.exists(this_dir['dest']):
                shutil.rmtree(this_dir['dest'])

            # recursively copy files to local directory
            shutil.copytree(rendered_dir_path, this_dir['dest'])

    if self.directories and dry_run_bool:
        # NOTE(review): rendered_dir_path is the last loop iteration's
        # path — with multiple directories only the last is reported.
        print("Rendered boot directory files can be viewed under: %s" %
              (rendered_dir_path))