def load_all(self, root):
    """Load all data generated by a testsuite run.

    :param root: testsuite root directory
    :type root: str
    """
    print("== Registering test execution dumps from %s ..." % root)

    # Search for all directories containing a testcase status
    # dump file and load the data available there
    for p in find(root, STATUSDATA_FILE, follow_symlinks=True):
        self.load_test(os.path.dirname(p))
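# Every snippet in this section leans on a ``find`` helper that walks a
# directory tree and returns the files matching a glob pattern (the e3
# ``e3.fs.find`` utility). A minimal sketch of such a helper, assuming
# fnmatch-style patterns on base names and an optional follow_symlinks
# flag; this is an illustration, not the library's implementation:

import fnmatch
import os


def find(root, pattern=None, follow_symlinks=False):
    """Return paths of files under root whose base name matches pattern.

    With no pattern, every file under root is returned.
    """
    result = []
    for dirpath, _dirnames, filenames in os.walk(
            root, followlinks=follow_symlinks):
        for name in filenames:
            if pattern is None or fnmatch.fnmatch(name, pattern):
                result.append(os.path.join(dirpath, name))
    return result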
def get_test_list(self, sublist):
    """Retrieve the list of tests.

    The default method looks for all test.yaml files in the test
    directory. If a test.yaml has a variants field, the test is
    expanded into several tests, each one being associated with a
    given variant. This function may be overridden. At this stage
    self.global_env (after update by the tear_up procedure) is
    available.

    :param sublist: a list of test scenarios or patterns
    :type sublist: list[str]
    :return: the list of selected tests
    :rtype: list[str]
    """
    # First retrieve the list of test.yaml files
    result = [
        os.path.relpath(p, self.test_dir).replace('\\', '/')
        for p in find(self.test_dir, 'test.yaml')
    ]

    if sublist:
        logging.info('filter: %s' % sublist)
        filtered_result = []
        path_selectors = []
        for s in sublist:
            subdir = os.path.relpath(os.path.abspath(s), self.test_dir)
            if s.endswith('/') or s.endswith('\\'):
                subdir += '/'
            path_selectors.append(subdir)

        for p in result:
            for s in path_selectors:
                # Either we have a match or the selected path is the
                # tests root dir or a parent.
                if s == '.' or s == './' or s.startswith('..') or \
                        re.match(s, p):
                    filtered_result.append(p)
                    # Stop at the first matching selector so that a
                    # test is not selected twice
                    break
        result = filtered_result

    logging.info('Found %s tests', len(result))
    logging.debug("tests:\n " + "\n ".join(result))
    return result
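# A hedged illustration of the filtering above, with a hypothetical
# test directory layout. Each selector is made relative to
# self.test_dir and matched against test paths with re.match, so a
# selector naming a directory keeps every test below it:
#
#     <test_dir>/stack/push/test.yaml
#     <test_dir>/stack/pop/test.yaml
#     <test_dir>/queue/test.yaml
#
#     get_test_list(['<test_dir>/stack'])
#     # -> ['stack/push/test.yaml', 'stack/pop/test.yaml']
#
#     get_test_list([])
#     # -> all three test.yaml files, no filtering applied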
def build_tor(self):
    # If we have a local testsuite dir at hand and we haven't done so
    # already, fetch the test results that the QM needs to check
    # TOR/TC consistency:
    if self.local_testsuite_dir and self.passno == 1:
        os.chdir(self.local_testsuite_dir)

        def sync(tr):
            target_dir = os.path.join(
                self.repodir, "testsuite", os.path.dirname(tr))
            if os.path.exists(target_dir):
                cp(tr, target_dir)
            else:
                print("ERROR!! nonexistent target dir for %s" % tr)

        for tr in find(root=".", pattern="tc.dump"):
            sync(tr)

    self.__qm_build(part="tor")
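# A hedged illustration of the path mapping performed by sync above,
# assuming a hypothetical repodir of /work/repo: each tc.dump found
# under the local testsuite dir is copied to the same relative
# location under <repodir>/testsuite (the leading ./ produced by
# find(root=".") is harmless in the join):
#
#     ./Qualif/Ada/stmt/tc.dump
#     -> /work/repo/testsuite/Qualif/Ada/stmt/tc.dump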
def execute_for_stack(self, stack: Stack) -> int:
    """Execute application for a given stack and return exit status.

    :param stack: the stack on which the application executes
    """
    assert self.args is not None
    try:
        if self.args.command in ("push", "update"):
            # Synchronize resources to the S3 bucket
            s3 = self.aws_env.client("s3")
            with tempfile.TemporaryDirectory() as tempd:
                # Push data associated with CFNMain and then all data
                # related to the stack
                self.create_data_dir(root_dir=tempd)
                stack.create_data_dir(root_dir=tempd)

                if self.s3_data_key is not None:
                    # Synchronize data to the bucket before creating
                    # the stack
                    for f in find(tempd):
                        with open(f, "rb") as fd:
                            subkey = os.path.relpath(f, tempd).replace(
                                "\\", "/")
                            logging.info(
                                "Upload %s to %s:%s%s",
                                subkey,
                                self.s3_bucket,
                                self.s3_data_key,
                                subkey,
                            )
                            s3.put_object(
                                Bucket=self.s3_bucket,
                                Body=fd,
                                ServerSideEncryption="AES256",
                                Key=self.s3_data_key + subkey,
                            )

            if self.s3_template_key is not None:
                logging.info("Upload template to %s:%s",
                             self.s3_bucket, self.s3_template_key)
                s3.put_object(
                    Bucket=self.s3_bucket,
                    Body=stack.body.encode("utf-8"),
                    ServerSideEncryption="AES256",
                    Key=self.s3_template_key,
                )

            logging.info("Validate template for stack %s" % stack.name)
            try:
                stack.validate(url=self.s3_template_url)
            except Exception:
                logging.error("Invalid cloud formation template")
                logging.error(stack.body)
                raise

            if stack.exists():
                changeset_name = "changeset%s" % int(time.time())
                logging.info("Push changeset: %s" % changeset_name)
                stack.create_change_set(changeset_name,
                                        url=self.s3_template_url)
                result = stack.describe_change_set(changeset_name)
                while result["Status"] in ("CREATE_PENDING",
                                           "CREATE_IN_PROGRESS"):
                    time.sleep(1.0)
                    result = stack.describe_change_set(changeset_name)

                if result["Status"] == "FAILED":
                    change_executed = False
                    if ("The submitted information didn't contain changes"
                            in result["StatusReason"]):
                        logging.warning(result["StatusReason"])
                        change_executed = True
                    else:
                        logging.error(result["StatusReason"])
                    stack.delete_change_set(changeset_name)
                    if not change_executed:
                        return 1
                else:
                    for el in result["Changes"]:
                        if "ResourceChange" not in el:
                            continue
                        logging.info(
                            "%-8s %-32s: (replacement:%s)",
                            el["ResourceChange"].get("Action"),
                            el["ResourceChange"].get("LogicalResourceId"),
                            el["ResourceChange"].get("Replacement", "n/a"),
                        )
                    if self.args.apply_changeset:
                        ask = input("Apply change (y/N): ")
                        # ask[:1] rather than ask[0]: empty input (the
                        # default answer) must not raise IndexError
                        if ask[:1] in ("y", "Y"):
                            stack.execute_change_set(
                                changeset_name=changeset_name,
                                wait=self.args.wait_stack_creation,
                            )
                    return 0
            else:
                logging.info("Create new stack")
                stack.create(url=self.s3_template_url,
                             wait=self.args.wait_stack_creation)
        elif self.args.command == "show":
            print(stack.body)
        elif self.args.command == "protect":
            # Enable termination protection
            stack.enable_termination_protection()
            if self.stack_policy_body is not None:
                stack.set_stack_policy(self.stack_policy_body)
            else:
                print("No stack policy to set")
        elif self.args.command == "show-cfn-policy":
            try:
                print(
                    json.dumps(
                        stack.cfn_policy_document().as_dict,  # type: ignore
                        indent=2,
                    ))
            except AttributeError as attr_e:
                print(
                    f"command supported only with troposphere stacks: "
                    f"{attr_e}")
        elif self.args.command == "delete":
            stack.delete(wait=self.args.wait_stack_creation)

        return 0
    except botocore.exceptions.ClientError as e:
        logging.error(str(e))
        return 1
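# A hedged usage sketch for the dispatch above: a concrete deployment
# tool subclasses CFNMain, supplies the stack to operate on, and hands
# control to execute(), which parses the command line and calls into
# the logic shown here. MyDeployApp and build_my_stack are
# hypothetical names, and the import path and constructor arguments
# are assumptions, not the library's documented API.

import sys

from e3.aws.cfn.main import CFNMain  # import path assumed


class MyDeployApp(CFNMain):
    def create_stack(self):
        # build_my_stack is a hypothetical helper returning a Stack
        # describing the resources to deploy
        return build_my_stack()


if __name__ == "__main__":
    sys.exit(MyDeployApp(regions=["eu-west-1"]).execute())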
def execute(self, args=None, known_args_only=False, aws_env=None):
    """Execute application and return exit status.

    See parse_args arguments.
    """
    super(CFNMain, self).parse_args(args, known_args_only)

    if aws_env is not None:
        self.aws_env = aws_env
    else:
        self.aws_env = AWSEnv(regions=self.regions,
                              profile=self.args.profile)
        self.aws_env.default_region = self.args.region

    try:
        if self.args.command == 'push':
            # Create the S3 client up front: it is needed for both the
            # data and the template uploads
            s3 = self.aws_env.client('s3')

            if self.data_dir is not None and self.s3_data_key is not None:
                # Synchronize data to the bucket before creating the
                # stack
                for f in find(self.data_dir):
                    with open(f, 'rb') as fd:
                        subkey = os.path.relpath(
                            f, self.data_dir).replace('\\', '/')
                        logging.info('Upload %s to %s:%s%s',
                                     subkey, self.s3_bucket,
                                     self.s3_data_key, subkey)
                        s3.put_object(Bucket=self.s3_bucket,
                                      Body=fd,
                                      ServerSideEncryption='AES256',
                                      Key=self.s3_data_key + subkey)

            s = self.create_stack()

            if self.s3_template_key is not None:
                logging.info('Upload template to %s:%s',
                             self.s3_bucket, self.s3_template_key)
                s3.put_object(Bucket=self.s3_bucket,
                              Body=s.body.encode('utf-8'),
                              ServerSideEncryption='AES256',
                              Key=self.s3_template_key)

            logging.info('Validate template for stack %s' % s.name)
            s.validate(url=self.s3_template_url)

            if s.exists():
                changeset_name = 'changeset%s' % int(time.time())
                logging.info('Push changeset: %s' % changeset_name)
                s.create_change_set(changeset_name,
                                    url=self.s3_template_url)
                result = s.describe_change_set(changeset_name)
                while result['Status'] in ('CREATE_PENDING',
                                           'CREATE_IN_PROGRESS'):
                    time.sleep(1.0)
                    result = s.describe_change_set(changeset_name)

                if result['Status'] == 'FAILED':
                    logging.error(result['StatusReason'])
                    s.delete_change_set(changeset_name)
                    return 1
                else:
                    print(yaml.safe_dump(result['Changes']))
                    return 0
            else:
                logging.info('Create new stack')
                s.create(url=self.s3_template_url)
                state = s.state()
                if self.args.wait:
                    while 'PROGRESS' in state['Stacks'][0]['StackStatus']:
                        result = s.resource_status(in_progress_only=False)
                        print(result)
                        time.sleep(10.0)
                        state = s.state()
        else:
            return 1
        return 0
    except botocore.exceptions.ClientError as e:
        logging.error(str(e))
        return 1
def gplize(anod_instance, src_dir, force=False):
    """Remove GPL specific exception.

    This operates recursively on all .h .c .ad* .gp* files present in
    the directory passed as parameter.

    :param anod_instance: an Anod instance
    :type anod_instance: Anod
    :param src_dir: the directory to process
    :type src_dir: str
    :param force: force transformation to gpl
    :type force: bool
    """
    def remove_paragraph(filename):
        # Raw strings: the patterns contain \* and \/ escapes that are
        # invalid in regular string literals
        begin = (
            r'-- .*As a .*special .*exception.* if other '
            r'files .*instantiate .*generics from .*(this)? .*|'
            r'-- .*As a .*special .*exception under Section 7 of GPL '
            r'version 3, you are.*|'
            r' \* .*As a .*special .*exception.* if you .*link .*this'
            r' file .*with other .*files to.*|'
            r' \* .*As a .*special .*exception under Section 7 of GPL '
            r'version 3, you are.*|'
            r'\/\/ .*As a .*special .*exception.* if other files '
            r'.*instantiate .*generics from this.*|'
            r'\/\/ .*As a .*special .*exception under Section 7 of GPL '
            r'version 3, you are.*')
        end = (
            r'-- .*covered .*by .*the .*GNU Public License.*|'
            r'-- .*version 3.1, as published by the Free Software '
            r'Foundation.*--|'
            r'\/\/ .*covered by the GNU Public License.*|'
            r'.*file .*might be covered by the GNU Public License.*|'
            r'\/\/ .*version 3.1, as published by the Free Software'
            r' Foundation.*\/\/|'
            r' \* .*version 3.1, as published by the Free Software'
            r' Foundation.*\*')

        output = StringIO()
        state = 2
        i = 0
        try:
            with open(filename) as f:
                for line in f:
                    # Detect comment type (from the second line: the
                    # first line of a header is the box boundary)
                    if i == 1:
                        comment = line[0:2]
                        comment1 = comment
                        comment2 = comment
                        if comment == ' *':
                            comment2 = '* '
                    i += 1

                    # Detect beginning of exception paragraph
                    if re.match(begin, line):
                        state = 0
                        output.write(
                            comment1 + (74 * " ") + comment2 + "\n")
                        continue

                    # Detect end of exception paragraph
                    if re.match(end, line):
                        if state == 0:
                            state = 1
                            output.write(
                                comment1 + (74 * " ") + comment2 + "\n")
                            continue

                    # Skip one line after the paragraph
                    if state == 1:
                        state = 3

                    # Replace exception lines with blank comment lines
                    if state == 0:
                        output.write(
                            comment1 + (74 * " ") + comment2 + "\n")
                        continue

                    # Write non exception lines
                    if (state == 2) or (state == 3):
                        output.write(line)

            if state == 0:
                raise AnodError(
                    'gplize: end of paragraph was not detected in %s'
                    % filename)

            with open(filename, "w") as dest_f:
                dest_f.write(output.getvalue())
        finally:
            output.close()

    if anod_instance.sandbox.config.get('release_mode', '') == 'gpl' \
            or force:
        anod_instance.log.debug('move files to GPL license')
        rm(os.path.join(src_dir, 'COPYING.RUNTIME'))
        gpb_files = find(src_dir, "*.gp*")
        ada_files = find(src_dir, "*.ad*")
        c_files = find(src_dir, "*.[hc]")
        java_files = find(src_dir, "*.java")
        for file_list in (gpb_files, ada_files, c_files, java_files):
            for filename in file_list:
                remove_paragraph(filename)
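# A hedged illustration of what remove_paragraph does to an Ada header
# (text abridged and widths approximate): each line of the GPL
# linking-exception paragraph is replaced by a blank comment line of
# fixed width, so the surrounding comment box keeps its shape.
#
# Before:
#     --  As a special exception, if other files instantiate       --
#     --  generics from this unit ...                               --
#     --  ... might be covered by the GNU Public License.          --
#
# After:
#     --                                                            --
#     --                                                            --
#     --                                                            --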
def execute(self, args=None, known_args_only=False, aws_env=None):
    """Execute application and return exit status.

    See parse_args arguments.
    """
    super(CFNMain, self).parse_args(args, known_args_only)

    if aws_env is not None:
        self.aws_env = aws_env
    else:
        if self.assume_role:
            main_session = Session(regions=self.regions,
                                   profile=self.args.profile)
            self.aws_env = main_session.assume_role(
                self.assume_role[0], self.assume_role[1])
            # ??? needed since we still use a global variable for AWSEnv
            Env().aws_env = self.aws_env
        else:
            self.aws_env = AWSEnv(regions=self.regions,
                                  profile=self.args.profile)
        self.aws_env.default_region = self.args.region

    try:
        if self.args.command in ("push", "update"):
            # Create the S3 client up front: it is needed for both the
            # data and the template uploads
            s3 = self.aws_env.client("s3")

            if self.data_dir is not None and self.s3_data_key is not None:
                # Synchronize data to the bucket before creating the
                # stack
                for f in find(self.data_dir):
                    with open(f, "rb") as fd:
                        subkey = os.path.relpath(
                            f, self.data_dir).replace("\\", "/")
                        logging.info(
                            "Upload %s to %s:%s%s",
                            subkey,
                            self.s3_bucket,
                            self.s3_data_key,
                            subkey,
                        )
                        s3.put_object(
                            Bucket=self.s3_bucket,
                            Body=fd,
                            ServerSideEncryption="AES256",
                            Key=self.s3_data_key + subkey,
                        )

            s = self.create_stack()

            if self.s3_template_key is not None:
                logging.info("Upload template to %s:%s",
                             self.s3_bucket, self.s3_template_key)
                s3.put_object(
                    Bucket=self.s3_bucket,
                    Body=s.body.encode("utf-8"),
                    ServerSideEncryption="AES256",
                    Key=self.s3_template_key,
                )

            logging.info("Validate template for stack %s" % s.name)
            try:
                s.validate(url=self.s3_template_url)
            except Exception:
                logging.error("Invalid cloud formation template")
                logging.error(s.body)
                raise

            if s.exists():
                changeset_name = "changeset%s" % int(time.time())
                logging.info("Push changeset: %s" % changeset_name)
                s.create_change_set(changeset_name,
                                    url=self.s3_template_url)
                result = s.describe_change_set(changeset_name)
                while result["Status"] in ("CREATE_PENDING",
                                           "CREATE_IN_PROGRESS"):
                    time.sleep(1.0)
                    result = s.describe_change_set(changeset_name)

                if result["Status"] == "FAILED":
                    logging.error(result["StatusReason"])
                    s.delete_change_set(changeset_name)
                    return 1
                else:
                    for el in result["Changes"]:
                        if "ResourceChange" not in el:
                            continue
                        logging.info(
                            "%-8s %-32s: (replacement:%s)",
                            el["ResourceChange"].get("Action"),
                            el["ResourceChange"].get("LogicalResourceId"),
                            el["ResourceChange"].get("Replacement", "n/a"),
                        )
                    if self.args.apply_changeset:
                        ask = input("Apply change (y/N): ")
                        # ask[:1] rather than ask[0]: empty input (the
                        # default answer) must not raise IndexError
                        if ask[:1] in ("y", "Y"):
                            return s.execute_change_set(
                                changeset_name=changeset_name, wait=True)
                    return 0
            else:
                logging.info("Create new stack")
                s.create(url=self.s3_template_url)
                state = s.state()
                if self.args.wait_stack_creation:
                    logging.info("waiting for stack creation...")
                    while "PROGRESS" in state["Stacks"][0]["StackStatus"]:
                        s.resource_status(in_progress_only=False)
                        time.sleep(10.0)
                        state = s.state()
                    logging.info("done")
        elif self.args.command == "show":
            s = self.create_stack()
            print(s.body)
        elif self.args.command == "protect":
            s = self.create_stack()
            # Enable termination protection
            s.enable_termination_protection()
            if self.stack_policy_body is not None:
                s.set_stack_policy(self.stack_policy_body)
            else:
                print("No stack policy to set")

        return 0
    except botocore.exceptions.ClientError as e:
        logging.error(str(e))
        return 1
def execute_for_stack(self, stack: Stack) -> int:
    """Execute application for a given stack and return exit status.

    :param stack: the stack on which the application executes
    """
    try:
        if self.args.command in ("push", "update"):
            # Create the S3 client up front: it is needed for both the
            # data and the template uploads
            s3 = self.aws_env.client("s3")

            if self.data_dir is not None and self.s3_data_key is not None:
                # Synchronize data to the bucket before creating the
                # stack
                for f in find(self.data_dir):
                    with open(f, "rb") as fd:
                        subkey = os.path.relpath(f, self.data_dir).replace(
                            "\\", "/"
                        )
                        logging.info(
                            "Upload %s to %s:%s%s",
                            subkey,
                            self.s3_bucket,
                            self.s3_data_key,
                            subkey,
                        )
                        s3.put_object(
                            Bucket=self.s3_bucket,
                            Body=fd,
                            ServerSideEncryption="AES256",
                            Key=self.s3_data_key + subkey,
                        )

            if self.s3_template_key is not None:
                logging.info(
                    "Upload template to %s:%s",
                    self.s3_bucket, self.s3_template_key
                )
                s3.put_object(
                    Bucket=self.s3_bucket,
                    Body=stack.body.encode("utf-8"),
                    ServerSideEncryption="AES256",
                    Key=self.s3_template_key,
                )

            logging.info("Validate template for stack %s" % stack.name)
            try:
                stack.validate(url=self.s3_template_url)
            except Exception:
                logging.error("Invalid cloud formation template")
                logging.error(stack.body)
                raise

            if stack.exists():
                changeset_name = "changeset%s" % int(time.time())
                logging.info("Push changeset: %s" % changeset_name)
                stack.create_change_set(changeset_name,
                                        url=self.s3_template_url)
                result = stack.describe_change_set(changeset_name)
                while result["Status"] in ("CREATE_PENDING",
                                           "CREATE_IN_PROGRESS"):
                    time.sleep(1.0)
                    result = stack.describe_change_set(changeset_name)

                if result["Status"] == "FAILED":
                    logging.error(result["StatusReason"])
                    stack.delete_change_set(changeset_name)
                    return 1
                else:
                    for el in result["Changes"]:
                        if "ResourceChange" not in el:
                            continue
                        logging.info(
                            "%-8s %-32s: (replacement:%s)",
                            el["ResourceChange"].get("Action"),
                            el["ResourceChange"].get("LogicalResourceId"),
                            el["ResourceChange"].get("Replacement", "n/a"),
                        )
                    if self.args.apply_changeset:
                        ask = input("Apply change (y/N): ")
                        # ask[:1] rather than ask[0]: empty input (the
                        # default answer) must not raise IndexError
                        if ask[:1] in ("y", "Y"):
                            return stack.execute_change_set(
                                changeset_name=changeset_name, wait=True
                            )
                    return 0
            else:
                logging.info("Create new stack")
                stack.create(url=self.s3_template_url)
                state = stack.state()
                if self.args.wait_stack_creation:
                    logging.info("waiting for stack creation...")
                    while "PROGRESS" in state["Stacks"][0]["StackStatus"]:
                        stack.resource_status(in_progress_only=False)
                        time.sleep(10.0)
                        state = stack.state()
                    logging.info("done")
        elif self.args.command == "show":
            print(stack.body)
        elif self.args.command == "protect":
            # Enable termination protection
            stack.enable_termination_protection()
            if self.stack_policy_body is not None:
                stack.set_stack_policy(self.stack_policy_body)
            else:
                print("No stack policy to set")

        return 0
    except botocore.exceptions.ClientError as e:
        logging.error(str(e))
        return 1