def after_scenario(context, scenario):
    """Behave hook run after each scenario: save container logs, then tear down.

    Logs are collected when userdata ``logs`` is ``force``, or when the
    scenario failed, ``logs`` is ``y``, and compose containers exist on the
    context. Teardown is skipped for scenarios tagged ``doNotDecompose``.
    """
    get_logs = context.config.userdata.get("logs", "N")
    if get_logs.lower() == "force" or (scenario.status == "failed" and get_logs.lower() == "y" and "compose_containers" in context):
        print("Scenario {0} failed. Getting container logs".format(scenario.name))
        file_suffix = "_" + scenario.name.replace(" ", "_") + ".log"
        # get logs from the peer containers
        for containerData in context.compose_containers:
            with open(containerData.containerName + file_suffix, "w+") as logfile:
                sys_rc = subprocess.call(["docker", "logs", containerData.containerName], stdout=logfile, stderr=logfile)
                if sys_rc != 0:
                    print("Cannot get logs for {0}. Docker rc = {1}".format(containerData.containerName, sys_rc))
        # get logs from the chaincode containers
        cc_output, cc_error, cc_returncode = \
            cli_call(["docker", "ps", "-f", "name=dev-", "--format", "{{.Names}}"], expect_success=True)
        for containerName in cc_output.splitlines():
            namePart, sep, junk = containerName.rpartition("-")
            with open(namePart + file_suffix, "w+") as logfile:
                sys_rc = subprocess.call(["docker", "logs", containerName], stdout=logfile, stderr=logfile)
                if sys_rc != 0:
                    # BUG FIX: referenced undefined 'namepart' (NameError); the local is 'namePart'
                    print("Cannot get logs for {0}. Docker rc = {1}".format(namePart, sys_rc))
    if 'doNotDecompose' in scenario.tags:
        if 'compose_yaml' in context:
            print("Not going to decompose after scenario {0}, with yaml '{1}'".format(scenario.name, context.compose_yaml))
    elif 'composition' in context:
        if coverageEnabled(context):
            # First stop the containers to allow for coverage files to be created.
            context.composition.issueCommand(["stop"])
            # Save the coverage files for this scenario before removing containers
            containerNames = [containerData.containerName for containerData in context.compose_containers]
            saveCoverageFiles("coverage", scenario.name.replace(" ", "_"), containerNames, "cov")
        # Tear down the composition whether or not coverage was collected.
        context.composition.decompose()
def decompose_containers(context, scenario):
    """Tear down the docker-compose setup used by *scenario*.

    Unpauses and stops the composed services, saves coverage files when
    coverage is enabled, removes the composed containers, and finally
    force-removes any remaining containers (e.g. chaincode containers).
    The output/error/returncode of the last command run are left on the
    context as ``compose_output`` / ``compose_error`` / ``compose_returncode``.
    """
    fileArgsToDockerCompose = getDockerComposeFileArgsFromYamlFile(context.compose_yaml)
    print("Decomposing with yaml '{0}' after scenario {1}, ".format(context.compose_yaml, scenario.name))

    def run_compose(extra_args):
        # Invoke a docker-compose subcommand and record its result on the context.
        context.compose_output, context.compose_error, context.compose_returncode = \
            cli_call(context, ["docker-compose"] + fileArgsToDockerCompose + extra_args, expect_success=True)

    run_compose(["unpause"])
    run_compose(["stop"])

    # Save the coverage files for this scenario before removing containers
    if coverageEnabled(context):
        containerNames = [data.containerName for data in context.compose_containers]
        saveCoverageFiles("coverage", scenario.name.replace(" ", "_"), containerNames, "cov")

    run_compose(["rm", "-f"])

    # now remove any other containers (chaincodes)
    context.compose_output, context.compose_error, context.compose_returncode = \
        cli_call(context, ["docker", "ps", "-qa"], expect_success=True)
    if context.compose_returncode == 0:
        # Remove each container
        for containerId in context.compose_output.splitlines():
            context.compose_output, context.compose_error, context.compose_returncode = \
                cli_call(context, ["docker", "rm", "-f", containerId], expect_success=True)
def after_scenario(context, scenario):
    """Behave hook run after each scenario: logs, coverage, then teardown.

    Retrieves container logs when requested via the ``logs`` userdata flag,
    saves coverage files when coverage is enabled, and decomposes the
    network unless the scenario is tagged ``doNotDecompose``.
    """
    # Handle logs
    get_logs = context.config.userdata.get("logs", "N")
    if get_logs.lower() == "force" or (scenario.status == "failed" and get_logs.lower() == "y"):
        retrieve_logs(context, scenario)

    # Handle coverage
    if coverageEnabled(context):
        # BUG FIX: the original wrapped this in "for containerData in
        # context.compose_containers:", which rebuilt the same full name list
        # and called saveCoverageFiles once per container. Save exactly once.
        # Save the coverage files for this scenario before removing containers
        containerNames = [containerData.name for containerData in context.compose_containers]
        saveCoverageFiles("coverage", scenario.name.replace(" ", "_"), containerNames, "cov")

    # Handle decomposition
    if 'doNotDecompose' in scenario.tags and 'compose_yaml' in context:
        bdd_log("Not going to decompose after scenario {0}, with yaml '{1}'".format(scenario.name, context.compose_yaml))
    elif context.byon:
        bdd_log("Stopping a BYON (Bring Your Own Network) setup")
        decompose_remote(context, scenario)
    elif 'compose_yaml' in context:
        decompose_containers(context, scenario)
    else:
        bdd_log("Nothing to stop in this setup")
def after_scenario(context, scenario):
    """Behave hook run after each scenario: delegate to the context helper,
    save container logs when requested, tear down the composition, and ask
    the scenario directory to clean up.
    """
    contextHelper = ContextHelper.GetHelper(context=context)
    contextHelper.after_scenario(scenario)
    get_logs = context.config.userdata.get("logs", "N")
    if get_logs.lower() == "force" or (scenario.status == "failed" and get_logs.lower() == "y" and "compose_containers" in context):
        print("Scenario {0} failed. Getting container logs".format(
            scenario.name))
        file_suffix = "_" + scenario.name.replace(" ", "_") + ".log"
        # get logs from the peer containers
        for containerData in context.compose_containers:
            with open(containerData.containerName + file_suffix, "w+") as logfile:
                sys_rc = subprocess.call(
                    ["docker", "logs", containerData.containerName],
                    stdout=logfile, stderr=logfile)
                if sys_rc != 0:
                    print("Cannot get logs for {0}. Docker rc = {1}".format(
                        containerData.containerName, sys_rc))
        # get logs from the chaincode containers
        cc_output, cc_error, cc_returncode = \
            cli_call(["docker", "ps", "-f", "name=dev-", "--format", "{{.Names}}"],
                     expect_success=True)
        for containerName in cc_output.splitlines():
            namePart, sep, junk = containerName.rpartition("-")
            with open(namePart + file_suffix, "w+") as logfile:
                sys_rc = subprocess.call(["docker", "logs", containerName],
                                         stdout=logfile, stderr=logfile)
                if sys_rc != 0:
                    # BUG FIX: referenced undefined 'namepart' (NameError); the local is 'namePart'
                    print("Cannot get logs for {0}. Docker rc = {1}".format(
                        namePart, sys_rc))
    if 'doNotDecompose' in scenario.tags:
        if 'compose_yaml' in context:
            print("Not going to decompose after scenario {0}, with yaml '{1}'".
                  format(scenario.name, context.compose_yaml))
    elif 'composition' in context:
        if coverageEnabled(context):
            # First stop the containers to allow for coverage files to be created.
            context.composition.issueCommand(["stop"])
            # Save the coverage files for this scenario before removing containers
            containerNames = [
                containerData.containerName
                for containerData in context.compose_containers
            ]
            saveCoverageFiles("coverage", scenario.name.replace(" ", "_"),
                              containerNames, "cov")
        # Tear down the composition whether or not coverage was collected.
        context.composition.decompose()
    # Ask the directory to cleanup
    getDirectory(context).cleanup()
def after_scenario(context, scenario):
    """Behave hook run after each scenario: save container logs when
    requested, then unpause/stop/remove the docker-compose containers and
    any leftover (chaincode) containers, unless tagged ``doNotDecompose``.
    """
    get_logs = context.config.userdata.get("logs", "N")
    if get_logs.lower() == "force" or (scenario.status == "failed" and get_logs.lower() == "y" and "compose_containers" in context):
        bdd_log("Scenario {0} failed. Getting container logs".format(scenario.name))
        file_suffix = "_" + scenario.name.replace(" ", "_") + ".log"
        # get logs from the peer containers
        for containerData in context.compose_containers:
            with open(containerData.name + file_suffix, "w+") as logfile:
                sys_rc = subprocess.call(["docker", "logs", containerData.name], stdout=logfile, stderr=logfile)
                if sys_rc != 0:
                    bdd_log("Cannot get logs for {0}. Docker rc = {1}".format(containerData.name, sys_rc))
        # get logs from the chaincode containers
        cc_output, cc_error, cc_returncode = \
            cli_call(["docker", "ps", "-f", "name=dev-", "--format", "{{.Names}}"], expect_success=True)
        for containerName in cc_output.splitlines():
            namePart, sep, junk = containerName.rpartition("-")
            with open(namePart + file_suffix, "w+") as logfile:
                sys_rc = subprocess.call(["docker", "logs", containerName], stdout=logfile, stderr=logfile)
                if sys_rc != 0:
                    # BUG FIX: referenced undefined 'namepart' (NameError); the local is 'namePart'
                    bdd_log("Cannot get logs for {0}. Docker rc = {1}".format(namePart, sys_rc))
    if 'doNotDecompose' in scenario.tags:
        if 'compose_yaml' in context:
            bdd_log("Not going to decompose after scenario {0}, with yaml '{1}'".format(scenario.name, context.compose_yaml))
    else:
        if 'compose_yaml' in context:
            fileArgsToDockerCompose = getDockerComposeFileArgsFromYamlFile(context.compose_yaml)
            bdd_log("Decomposing with yaml '{0}' after scenario {1}, ".format(context.compose_yaml, scenario.name))
            context.compose_output, context.compose_error, context.compose_returncode = \
                cli_call(["docker-compose"] + fileArgsToDockerCompose + ["unpause"], expect_success=True)
            context.compose_output, context.compose_error, context.compose_returncode = \
                cli_call(["docker-compose"] + fileArgsToDockerCompose + ["stop"], expect_success=True)
            if coverageEnabled(context):
                # Save the coverage files for this scenario before removing containers
                containerNames = [containerData.name for containerData in context.compose_containers]
                saveCoverageFiles("coverage", scenario.name.replace(" ", "_"), containerNames, "cov")
            context.compose_output, context.compose_error, context.compose_returncode = \
                cli_call(["docker-compose"] + fileArgsToDockerCompose + ["rm", "-f"], expect_success=True)
            # now remove any other containers (chaincodes)
            context.compose_output, context.compose_error, context.compose_returncode = \
                cli_call(["docker", "ps", "-qa"], expect_success=True)
            if context.compose_returncode == 0:
                # Remove each container
                for containerId in context.compose_output.splitlines():
                    context.compose_output, context.compose_error, context.compose_returncode = \
                        cli_call(["docker", "rm", "-f", containerId], expect_success=True)
def after_scenario(context, scenario):
    """Behave hook run after each scenario: save container logs when
    requested, then unpause/stop/remove the docker-compose containers and
    any leftover (chaincode) containers, unless tagged ``doNotDecompose``.
    """
    get_logs = context.config.userdata.get("logs", "N")
    if get_logs.lower() == "force" or (scenario.status == "failed" and get_logs.lower() == "y"):
        print("Scenario {0} failed. Getting container logs".format(scenario.name))
        file_suffix = "_" + scenario.name.replace(" ", "_") + ".log"
        # get logs from the peer containers
        for containerData in context.compose_containers:
            with open(containerData.containerName + file_suffix, "w+") as logfile:
                sys_rc = subprocess.call(["docker", "logs", containerData.containerName], stdout=logfile, stderr=logfile)
                if sys_rc != 0:
                    print("Cannot get logs for {0}. Docker rc = {1}".format(containerData.containerName, sys_rc))
        # get logs from the chaincode containers
        cc_output, cc_error, cc_returncode = \
            cli_call(context, ["docker", "ps", "-f", "name=dev-", "--format", "{{.Names}}"], expect_success=True)
        for containerName in cc_output.splitlines():
            namePart, sep, junk = containerName.rpartition("-")
            with open(namePart + file_suffix, "w+") as logfile:
                sys_rc = subprocess.call(["docker", "logs", containerName], stdout=logfile, stderr=logfile)
                if sys_rc != 0:
                    # BUG FIX: referenced undefined 'namepart' (NameError); the local is 'namePart'
                    print("Cannot get logs for {0}. Docker rc = {1}".format(namePart, sys_rc))
    if 'doNotDecompose' in scenario.tags:
        if 'compose_yaml' in context:
            print("Not going to decompose after scenario {0}, with yaml '{1}'".format(scenario.name, context.compose_yaml))
    else:
        if 'compose_yaml' in context:
            fileArgsToDockerCompose = getDockerComposeFileArgsFromYamlFile(context.compose_yaml)
            print("Decomposing with yaml '{0}' after scenario {1}, ".format(context.compose_yaml, scenario.name))
            context.compose_output, context.compose_error, context.compose_returncode = \
                cli_call(context, ["docker-compose"] + fileArgsToDockerCompose + ["unpause"], expect_success=True)
            context.compose_output, context.compose_error, context.compose_returncode = \
                cli_call(context, ["docker-compose"] + fileArgsToDockerCompose + ["stop"], expect_success=True)
            if coverageEnabled(context):
                # Save the coverage files for this scenario before removing containers
                containerNames = [containerData.containerName for containerData in context.compose_containers]
                saveCoverageFiles("coverage", scenario.name.replace(" ", "_"), containerNames, "cov")
            context.compose_output, context.compose_error, context.compose_returncode = \
                cli_call(context, ["docker-compose"] + fileArgsToDockerCompose + ["rm", "-f"], expect_success=True)
            # now remove any other containers (chaincodes)
            context.compose_output, context.compose_error, context.compose_returncode = \
                cli_call(context, ["docker", "ps", "-qa"], expect_success=True)
            if context.compose_returncode == 0:
                # Remove each container
                for containerId in context.compose_output.splitlines():
                    context.compose_output, context.compose_error, context.compose_returncode = \
                        cli_call(context, ["docker", "rm", "-f", containerId], expect_success=True)