def decompose_containers(context, scenario):
    """Tear down the docker-compose environment after *scenario*.

    Unpauses and stops the composed containers, saves coverage files when
    coverage collection is enabled, force-removes the composed containers,
    and finally removes any remaining containers (e.g. chaincode ones).
    Each CLI result is recorded on the context, as callers expect.
    """
    composeFileArgs = getDockerComposeFileArgsFromYamlFile(
        context.compose_yaml)
    print("Decomposing with yaml '{0}' after scenario {1}, ".format(
        context.compose_yaml, scenario.name))

    def run(cmd):
        # Every invocation publishes its result on the context.
        context.compose_output, context.compose_error, context.compose_returncode = \
            cli_call(context, cmd, expect_success=True)

    run(["docker-compose"] + composeFileArgs + ["unpause"])
    run(["docker-compose"] + composeFileArgs + ["stop"])

    # Save the coverage files for this scenario before removing containers.
    if coverageEnabled(context):
        names = [data.containerName for data in context.compose_containers]
        saveCoverageFiles("coverage", scenario.name.replace(" ", "_"),
                          names, "cov")

    run(["docker-compose"] + composeFileArgs + ["rm", "-f"])

    # Now remove any other containers (chaincodes).
    run(["docker", "ps", "-qa"])
    if context.compose_returncode == 0:
        # splitlines() materializes before the loop mutates compose_output.
        for containerId in context.compose_output.splitlines():
            run(["docker", "rm", "-f", containerId])
def after_scenario(context, scenario):
    """Behave hook run after each scenario.

    For failed scenarios, optionally dumps each composed container's docker
    logs to a per-scenario file (opt-in via the behave userdata flag
    ``-D logs=Y``).  Then tears down the docker-compose environment unless
    the scenario carries the @doNotDecompose tag.
    """
    if scenario.status == "failed":
        # Log capture is opt-in; default "N" means skip.
        get_logs = context.config.userdata.get("logs", "N")
        if get_logs.lower() == "y" :
            print("Scenario {0} failed. Getting container logs".format(scenario.name))
            file_suffix = "_" + scenario.name.replace(" ", "_") + ".log"
            # One <containerName>_<scenario>.log file per composed container.
            for containerData in context.compose_containers:
                with open(containerData.containerName + file_suffix, "w+") as logfile:
                    sys_rc = subprocess.call(["docker", "logs", containerData.containerName], stdout=logfile, stderr=logfile)
                    if sys_rc !=0 :
                        print("Cannot get logs for {0}. Docker rc = {1}".format(containerData.containerName,sys_rc))
    if 'doNotDecompose' in scenario.tags:
        print("Not going to decompose after scenario {0}, with yaml '{1}'".format(scenario.name, context.compose_yaml))
    else:
        if 'compose_yaml' in context:
            fileArgsToDockerCompose = getDockerComposeFileArgsFromYamlFile(context.compose_yaml)
            print("Decomposing with yaml '{0}' after scenario {1}, ".format(context.compose_yaml, scenario.name))
            context.compose_output, context.compose_error, context.compose_returncode = \
                cli_call(context, ["docker-compose"] + fileArgsToDockerCompose + ["kill"], expect_success=True)
            context.compose_output, context.compose_error, context.compose_returncode = \
                cli_call(context, ["docker-compose"] + fileArgsToDockerCompose + ["rm","-f"], expect_success=True)
            # now remove any other containers (chaincodes)
            context.compose_output, context.compose_error, context.compose_returncode = \
                cli_call(context, ["docker", "ps", "-qa"], expect_success=True)
            if context.compose_returncode == 0:
                # Remove each container
                for containerId in context.compose_output.splitlines():
                    #print("docker rm {0}".format(containerId))
                    context.compose_output, context.compose_error, context.compose_returncode = \
                        cli_call(context, ["docker", "rm", "-f", containerId], expect_success=True)
def before_all(context):
    """One-time behave setup.

    Detects a bring-your-own-network (BYON) deployment via the presence of a
    ``networkcredentials`` file; attaches to the remote servers for BYON,
    otherwise stops any leftover local peer process.
    """
    context.byon = os.path.exists("networkcredentials")
    context.remote_ip = context.config.userdata.get("remote-ip", None)
    context.tls = tlsEnabled(context)
    if not context.byon:
        # Local run: best-effort stop of a leftover peer (failure is fine).
        cli_call(["../build/bin/peer", "node", "stop"], expect_success=False)
        return
    context = get_remote_servers(context)
    # Give the remote network a moment to settle.
    time.sleep(5)
def after_scenario(context, scenario):
    """Kill and remove the docker-compose containers after each scenario,
    unless the scenario carries the @doNotDecompose tag.
    """
    if 'doNotDecompose' in scenario.tags:
        print("Not going to decompose after scenario {0}, with yaml '{1}'".format(scenario.name, context.compose_yaml))
        return
    if 'compose_yaml' not in context:
        return
    print("Decomposing with yaml '{0}' after scenario {1}, ".format(context.compose_yaml, scenario.name))
    # First kill, then force-remove, recording each result on the context.
    for subCmd in (["kill"], ["rm", "-f"]):
        context.compose_output, context.compose_error, context.compose_returncode = \
            cli_call(context,
                     ["docker-compose", "-f", context.compose_yaml] + subCmd,
                     expect_success=True)
def after_scenario(context, scenario):
    """Behave hook run after each scenario.

    For failed scenarios (when the userdata flag ``logs`` is "y"), dumps
    docker logs for the peer containers and the chaincode ("dev-") containers
    to per-scenario files.  Then tears down the docker-compose environment
    unless the scenario carries the @doNotDecompose tag.
    """
    if scenario.status == "failed":
        get_logs = context.config.userdata.get("logs", "N")
        if get_logs.lower() == "y":
            print("Scenario {0} failed. Getting container logs".format(
                scenario.name))
            file_suffix = "_" + scenario.name.replace(" ", "_") + ".log"
            # get logs from the peer containers
            for containerData in context.compose_containers:
                with open(containerData.containerName + file_suffix,
                          "w+") as logfile:
                    sys_rc = subprocess.call(
                        ["docker", "logs", containerData.containerName],
                        stdout=logfile, stderr=logfile)
                    if sys_rc != 0:
                        print(
                            "Cannot get logs for {0}. Docker rc = {1}".format(
                                containerData.containerName, sys_rc))
            # get logs from the chaincode containers
            cc_output, cc_error, cc_returncode = \
                cli_call(context,
                         ["docker", "ps", "-f", "name=dev-", "--format",
                          "{{.Names}}"],
                         expect_success=True)
            for containerName in cc_output.splitlines():
                # Strip the trailing "-<hash>" segment for the log file name.
                namePart, sep, junk = containerName.rpartition("-")
                with open(namePart + file_suffix, "w+") as logfile:
                    sys_rc = subprocess.call(["docker", "logs", containerName],
                                             stdout=logfile, stderr=logfile)
                    if sys_rc != 0:
                        # BUGFIX: was `namepart` (undefined) -> NameError.
                        print(
                            "Cannot get logs for {0}. Docker rc = {1}".format(
                                namePart, sys_rc))
    if 'doNotDecompose' in scenario.tags:
        print("Not going to decompose after scenario {0}, with yaml '{1}'".
              format(scenario.name, context.compose_yaml))
    else:
        if 'compose_yaml' in context:
            fileArgsToDockerCompose = getDockerComposeFileArgsFromYamlFile(
                context.compose_yaml)
            print("Decomposing with yaml '{0}' after scenario {1}, ".format(
                context.compose_yaml, scenario.name))
            context.compose_output, context.compose_error, context.compose_returncode = \
                cli_call(context, ["docker-compose"] + fileArgsToDockerCompose + ["kill"], expect_success=True)
            context.compose_output, context.compose_error, context.compose_returncode = \
                cli_call(context, ["docker-compose"] + fileArgsToDockerCompose + ["rm","-f"], expect_success=True)
            # now remove any other containers (chaincodes)
            context.compose_output, context.compose_error, context.compose_returncode = \
                cli_call(context, ["docker", "ps", "-qa"], expect_success=True)
            if context.compose_returncode == 0:
                # Remove each container
                for containerId in context.compose_output.splitlines():
                    #print("docker rm {0}".format(containerId))
                    context.compose_output, context.compose_error, context.compose_returncode = \
                        cli_call(context, ["docker", "rm", "-f", containerId], expect_success=True)
def before_all(context):
    """One-time behave setup.

    Detects a bring-your-own-network (BYON) deployment; for BYON attaches to
    the remote servers and, unless TLS against a remote IP already handled it,
    removes the first iptables INPUT rule over ssh.  For local runs, stops
    any leftover peer process.
    """
    context.byon = os.path.exists("networkcredentials")
    context.remote_ip = context.config.userdata.get("remote-ip", None)
    context.tls = tlsEnabled(context)
    print("TLS??", context.tls)
    if not context.byon:
        # Local run: best-effort stop of a leftover peer (failure is fine).
        cli_call(context, ["../build/bin/peer", "node", "stop"],
                 expect_success=False)
        return
    context = get_remote_servers(context)
    if context.tls and context.remote_ip:
        print("Already restarted during 'pause'!!!")
    else:
        # Undo the firewall rule installed earlier in the run.
        ssh_call(context,
                 "export SUDO_ASKPASS=~/.remote_pass.sh;sudo iptables -D INPUT 1")
def after_scenario(context, scenario):
    """Behave hook run after each scenario.

    Captures container logs when ``logs=force`` or when the scenario failed
    and ``logs=y``; then, unless tagged @doNotDecompose, stops the composition
    (saving coverage files first when coverage is enabled) and decomposes it.
    """
    get_logs = context.config.userdata.get("logs", "N")
    if get_logs.lower() == "force" or (scenario.status == "failed" and get_logs.lower() == "y" and "compose_containers" in context):
        print("Scenario {0} failed. Getting container logs".format(scenario.name))
        file_suffix = "_" + scenario.name.replace(" ", "_") + ".log"
        # get logs from the peer containers
        for containerData in context.compose_containers:
            with open(containerData.containerName + file_suffix, "w+") as logfile:
                sys_rc = subprocess.call(["docker", "logs", containerData.containerName], stdout=logfile, stderr=logfile)
                if sys_rc !=0 :
                    print("Cannot get logs for {0}. Docker rc = {1}".format(containerData.containerName,sys_rc))
        # get logs from the chaincode containers
        cc_output, cc_error, cc_returncode = \
            cli_call(["docker", "ps", "-f", "name=dev-", "--format", "{{.Names}}"], expect_success=True)
        for containerName in cc_output.splitlines():
            # Strip the trailing "-<hash>" segment for the log file name.
            namePart,sep,junk = containerName.rpartition("-")
            with open(namePart + file_suffix, "w+") as logfile:
                sys_rc = subprocess.call(["docker", "logs", containerName], stdout=logfile, stderr=logfile)
                if sys_rc !=0 :
                    # BUGFIX: was `namepart` (undefined) -> NameError.
                    print("Cannot get logs for {0}. Docker rc = {1}".format(namePart,sys_rc))
    if 'doNotDecompose' in scenario.tags:
        if 'compose_yaml' in context:
            print("Not going to decompose after scenario {0}, with yaml '{1}'".format(scenario.name, context.compose_yaml))
    elif 'composition' in context:
        if coverageEnabled(context):
            # First stop the containers to allow for coverage files to be created.
            context.composition.issueCommand(["stop"])
            #Save the coverage files for this scenario before removing containers
            containerNames = [containerData.containerName for containerData in context.compose_containers]
            saveCoverageFiles("coverage", scenario.name.replace(" ", "_"), containerNames, "cov")
        context.composition.decompose()
def retrieve_logs(context, scenario):
    """Dump docker logs for the peer and chaincode containers of a failed
    *scenario* into per-container ``<name>_<scenario>.log`` files.
    """
    bdd_log("Scenario {0} failed. Getting container logs".format(
        scenario.name))
    file_suffix = "_" + scenario.name.replace(" ", "_") + ".log"
    if context.compose_containers == []:
        # Nothing ever came up, so there is nothing to collect.
        bdd_log(
            "docker-compose command failed on '{0}'. There are no docker logs."
            .format(context.compose_yaml))
    # get logs from the peer containers
    for containerData in context.compose_containers:
        with open(containerData.containerName + file_suffix, "w+") as logfile:
            sys_rc = subprocess.call(
                ["docker", "logs", containerData.containerName],
                stdout=logfile, stderr=logfile)
            if sys_rc != 0:
                bdd_log("Cannot get logs for {0}. Docker rc = {1}".format(
                    containerData.containerName, sys_rc))
    # get logs from the chaincode containers
    cc_output, cc_error, cc_returncode = \
        cli_call(["docker", "ps", "-f", "name=dev-", "--format",
                  "{{.Names}}"],
                 expect_success=True)
    for containerName in cc_output.splitlines():
        # Strip the trailing "-<hash>" segment for the log file name.
        namePart, sep, junk = containerName.rpartition("-")
        with open(namePart + file_suffix, "w+") as logfile:
            sys_rc = subprocess.call(["docker", "logs", containerName],
                                     stdout=logfile, stderr=logfile)
            if sys_rc != 0:
                # BUGFIX: was `namepart` (undefined) -> NameError.
                bdd_log("Cannot get logs for {0}. Docker rc = {1}".format(
                    namePart, sys_rc))
def get_logs_from_chaincode_containers(context, file_suffix):
    """Dump docker logs for every chaincode ("dev-") container into a
    ``<name><file_suffix>`` file, where the name drops the trailing hash.
    """
    cc_output, cc_error, cc_returncode = \
        cli_call(context,
                 ["docker", "ps", "-f", "name=dev-", "--format",
                  "{{.Names}}"],
                 expect_success=True)
    for containerName in cc_output.splitlines():
        # Strip the trailing "-<hash>" segment for the log file name.
        namePart, sep, junk = containerName.rpartition("-")
        with open(namePart + file_suffix, "w+") as logfile:
            sys_rc = subprocess.call(["docker", "logs", containerName],
                                     stdout=logfile, stderr=logfile)
            if sys_rc != 0:
                # BUGFIX: was `namepart` (undefined) -> NameError.
                print("Cannot get logs for {0}. Docker rc = {1}".format(
                    namePart, sys_rc))
def after_scenario(context, scenario):
    """Kill and remove the docker-compose containers after each scenario
    (plus any leftover containers such as chaincodes), unless the scenario
    carries the @doNotDecompose tag.
    """
    if 'doNotDecompose' in scenario.tags:
        print("Not going to decompose after scenario {0}, with yaml '{1}'".
              format(scenario.name, context.compose_yaml))
        return
    if 'compose_yaml' not in context:
        return
    print("Decomposing with yaml '{0}' after scenario {1}, ".format(
        context.compose_yaml, scenario.name))

    def run(cmd):
        # Every invocation publishes its result on the context.
        context.compose_output, context.compose_error, context.compose_returncode = \
            cli_call(context, cmd, expect_success=True)

    run(["docker-compose", "-f", context.compose_yaml, "kill"])
    run(["docker-compose", "-f", context.compose_yaml, "rm", "-f"])
    # now remove any other containers (chaincodes)
    run(["docker", "ps", "-qa"])
    if context.compose_returncode == 0:
        # splitlines() materializes before the loop mutates compose_output.
        for containerId in context.compose_output.splitlines():
            run(["docker", "rm", containerId])
def after_scenario(context, scenario):
    """Behave hook run after each scenario.

    Delegates per-scenario cleanup to the ContextHelper, captures container
    logs when ``logs=force`` or when the scenario failed with ``logs=y``,
    decomposes the composition unless tagged @doNotDecompose (saving coverage
    files first when enabled), and finally asks the directory to clean up.
    """
    contextHelper = ContextHelper.GetHelper(context=context)
    contextHelper.after_scenario(scenario)
    get_logs = context.config.userdata.get("logs", "N")
    if get_logs.lower() == "force" or (scenario.status == "failed" and
                                       get_logs.lower() == "y" and
                                       "compose_containers" in context):
        print("Scenario {0} failed. Getting container logs".format(
            scenario.name))
        file_suffix = "_" + scenario.name.replace(" ", "_") + ".log"
        # get logs from the peer containers
        for containerData in context.compose_containers:
            with open(containerData.containerName + file_suffix,
                      "w+") as logfile:
                sys_rc = subprocess.call(
                    ["docker", "logs", containerData.containerName],
                    stdout=logfile, stderr=logfile)
                if sys_rc != 0:
                    print("Cannot get logs for {0}. Docker rc = {1}".format(
                        containerData.containerName, sys_rc))
        # get logs from the chaincode containers
        cc_output, cc_error, cc_returncode = \
            cli_call(["docker", "ps", "-f", "name=dev-", "--format",
                      "{{.Names}}"],
                     expect_success=True)
        for containerName in cc_output.splitlines():
            # Strip the trailing "-<hash>" segment for the log file name.
            namePart, sep, junk = containerName.rpartition("-")
            with open(namePart + file_suffix, "w+") as logfile:
                sys_rc = subprocess.call(["docker", "logs", containerName],
                                         stdout=logfile, stderr=logfile)
                if sys_rc != 0:
                    # BUGFIX: was `namepart` (undefined) -> NameError.
                    print("Cannot get logs for {0}. Docker rc = {1}".format(
                        namePart, sys_rc))
    if 'doNotDecompose' in scenario.tags:
        if 'compose_yaml' in context:
            print("Not going to decompose after scenario {0}, with yaml '{1}'".
                  format(scenario.name, context.compose_yaml))
    elif 'composition' in context:
        if coverageEnabled(context):
            # First stop the containers to allow for coverage files to be created.
            context.composition.issueCommand(["stop"])
            #Save the coverage files for this scenario before removing containers
            containerNames = [
                containerData.containerName
                for containerData in context.compose_containers
            ]
            saveCoverageFiles("coverage", scenario.name.replace(" ", "_"),
                              containerNames, "cov")
        context.composition.decompose()
    # Ask the directory to cleanup
    getDirectory(context).cleanup()
def after_scenario(context, scenario):
    """Behave hook run after each scenario.

    Captures container logs when ``logs=force`` or when the scenario failed
    with ``logs=y``; then, unless tagged @doNotDecompose, unpauses and stops
    the composed containers (saving coverage files first when coverage is
    enabled) and removes them plus any leftover containers (chaincodes).
    """
    get_logs = context.config.userdata.get("logs", "N")
    if get_logs.lower() == "force" or (scenario.status == "failed" and get_logs.lower() == "y" and "compose_containers" in context):
        bdd_log("Scenario {0} failed. Getting container logs".format(scenario.name))
        file_suffix = "_" + scenario.name.replace(" ", "_") + ".log"
        # get logs from the peer containers
        for containerData in context.compose_containers:
            with open(containerData.name + file_suffix, "w+") as logfile:
                sys_rc = subprocess.call(["docker", "logs", containerData.name], stdout=logfile, stderr=logfile)
                if sys_rc !=0 :
                    bdd_log("Cannot get logs for {0}. Docker rc = {1}".format(containerData.name,sys_rc))
        # get logs from the chaincode containers
        cc_output, cc_error, cc_returncode = \
            cli_call(["docker", "ps", "-f", "name=dev-", "--format", "{{.Names}}"], expect_success=True)
        for containerName in cc_output.splitlines():
            # Strip the trailing "-<hash>" segment for the log file name.
            namePart,sep,junk = containerName.rpartition("-")
            with open(namePart + file_suffix, "w+") as logfile:
                sys_rc = subprocess.call(["docker", "logs", containerName], stdout=logfile, stderr=logfile)
                if sys_rc !=0 :
                    # BUGFIX: was `namepart` (undefined) -> NameError.
                    bdd_log("Cannot get logs for {0}. Docker rc = {1}".format(namePart,sys_rc))
    if 'doNotDecompose' in scenario.tags:
        if 'compose_yaml' in context:
            bdd_log("Not going to decompose after scenario {0}, with yaml '{1}'".format(scenario.name, context.compose_yaml))
    else:
        if 'compose_yaml' in context:
            fileArgsToDockerCompose = getDockerComposeFileArgsFromYamlFile(context.compose_yaml)
            bdd_log("Decomposing with yaml '{0}' after scenario {1}, ".format(context.compose_yaml, scenario.name))
            context.compose_output, context.compose_error, context.compose_returncode = \
                cli_call(["docker-compose"] + fileArgsToDockerCompose + ["unpause"], expect_success=True)
            context.compose_output, context.compose_error, context.compose_returncode = \
                cli_call(["docker-compose"] + fileArgsToDockerCompose + ["stop"], expect_success=True)
            if coverageEnabled(context):
                #Save the coverage files for this scenario before removing containers
                containerNames = [containerData.name for containerData in context.compose_containers]
                saveCoverageFiles("coverage", scenario.name.replace(" ", "_"), containerNames, "cov")
            context.compose_output, context.compose_error, context.compose_returncode = \
                cli_call(["docker-compose"] + fileArgsToDockerCompose + ["rm","-f"], expect_success=True)
            # now remove any other containers (chaincodes)
            context.compose_output, context.compose_error, context.compose_returncode = \
                cli_call(["docker", "ps", "-qa"], expect_success=True)
            if context.compose_returncode == 0:
                # Remove each container
                for containerId in context.compose_output.splitlines():
                    #bdd_log("docker rm {0}".format(containerId))
                    context.compose_output, context.compose_error, context.compose_returncode = \
                        cli_call(["docker", "rm", "-f", containerId], expect_success=True)
def before_all(context):
    """Behave setup hook: stop any leftover peer process before the run."""
    stopCommand = ["../peer/peer", "stop"]
    # Failure is fine here - the peer may simply not be running.
    cli_call(context, stopCommand, expect_success=False)
def before_all(context):
    """Behave setup hook: stop any leftover peer node before the run."""
    # Failure is fine here - the peer may simply not be running.
    cli_call(context,
             ["../build/bin/peer", "node", "stop"],
             expect_success=False)
def after_scenario(context, scenario):
    """Kill and remove the docker-compose containers after each scenario
    (plus any leftover containers such as chaincodes), unless the scenario
    carries the @doNotDecompose tag.
    """
    if "doNotDecompose" in scenario.tags:
        print("Not going to decompose after scenario {0}, with yaml '{1}'".format(scenario.name, context.compose_yaml))
        return
    if "compose_yaml" not in context:
        return
    print("Decomposing with yaml '{0}' after scenario {1}, ".format(context.compose_yaml, scenario.name))

    def invoke(command):
        # Every invocation publishes its result on the context.
        context.compose_output, context.compose_error, context.compose_returncode = cli_call(
            context, command, expect_success=True
        )

    invoke(["docker-compose", "-f", context.compose_yaml, "kill"])
    invoke(["docker-compose", "-f", context.compose_yaml, "rm", "-f"])
    # now remove any other containers (chaincodes)
    invoke(["docker", "ps", "-qa"])
    if context.compose_returncode == 0:
        # splitlines() materializes before the loop mutates compose_output.
        for containerId in context.compose_output.splitlines():
            invoke(["docker", "rm", containerId])
def before_all(context):
    """Behave setup hook: stop any leftover peer node before the run."""
    peerStop = ["../build/bin/peer", "node", "stop"]
    # Failure is fine here - the peer may simply not be running.
    cli_call(peerStop, expect_success=False)