def runUnitTests(env, buildWorkspace):
  """
    Calls `grok/run_tests.sh` to run the unit tests

    :param dict env: Current environ set for GROK_HOME, etc
    :param str buildWorkspace: /path/to/buildWorkspace

    :returns: True if tests are successful
    :rtype: bool
  """
  rawResultsFile = os.path.join(buildWorkspace, "numenta-apps", "grok",
                                "tests", "results", "py2", "xunit", "jenkins",
                                "results.xml")
  finalResultsFile = os.path.join(prepareResultsDir(),
                                  "unit_tests_%s_results.xml" %
                                  getBuildNumber(logger=g_logger))
  with changeToWorkingDir(os.path.join(buildWorkspace, "numenta-apps",
                                       "grok")):
    try:
      runWithOutput(command=("./run_tests.sh --unit --language py --results "
                             "jenkins"),
                    env=env,
                    logger=g_logger)
    except CommandFailedError:
      g_logger.exception("Failed to run unit tests")
      raise
    finally:
      # Archive the raw xunit output under a per-build name, pass or fail
      shutil.move(rawResultsFile, finalResultsFile)
  return analyzeResults(resultsPath=finalResultsFile)
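# Usage sketch (hypothetical, not part of the original pipeline): run the
# grok unit tests against a checked-out workspace. The GROK_HOME value and
# the use of TestsFailed here are assumptions for illustration.
if __name__ == "__main__":
  exampleEnv = dict(os.environ, GROK_HOME="/opt/numenta/grok")
  if not runUnitTests(env=exampleEnv, buildWorkspace="/tmp/build-workspace"):
    raise TestsFailed("Unit tests reported failures")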
def main(args):
  """
    Main function for the pipeline. Executes all sub-tasks.

    :param args: Parsed command line arguments
  """
  logger = initPipelineLogger("manifest", logLevel=args.logLevel)
  buildWorkspace = os.environ.get("BUILD_WORKSPACE", None)
  if not buildWorkspace:
    baseDir = jenkins.getWorkspace()
    buildId = jenkins.getBuildNumber()
    buildWorkspace = mkdtemp(prefix=buildId, dir=baseDir)

  manifest = vars(args)
  # Update buildWorkspace in the manifest section for pipelineJson
  manifest.update({"buildWorkspace": buildWorkspace})
  manifestEnv = {"manifest": manifest}

  pipelineJsonPath = "%s/%s_pipeline.json" % (buildWorkspace, args.pipeline)
  with open(pipelineJsonPath, 'w') as fp:
    fp.write(json.dumps(manifestEnv, ensure_ascii=False))
  logger.debug(json.dumps(manifestEnv))
  logger.info("Pipeline JSON path: %s", pipelineJsonPath)
  return pipelineJsonPath
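# Example (hypothetical values): the pipeline JSON written by main() above
# wraps the parsed arguments, plus the injected "buildWorkspace", under a
# single "manifest" key. A minimal sketch of the resulting file content:
if __name__ == "__main__":
  exampleManifestEnv = {"manifest": {"pipeline": "manifest",
                                     "logLevel": "DEBUG",
                                     "buildWorkspace": "/tmp/manifest-build"}}
  print json.dumps(exampleManifestEnv, ensure_ascii=False)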
def runUnitTests(env, buildWorkspace):
  """
    Calls `htm-it/run_tests.sh` to run the unit tests

    :param dict env: Current environ set for HTM_IT_HOME, etc
    :param str buildWorkspace: /path/to/buildWorkspace

    :returns: True if tests are successful
    :rtype: bool
  """
  rawResultsFile = os.path.join(buildWorkspace, "numenta-apps", "htm.it",
                                "tests", "results", "py2", "xunit", "jenkins",
                                "results.xml")
  finalResultsFile = os.path.join(prepareResultsDir(),
                                  "unit_tests_%s_results.xml" %
                                  getBuildNumber(logger=g_logger))
  with changeToWorkingDir(os.path.join(buildWorkspace, "numenta-apps",
                                       "htm.it")):
    try:
      runWithOutput(command=("./run_tests.sh --unit --language py --results "
                             "jenkins"),
                    env=env,
                    logger=g_logger)
    except CommandFailedError:
      g_logger.exception("Failed to run unit tests")
      raise
    finally:
      # Archive the raw xunit output under a per-build name, pass or fail
      shutil.move(rawResultsFile, finalResultsFile)
  return analyzeResults(resultsPath=finalResultsFile)
def recordXunitTestsResults():
  """
    Moves the result files generated by tests that emit formatted (xunit)
    results into the masterResults directory, where Jenkins can find them.
    Results are archived by Jenkins for each build.
  """
  jobResultsDir = os.path.join(os.environ["BUILD_WORKSPACE"], "products")
  masterResults = prepareResultsDir()
  jobNumber = getBuildNumber()

  def attemptResultUpdate(task):
    originalResultsFile = "%s_unit_test_results.xml" % task
    newResultsFile = "%s_unit_test_results_%s.xml" % (task, jobNumber)
    if os.path.exists(os.path.join(jobResultsDir, originalResultsFile)):
      shutil.move(os.path.join(jobResultsDir, originalResultsFile),
                  os.path.join(masterResults, newResultsFile))

  attemptResultUpdate("grok")
  attemptResultUpdate("htmengine")
  attemptResultUpdate("nta.utils")
def recordXunitTestsResults():
  """
    Moves the result files generated by tests that emit formatted (xunit)
    results into the masterResults directory, where Jenkins can find them.
    Results are archived by Jenkins for each build.
  """
  jobResultsDir = os.path.join(os.environ["BUILD_WORKSPACE"], "products")
  masterResults = prepareResultsDir()
  jobNumber = getBuildNumber()

  def attemptResultUpdate(task):
    originalResultsFile = "%s_unit_test_results.xml" % task
    newResultsFile = "%s_unit_test_results_%s.xml" % (task, jobNumber)
    if os.path.exists(os.path.join(jobResultsDir, originalResultsFile)):
      shutil.move(os.path.join(jobResultsDir, originalResultsFile),
                  os.path.join(masterResults, newResultsFile))

  attemptResultUpdate("YOMP")
  attemptResultUpdate("htmengine")
  attemptResultUpdate("nta.utils")
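# Worked example (hypothetical paths): with BUILD_WORKSPACE=/jenkins/ws and
# build number 42, attemptResultUpdate("htmengine") above moves
#   /jenkins/ws/products/htmengine_unit_test_results.xml
# to
#   <prepareResultsDir()>/htmengine_unit_test_results_42.xml
# so Jenkins archives a uniquely named copy for each build.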
def main(jsonArgs=None):
  """
    Creates an AMI using an HTM-IT RPM for a given SHA.

    1) Downloads the HTM-IT RPM corresponding to a given SHA to local disk
    2) Calls bake_ami.sh with the name of the HTM-IT RPM and the AMI name
       to launch an instance with Packer, install the HTM-IT RPM from 1),
       run the integration tests, and, if they are green, stamp the AMI
  """
  try:
    jsonArgs = jsonArgs or {}
    parsedArgs = addAndParseArgs(jsonArgs)
    amiName = parsedArgs["amiName"]

    if not (os.environ.get("AWS_ACCESS_KEY_ID") and
            os.environ.get("AWS_SECRET_ACCESS_KEY")):
      g_logger.error("Please set AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY")
      raise MissingAWSKeysInEnvironment("AWS keys are not set")
    else:
      g_config["AWS_ACCESS_KEY_ID"] = os.environ["AWS_ACCESS_KEY_ID"]
      g_config["AWS_SECRET_ACCESS_KEY"] = os.environ["AWS_SECRET_ACCESS_KEY"]

    artifactsDir = createOrReplaceArtifactsDir(logger=g_logger)

    g_logger.info("Creating the AMI")
    pipeLineSrc = os.path.join(os.environ["PRODUCTS"], "htm-it", "htm-it",
                               "pipeline", "src")
    with changeToWorkingDir(pipeLineSrc):
      g_logger.info("\n\n########## Baking AMI ##########")
      g_logger.debug("########## AMI Name: %s ##########", amiName)

      # Baking an AMI takes around 15 mins, so print as it runs so we see
      # progress in the jenkins console during the run
      runWithOutput("./bake_ami %s" % amiName, env=os.environ, logger=g_logger)

      amiIDPath = os.path.join(os.getcwd(), "ami.txt")

      with open(amiIDPath, "r") as amiFileHandler:
        readAmiId = (amiFileHandler.readline()).split(":")
        amiID = readAmiId[1].strip()
        g_logger.info("AMI ID generated is: %s", amiID)

      buildNumber = getBuildNumber(logger=g_logger)
      artifactAmiIdPath = os.path.join(artifactsDir, "ami_%s.txt" % buildNumber)
      shutil.copy(amiIDPath, artifactAmiIdPath)

      print "#############################################################"
      print "Running the Integration Tests"
      runIntegrationTestScriptPath = os.path.join(os.environ["PRODUCTS"],
                                                  "htm-it", "htm-it",
                                                  "pipeline", "src")
      runIntegrationTestCommand = ("python " +
                                   "%s/run_htm-it_integration_tests.py" %
                                   runIntegrationTestScriptPath +
                                   " --ami " + amiID)
      if parsedArgs["pipelineJson"]:
        runIntegrationTestCommand += (" --pipeline-json %s" %
                                      parsedArgs["pipelineJson"])

      g_logger.info(runIntegrationTestCommand)
      runWithOutput(runIntegrationTestCommand, env=os.environ, logger=g_logger)

      # Load the json file again and check the status of the tests
      with open(parsedArgs["pipelineJson"]) as jsonFile:
        params = json.load(jsonFile)
        integrationTestStatus = params.get("integration_test").get("testStatus")
      # Upload the ami-id to S3 if the pipeline was triggered with production
      # forks.
      if integrationTestStatus:
        g_logger.info("Uploading %s to S3 which contains the generated AMI: %s",
                      os.path.basename(artifactAmiIdPath), amiID)
        uploadToS3(config=g_config, filePath=artifactAmiIdPath,
                   s3Folder="stable_ami", logger=g_logger)
  except TestsFailed:
    g_logger.error("There was a failure executing the HTM-IT integration tests")
    raise
  except PipelineError:
    g_logger.exception("External process failed while baking the AMI")
    raise
  except Exception:
    g_logger.exception("Unknown error occurred while baking the AMI")
    raise
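# Worked example (assumed ami.txt format; the real content comes from
# bake_ami): a first line such as "AMI ID:ami-0123456789abcdef0" splits on
# ":", and index 1, stripped, yields the bare AMI ID used above.
if __name__ == "__main__":
  exampleLine = "AMI ID:ami-0123456789abcdef0\n"
  assert exampleLine.split(":")[1].strip() == "ami-0123456789abcdef0"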
def launchInstance(amiID, config, logger):
  """
    Launch an instance using the AMI-id and other options.
    Wait until the instance is up and return the instance-id and its
    public DNS name.

    :param amiID: The AMI ID of the image the instance will be launched from.

    :param config: A dict containing values for `REGION`,
      `AWS_ACCESS_KEY_ID`, and `AWS_SECRET_ACCESS_KEY`. It also needs:
      - `KEY` = The SSH pub key to use for initialization (e.g.: chef_west)
      - `INSTANCE_TYPE` = The size of EC2 instance to use (e.g.: m3.medium)
      - `JOB_NAME` = used to tag the instance launched
      - `BUILD_NUMBER` = used to tag the instance launched

    :param logger: An initialized logger from the calling pipeline.

    :raises: infrastructure.utilities.exceptions.InstanceLaunchError
      If the instance fails to launch in a set amount of time.

    :returns: A tuple containing the public DNS entry for the server and the
      EC2 Instance ID, in that order.
  """
  # Make sure we have the right security groups set up for the instance. We
  # use `basic_server` to allow SSH access from our office and specified
  # secure IPs. `grok_server` allows access to the server on ports 80 & 443
  # universally, but can be limited if necessary.
  if config["REGION"] == "us-west-2":
    securityGroups = ["basic_server", "grok_server"]
  elif config["REGION"] == "us-east-1":
    securityGroups = ["basic_server", "grok_server_east"]
  else:
    # Guard against proceeding with securityGroups unset
    raise ValueError("Unsupported region: %s" % config["REGION"])

  conn = getEC2Connection(config)
  image = conn.get_image(amiID)

  logger.info("Launching instance from AMI: %s", amiID)
  reservation = image.run(key_name=config["KEY"],
                          security_groups=securityGroups,
                          instance_type=config["INSTANCE_TYPE"])

  instance = reservation.instances[0]
  instanceID = instance.id

  logger.debug("Waiting for instance %s to boot.", instanceID)
  for _ in xrange(MAX_RETRIES_FOR_INSTANCE_READY):
    logger.debug("Instance state: %s", instance.state)
    if instance.state == "pending":
      sleep(SLEEP_DELAY)
      instance.update()
    elif instance.state == "running":
      break
  else:
    terminateInstance(instanceID, config, logger)
    raise InstanceLaunchError(
      "Instance took more than %d seconds to start" %
      (MAX_RETRIES_FOR_INSTANCE_READY * SLEEP_DELAY))

  publicDnsName = instance.public_dns_name

  instanceTags = {}
  if os.environ.get("JENKINS_HOME"):
    instanceTags["Name"] = "%s-%s" % (os.environ["JOB_NAME"],
                                      jenkins.getBuildNumber())
  else:
    instanceTags["Name"] = "running-locally-by-user:%s" % os.getlogin()
  instanceTags["Description"] = "Testing AMI %s" % amiID

  if "BUILD_URL" in os.environ.keys():
    instanceTags["JENKINS_LINK"] = os.environ["BUILD_URL"]
  if "GIT_BRANCH" in os.environ.keys():
    instanceTags["GIT_BRANCH"] = os.environ["GIT_BRANCH"]
  if "GIT_COMMIT" in os.environ.keys():
    instanceTags["GIT_COMMIT"] = os.environ["GIT_COMMIT"]

  try:
    conn.create_tags([instanceID], instanceTags)
  except:
    terminateInstance(instanceID, config, logger)
    raise

  logger.info("Instance %s is running, public dns : %s", instanceID,
              publicDnsName)
  return publicDnsName, instanceID
def main(jsonArgs=None):
  """
    Creates an AMI using a YOMP RPM for a given SHA.

    1) Downloads the YOMP RPM corresponding to a given SHA to local disk
    2) Calls bake_ami.sh with the name of the YOMP RPM and the AMI name
       to launch an instance with Packer, install the YOMP RPM from 1),
       run the integration tests, and, if they are green, stamp the AMI
  """
  try:
    jsonArgs = jsonArgs or {}
    parsedArgs = addAndParseArgs(jsonArgs)
    amiName = parsedArgs["amiName"]

    if not (os.environ.get("AWS_ACCESS_KEY_ID") and
            os.environ.get("AWS_SECRET_ACCESS_KEY")):
      g_logger.error("Please set AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY")
      raise MissingAWSKeysInEnvironment("AWS keys are not set")
    else:
      g_config["AWS_ACCESS_KEY_ID"] = os.environ["AWS_ACCESS_KEY_ID"]
      g_config["AWS_SECRET_ACCESS_KEY"] = os.environ["AWS_SECRET_ACCESS_KEY"]

    artifactsDir = createOrReplaceArtifactsDir()

    g_logger.info("Creating the AMI")
    pipeLineSrc = os.path.join(os.environ["PRODUCTS"], "YOMP", "YOMP",
                               "pipeline", "src")
    with changeToWorkingDir(pipeLineSrc):
      g_logger.info("\n\n########## Baking AMI ##########")
      g_logger.debug("########## AMI Name: %s ##########", amiName)

      # Baking an AMI takes around 15 mins, so print as it runs so we see
      # progress in the jenkins console during the run
      runWithOutput("./bake_ami %s" % amiName, env=os.environ, logger=g_logger)

      amiIDPath = os.path.join(os.getcwd(), "ami.txt")

      with open(amiIDPath, "r") as amiFileHandler:
        readAmiId = (amiFileHandler.readline()).split(":")
        amiID = readAmiId[1].strip()
        g_logger.info("AMI ID generated is: %s", amiID)

      buildNumber = getBuildNumber()
      artifactAmiIdPath = os.path.join(artifactsDir, "ami_%s.txt" % buildNumber)
      shutil.copy(amiIDPath, artifactAmiIdPath)

      print "#############################################################"
      print "Running the Integration Tests"
      runIntegrationTestScriptPath = os.path.join(os.environ["PRODUCTS"],
                                                  "YOMP", "YOMP",
                                                  "pipeline", "src")
      runIntegrationTestCommand = ("python " +
                                   "%s/run_YOMP_integration_tests.py" %
                                   runIntegrationTestScriptPath +
                                   " --ami " + amiID)
      if parsedArgs["pipelineJson"]:
        runIntegrationTestCommand += (" --pipeline-json %s" %
                                      parsedArgs["pipelineJson"])

      g_logger.info(runIntegrationTestCommand)
      runWithOutput(runIntegrationTestCommand, env=os.environ, logger=g_logger)

      # Load the json file again and check the status of the tests
      with open(parsedArgs["pipelineJson"]) as jsonFile:
        params = json.load(jsonFile)
        integrationTestStatus = params.get("integration_test").get("testStatus")
      # Upload the ami-id to S3 if the pipeline was triggered with production
      # forks.
      if integrationTestStatus:
        g_logger.info("Uploading %s to S3 which contains the generated AMI: %s",
                      os.path.basename(artifactAmiIdPath), amiID)
        uploadToS3(config=g_config, filePath=artifactAmiIdPath,
                   s3Folder="stable_ami", logger=g_logger)
  except TestsFailed:
    g_logger.error("There was a failure executing the YOMP integration tests")
    raise
  except PipelineError:
    g_logger.exception("External process failed while baking the AMI")
    raise
  except Exception:
    g_logger.exception("Unknown error occurred while baking the AMI")
    raise
def main():
  """
    This is the main function.
  """
  args = parseArgs()

  global g_logger
  g_logger = initPipelineLogger("run-integration-tests",
                                logLevel=args.logLevel)

  if not (os.environ.get("AWS_ACCESS_KEY_ID") and
          os.environ.get("AWS_SECRET_ACCESS_KEY")):
    g_logger.error("Please set AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY")
    raise MissingAWSKeysInEnvironment("AWS keys are not set")
  else:
    g_config["AWS_ACCESS_KEY_ID"] = os.environ["AWS_ACCESS_KEY_ID"]
    g_config["AWS_SECRET_ACCESS_KEY"] = os.environ["AWS_SECRET_ACCESS_KEY"]

  # Launch the instance with the given AMI
  publicDnsName, instanceId = launchInstance(args.ami, g_config, g_logger)

  resultsDir = prepareResultsDir()
  serverKey = os.path.join("~", ".ssh", g_config["KEY"] + ".pem")

  # The calls in this function are not signal-safe. However, the expectation
  # is that making them signal safe would be overly burdensome at this time.
  # If issues arise later, then we'll figure out what the right approach is
  # at that time.
  def handleSignalInterrupt(signalNumber, _frame):
    g_logger.error("Received interrupt signal %s", signalNumber)
    if instanceId:
      g_logger.error("Terminating instance %s", instanceId)
      terminateInstance(instanceId, g_config, g_logger)

  signal.signal(signal.SIGINT, handleSignalInterrupt)
  signal.signal(signal.SIGTERM, handleSignalInterrupt)

  with settings(host_string=publicDnsName,
                key_filename=serverKey,
                user=g_config["USER"],
                connection_attempts=30,
                warn_only=True):
    g_logger.info("Connected to %s using %s.pem", publicDnsName, serverKey)
    # Run the integration tests
    try:
      waitForGrokServerToBeReady(publicDnsName, serverKey, g_config["USER"],
                                 g_logger)
      getApiKey(instanceId, publicDnsName, g_config, g_logger)
      # TODO remove the exports; keeping them intact for now because some of
      # the integration tests use the ConfigAttributePatch which reads these
      # values from environment.
      runTestCommand = ("export AWS_ACCESS_KEY_ID=%s" %
                        os.environ["AWS_ACCESS_KEY_ID"] +
                        " && export AWS_SECRET_ACCESS_KEY=%s" %
                        os.environ["AWS_SECRET_ACCESS_KEY"] +
                        " && source /etc/grok/supervisord.vars" +
                        " && cd $GROK_HOME" +
                        " && ./run_tests.sh --integration --language py" +
                        " --results xunit jenkins")
      run(runTestCommand)
      g_logger.debug("Retrieving results")
      get("%s" % (g_remotePath), resultsDir)
    except Exception:
      g_logger.exception("Caught exception in run_tests")
      stopInstance(instanceId, g_config, g_logger)
      raise
    else:
      g_logger.info("Tests have finished.")

      # Rename the results file to be job specific
      newResultsFile = ("grok_integration_test_results_%s.xml" %
                        getBuildNumber())
      if os.path.exists(os.path.join(resultsDir, "results.xml")):
        shutil.move(os.path.join(resultsDir, "results.xml"),
                    os.path.join(resultsDir, newResultsFile))
      if os.path.exists(os.path.join(resultsDir, newResultsFile)):
        successStatus = analyzeResults("%s/%s" % (resultsDir, newResultsFile))
      else:
        g_logger.error("Could not find results file: %s", newResultsFile)
        successStatus = False

      if args.pipelineJson:
        with open(args.pipelineJson) as jsonFile:
          pipelineParams = json.load(jsonFile)

        pipelineParams["integration_test"] = {"testStatus": successStatus}
        with open(args.pipelineJson, "w") as jsonFile:
          jsonFile.write(json.dumps(pipelineParams, ensure_ascii=False))

      if successStatus:
        postTestRunAction(instanceId, terminate=True, **g_config)
      else:
        postTestRunAction(instanceId, terminate=False, **g_config)
        raise TestsFailed("Integration test failed")
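# Sketch (hypothetical path): how a downstream stage, such as the AMI-baking
# pipeline above, reads back the verdict that main() writes into the
# pipeline JSON under the "integration_test" key.
if __name__ == "__main__":
  with open("/tmp/manifest_pipeline.json") as exampleJsonFile:
    exampleParams = json.load(exampleJsonFile)
  print exampleParams.get("integration_test", {}).get("testStatus")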
def launchInstance(amiID, config, logger):
  """
    Launch an instance using the AMI-id and other options.
    Wait until the instance is up and return the instance-id and its
    public DNS name.

    :param amiID: The AMI ID of the image the instance will be launched from.

    :param config: A dict containing values for `REGION`,
      `AWS_ACCESS_KEY_ID`, and `AWS_SECRET_ACCESS_KEY`. It also needs:
      - `KEY` = The SSH pub key to use for initialization (e.g.: chef_west)
      - `INSTANCE_TYPE` = The size of EC2 instance to use (e.g.: m3.medium)
      - `JOB_NAME` = used to tag the instance launched
      - `BUILD_NUMBER` = used to tag the instance launched

    :param logger: An initialized logger from the calling pipeline.

    :raises: infrastructure.utilities.exceptions.InstanceLaunchError
      If the instance fails to launch in a set amount of time.

    :returns: A tuple containing the public DNS entry for the server and the
      EC2 Instance ID, in that order.
  """
  # Make sure we have the right security groups set up for the instance. We
  # use `basic_server` to allow SSH access from our office and specified
  # secure IPs. `htm_it_server` allows access to the server on ports 80 & 443
  # universally, but can be limited if necessary.
  if config["REGION"] == "us-west-2":
    securityGroups = ["basic_server", "htm_it_server"]
  elif config["REGION"] == "us-east-1":
    securityGroups = ["basic_server", "htm_it_server_east"]
  else:
    # Guard against proceeding with securityGroups unset
    raise ValueError("Unsupported region: %s" % config["REGION"])

  conn = getEC2Connection(config)
  image = conn.get_image(amiID)

  logger.info("Launching instance from AMI: %s", amiID)
  reservation = image.run(key_name=config["KEY"],
                          security_groups=securityGroups,
                          instance_type=config["INSTANCE_TYPE"])

  instance = reservation.instances[0]
  instanceID = instance.id

  try:
    logger.debug("Waiting for instance %s to boot.", instanceID)
    for _ in xrange(MAX_RETRIES_FOR_INSTANCE_READY):
      logger.debug("Instance state: %s", instance.state)
      if instance.state == "pending":
        sleep(SLEEP_DELAY)
        try:
          instance.update()
        except boto.exception.EC2ResponseError as exc:
          # InvalidInstanceID.NotFound may occur because the ID of a recently
          # created instance has not propagated through the system (due to
          # eventual consistency)
          if exc.error_code != "InvalidInstanceID.NotFound":
            raise
          logger.debug("launchInstance: suppressing transient "
                       "InvalidInstanceID.NotFound on instance=%s", instanceID)
      if instance.state == "running":
        break
    else:
      raise InstanceLaunchError(
        "Instance took more than %d seconds to start" %
        (MAX_RETRIES_FOR_INSTANCE_READY * SLEEP_DELAY))
  except:  # pylint: disable=W0702
    # Preserve the original exception and traceback during cleanup
    try:
      raise
    finally:
      logger.exception("Terminating instance=%s that failed to reach running "
                       "state; state=%s", instanceID, instance.state)
      try:
        terminateInstance(instanceID, config, logger)
      except Exception:  # pylint: disable=W0703
        # Suppress secondary non-system-exiting exception in favor of the
        # original exception
        logger.exception("Termination of instance=%s failed", instanceID)

  publicDnsName = instance.public_dns_name

  instanceTags = {}
  if os.environ.get("JENKINS_HOME"):
    instanceTags["Name"] = "%s-%s" % (os.environ["JOB_NAME"],
                                      jenkins.getBuildNumber(logger=logger))
  else:
    instanceTags["Name"] = "running-locally-by-user:%s" % os.getlogin()
  instanceTags["Description"] = "Testing AMI %s" % amiID

  if "BUILD_URL" in os.environ.keys():
    instanceTags["JENKINS_LINK"] = os.environ["BUILD_URL"]
  if "GIT_BRANCH" in os.environ.keys():
    instanceTags["GIT_BRANCH"] = os.environ["GIT_BRANCH"]
  if "GIT_COMMIT" in os.environ.keys():
    instanceTags["GIT_COMMIT"] = os.environ["GIT_COMMIT"]

  try:
    conn.create_tags([instanceID], instanceTags)
  except:
    terminateInstance(instanceID, config, logger)
    raise

  logger.info("Instance %s is running, public dns : %s", instanceID,
              publicDnsName)
  return publicDnsName, instanceID
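# Usage sketch (hypothetical values; assumes initPipelineLogger is importable
# here and accepts just a pipeline name, and that the key pair and instance
# type exist in the target AWS account):
if __name__ == "__main__":
  exampleConfig = {"REGION": "us-west-2",
                   "AWS_ACCESS_KEY_ID": os.environ["AWS_ACCESS_KEY_ID"],
                   "AWS_SECRET_ACCESS_KEY": os.environ["AWS_SECRET_ACCESS_KEY"],
                   "KEY": "chef_west",
                   "INSTANCE_TYPE": "m3.medium"}
  dnsName, ec2ID = launchInstance("ami-0123456789abcdef0", exampleConfig,
                                  initPipelineLogger("launch-instance-demo"))
  print "Launched %s at %s" % (ec2ID, dnsName)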
                                                          terminateInstance)
from infrastructure.utilities.exceptions import (
  MissingAWSKeysInEnvironment,
  TestsFailed
)
from infrastructure.utilities.grok_server import (
  getApiKey, waitForGrokServerToBeReady)
from infrastructure.utilities.jenkins import getWorkspace
from infrastructure.utilities.logger import initPipelineLogger


# Prepare configuration
g_config = yaml.load(resource_stream(__name__, "../conf/config.yaml"))
g_config["JOB_NAME"] = os.environ.get("JOB_NAME", "Local Run")
g_config["BUILD_NUMBER"] = jenkins.getBuildNumber()

g_dirname = os.path.abspath(os.path.dirname(__file__))
g_remotePath = "/opt/numenta/grok/tests/results/py2/xunit/jenkins/results.xml"
g_rpmBuilder = "rpmbuild.groksolutions.com"
g_s3RepoPath = "/opt/numenta/s3repo/s3/x86_64"
s3Bucket = "public.numenta.com"

FIRST_BOOT_RUN_TRIES = 18
GROK_SERVICES_TRIES = 6
GROK_AWS_CREDENTIALS_SETUP_TRIES = 30
SLEEP_DELAY = 10


def analyzeResults(resultsPath):
  """