def buildNuPICCore(env, nupicCoreSHA, logger): """ Builds nupic.core :param env: The environment which will be set before building. :param nupicCoreSHA: The SHA which will be built. :raises infrastructure.utilities.exceptions.NupicBuildFailed: This exception is raised if build fails. """ print "\n----------Building nupic.core------------" log.printEnv(env, logger) with changeToWorkingDir(env["NUPIC_CORE_DIR"]): try: logger.debug("Building nupic.core SHA : %s ", nupicCoreSHA) git.resetHard(nupicCoreSHA) runWithOutput("mkdir -p build/scripts", env, logger) with changeToWorkingDir("build/scripts"): runWithOutput("cmake ../../src -DCMAKE_INSTALL_PREFIX=../release", env, logger) runWithOutput("make -j 4", env, logger) runWithOutput("make install", env, logger) except CommandFailedError: raise NupicBuildFailed("nupic.core building failed.Exiting") except: raise PipelineError("nupic.core building failed due to unknown reason.") else: logger.info("nupic.core building was successful.")
def runTests(env, logger):
  """
    Runs NuPIC tests.

    :param env: The environment which will be set for running tests.
    :param logger: An initialized logger for debug output.

    :raises: infrastructure.utilities.exceptions.NupicBuildFailed
      if the given SHA is not found.
  """
  logger.debug("Running NuPIC Tests.")
  with changeToWorkingDir(env["NUPIC"]):
    try:
      log.printEnv(env, logger)
      # Run the region test first, then the full unit-test suite with
      # coverage and xunit-XML results.
      for testCmd in ("bin/py_region_test",
                      "scripts/run_nupic_tests -u --coverage --results xml"):
        runWithOutput(testCmd, env, logger)
    except:
      logger.exception("NuPIC Tests have failed.")
      raise
    else:
      # Pick up the xunit XML produced by the run above and archive it.
      xmlResult = glob.glob("%s/tests/results/xunit/*/*.xml" % env["NUPIC"])[0]
      logger.debug("Copying results to results folder.")
      shutil.move(xmlResult, createOrReplaceResultsDir())
      logger.info("NuPIC tests have passed")
def runWithOutput(command, env=None, logger=None):
  """
  Run a command, printing as the command executes.

  @param command: Command to run. Can be str or list
  @param env: environment variables to use while running command; defaults to
    os.environ when not given
  @param logger: optional logger for additional debug info if desired

  @raises infrastructure.utilities.exceptions.CommandFailedError: when the
    command exits non-zero
  """
  try:
    # Resolve the default environment at call time, not def time
    if env is None:
      env = os.environ
    if logger is not None:
      diagnostics.printEnv(env, logger)
      logger.debug("**********> %s", command)
    # check_call needs an argv list; naively split a string command on
    # single spaces (NOTE(review): quoted arguments containing spaces would
    # be split incorrectly — confirm callers never pass those)
    if isinstance(command, basestring):
      command = command.strip().split(" ")
    check_call(command, env=env)
  except CalledProcessError:
    errMessage = "Failed to execute: %s" % (command,)
    raise CommandFailedError(errMessage)
  # Catch other exceptions, add info about what command triggered them
  except Exception as e:
    errMessage = "Failed to execute: %s; original=%r" % (command, e,)
    if logger is not None:
      logger.exception(errMessage)
    raise
def runWithOutput(command, env=None, logger=None):
  """
  Run a command, printing as the command executes.

  @param command: Command to run. Can be str or list
  @param env: environment variables to use while running command; defaults to
    os.environ when not supplied
  @param logger: optional logger for additional debug info if desired

  @raises infrastructure.utilities.exceptions.CommandFailedError: when the
    command exits non-zero
  """
  if env is None:
    env = os.environ
  try:
    if logger is not None:
      diagnostics.printEnv(env, logger)
      logger.debug("**********> %s", command)
    # Normalize a string command into the argv list that check_call expects
    if isinstance(command, basestring):
      command = command.strip().split(" ")
    check_call(command, env=env)
  except CalledProcessError:
    raise CommandFailedError("Failed to execute: %s" % (command, ))
  except Exception as e:
    # Anything unexpected: log what command triggered it, then re-raise
    message = "Failed to execute: %s; original=%r" % (command, e, )
    if logger is not None:
      logger.exception(message)
    raise
def executeCommand(command, env=None, logger=None):
  """
  Execute a command and return the raw output

  @param command: String or list containing the exact command to execute.
  @param env: The environment required to execute the command which is
    passed. By default use os.environ
  @param logger: logger for additional debug info if desired

  @raises infrastructure.utilities.exceptions.CommandFailedError if the
    command fails

  @returns: A str representing the raw output of the command
  @rtype: string
  """
  try:
    # Fix: resolve the default at call time; a def-time `env=os.environ`
    # default is evaluated once at import. Passing env explicitly is
    # unchanged, so the interface stays backward compatible.
    if env is None:
      env = os.environ
    if logger:
      log.printEnv(env, logger)
      logger.debug(command)
    # check_output needs an argv list; split a string command on spaces
    if isinstance(command, basestring):
      command = command.strip().split(" ")
    return check_output(command, env=env).strip()
  except CalledProcessError:
    # Fix: the message must be %-formatted here — the original passed
    # `command` as a second constructor argument, so the %s placeholder
    # was never interpolated into the exception message.
    raise CommandFailedError("Failed to execute command: %s" % (command,))
def executeCommand(command, env=None, logger=None):
  """
  Execute a command and return the raw output

  @param command: String or list containing the exact command to execute.
  @param env: The environment required to execute the command which is
    passed; defaults to os.environ when not supplied
  @param logger: logger for additional debug info if desired

  @raises infrastructure.utilities.exceptions.CommandFailedError if the
    command fails

  @returns: A str representing the raw output of the command
  @rtype: string
  """
  if env is None:
    env = os.environ
  try:
    if logger is not None:
      diagnostics.printEnv(env, logger)
      logger.debug("**********> %s", command)
    # A string command becomes the argv list check_output expects
    if isinstance(command, basestring):
      command = command.strip().split(" ")
    return check_output(command, env=env).strip()
  except CalledProcessError as e:
    raise CommandFailedError(
      "Failed to execute: %s; original=%r" % (command, e,))
def buildNuPICCore(env, nupicCoreSHA, logger): """ Builds nupic.core :param env: The environment which will be set before building. :param nupicCoreSHA: The SHA which will be built. :raises infrastructure.utilities.exceptions.NupicBuildFailed: This exception is raised if build fails. """ print "\n----------Building nupic.core------------" log.printEnv(env, logger) with changeToWorkingDir(env["NUPIC_CORE_DIR"]): try: logger.debug("Building nupic.core SHA : %s ", nupicCoreSHA) git.resetHard(nupicCoreSHA) runWithOutput("mkdir -p build/scripts", env, logger) with changeToWorkingDir("build/scripts"): runWithOutput( "cmake ../../src -DCMAKE_INSTALL_PREFIX=../release", env, logger) runWithOutput("make -j 4", env, logger) runWithOutput("make install", env, logger) except CommandFailedError: raise NupicBuildFailed("nupic.core building failed.Exiting") except: raise PipelineError( "nupic.core building failed due to unknown reason.") else: logger.info("nupic.core building was successful.")
def buildNuPIC(env, logger): """ Builds NuPIC :param env: The environment which will be set before building :raises infrastructure.utilities.exceptions.NupicBuildFailed: This exception is raised if build fails. """ print "\n----------Building NuPIC------------" log.printEnv(env, logger) # Build with changeToWorkingDir(env["NUPIC"]): try: try: shutil.rmtree("build") except OSError: # didn't exist, so just pass pass # install requirements runWithOutput( "pip install --install-option=--prefix=%s --requirement " "external/common/requirements.txt" % env["NTA"], env=env, logger=logger) # need to remove this folder for wheel build to work shutil.rmtree("external/linux32arm") # build the wheel command = ( "python setup.py bdist_wheel bdist_egg --nupic-core-dir=%s" % os.path.join(env["NUPIC_CORE_DIR"], "build", "release")) # Building on jenkins, not local if "JENKINS_HOME" in env: command += " upload -r numenta-pypi" runWithOutput(command, env=env, logger=logger) except: logger.exception("Failed while building nupic") raise NupicBuildFailed("NuPIC building failed.") else: open("nupic.stamp", "a").close() logger.debug("NuPIC building was successful.")
def preBuildSetup(env, pipelineConfig):
  """
    Clone the Grok repo if needed and get it set to the right remote, branch,
    and SHA. Once set, if the NuPIC parameters need to be revised, take care
    of that now, too.

    :param env: The environment variable which is set before building
    :param pipelineConfig: dict of the pipeline config values, e.g.:
      {
        "buildWorkspace": "/path/to/build/in",
        "grokRemote": "[email protected]:Numenta/numenta-apps.git",
        "grokBranch": "master",
        "grokSha": "HEAD",
        "nupicRemote": "[email protected]:numenta/nupic.git",
        "nupicBranch": "master",
        "nupicSha": "HEAD",
        "pipelineParams": "{dict of parameters}",
        "pipelineJson": "/path/to/json/file",
        "wheelFilePath": "/path/to/wheel/file"
      }

    :returns: The updated pipelineConfig dict
    :rtype: dict
  """
  log.printEnv(env, g_logger)

  # Clone Grok if needed, otherwise, setup remote
  with changeToWorkingDir(pipelineConfig["buildWorkspace"]):
    if not os.path.isdir(env["GROK_HOME"]):
      git.clone(pipelineConfig["grokRemote"], directory="products")

  with changeToWorkingDir(env["GROK_HOME"]):
    if pipelineConfig["grokSha"]:
      g_logger.debug("Resetting to %s", pipelineConfig["grokSha"])
      git.resetHard(pipelineConfig["grokSha"])
    else:
      # No explicit SHA given; resolve the branch tip and record it so the
      # rest of the pipeline sees a concrete SHA
      grokSha = git.getShaFromRemoteBranch(pipelineConfig["grokRemote"],
                                           pipelineConfig["grokBranch"])
      pipelineConfig["grokSha"] = grokSha
      g_logger.debug("Resetting to %s", grokSha)
      git.resetHard(grokSha)

  wheelFilePath = downloadOrCreateNuPICWheel(env, pipelineConfig)
  pipelineConfig["wheelFilePath"] = wheelFilePath
  # Fix: the docstring promises the updated config is returned, but the
  # original never returned it (callers only saw in-place mutation)
  return pipelineConfig
def buildNuPIC(env, logger): """ Builds NuPIC :param env: The environment which will be set before building :raises infrastructure.utilities.exceptions.NupicBuildFailed: This exception is raised if build fails. """ print "\n----------Building NuPIC------------" log.printEnv(env, logger) # Build with changeToWorkingDir(env["NUPIC"]): try: try: shutil.rmtree("build") except OSError: # didn't exist, so just pass pass # install requirements runWithOutput("pip install --install-option=--prefix=%s --requirement " "external/common/requirements.txt" % env["NTA"], env=env, logger=logger) # need to remove this folder for wheel build to work shutil.rmtree("external/linux32arm") # build the wheel command = ("python setup.py bdist_wheel bdist_egg --nupic-core-dir=%s" % os.path.join(env["NUPIC_CORE_DIR"], "build", "release")) # Building on jenkins, not local if "JENKINS_HOME" in env: command += " upload -r numenta-pypi" runWithOutput(command, env=env, logger=logger) except: logger.exception("Failed while building nupic") raise NupicBuildFailed("NuPIC building failed.") else: open("nupic.stamp", "a").close() logger.debug("NuPIC building was successful.")
def runWithOutput(command, env=None, logger=None):
  """
  Run a command, printing as the command executes.

  @param command: Command to run. Can be str or list
  @param env: environment variables to use while running command; defaults to
    os.environ when not given
  @param logger: optional logger for additional debug info if desired

  @raises infrastructure.utilities.exceptions.CommandFailedError: when the
    command exits non-zero
  """
  try:
    # Fix: use a call-time None sentinel instead of a def-time
    # `env=os.environ` default, matching the other command helpers in this
    # file; callers passing env explicitly are unaffected.
    if env is None:
      env = os.environ
    if logger:
      log.printEnv(env, logger)
      logger.debug(command)
    # check_call needs an argv list; split a string command on spaces
    if isinstance(command, basestring):
      command = command.strip().split(" ")
    check_call(command, env=env)
  except CalledProcessError:
    raise CommandFailedError("Failed to execute command: %s" % command)
def runUnitTests(env, pipeline, grokSha, logger): """ Runs tests listed in files present at {GROK_HOME}/tests/ci/ :param env: Current environ set for GROK_HOME, etc :param pipeline: name of repository which has triggered this build :param grokSha: grok SHA used current run :returns: return True if tests are successful :rtype: bool """ # Print environment for debug purposes printEnv(env, logger) buildWorkspace = os.environ["BUILD_WORKSPACE"] task = "_".join([pipeline, grokSha, str(uuid.uuid4())]) xunitSuccess = True with open( os.path.join(env["GROK_HOME"], "tests/ci/test_commands_xunit.txt"), "r") as tests: xunitTests = [test.strip() % dict(globals().items() + \ locals().items()) for test in tests] with changeToWorkingDir(os.path.join(buildWorkspace, "products")): g_logger.debug(os.getcwd()) for xunitTest in xunitTests: logger.info("-------Running %s -------" % xunitTest) xunitSuccess = runTestCommand(xunitTest, env, logger) logger.info("\n\n###### COMPLETED %s tests ######\n\n" % xunitTest) if "WORKSPACE" in os.environ: # `WORKSPACE` should only be set by Jenkins and we only want to record # the test results if we're on Jenkins logger.info("\n\n###### Recording Results %s######\n\n" % xunitTest) recordXunitTestsResults(task) if not xunitSuccess: logger.error("-------Failed %s -------" % xunitTest) break return xunitSuccess
def runUnitTests(env, pipeline, nupicSha, grokSha, logger): """ Runs tests listed in files present at {GROK_HOME}/tests/ci/ :param env: Current environ set for GROK_HOME, NUPIC etc :param pipeline: name of repository which has triggered this build :param grokSha: grok SHA used current run :param nupicSha: NuPIC SHA for used current run :returns: return True if tests are successful :rtype: bool """ # Print environment for debug purposes printEnv(env, logger) buildWorkspace = os.environ["BUILD_WORKSPACE"] task = "_".join([pipeline, nupicSha, grokSha, str(uuid.uuid4())]) xunitSuccess = True with open(os.path.join(env["GROK_HOME"], "tests/ci/test_commands_xunit.txt"), "r") as tests: xunitTests = [test.strip() % dict(globals().items() + \ locals().items()) for test in tests] with changeToWorkingDir(os.path.join(buildWorkspace, "products")): g_logger.debug(os.getcwd()) for xunitTest in xunitTests: logger.info("-------Running %s -------" % xunitTest) xunitSuccess = runTestCommand(xunitTest, env, logger) logger.info("\n\n###### COMPLETED %s tests ######\n\n" % xunitTest) if "WORKSPACE" in os.environ: # `WORKSPACE` should only be set by Jenkins and we only want to record # the test results if we're on Jenkins logger.info("\n\n###### Recording Results %s######\n\n" % xunitTest) recordXunitTestsResults(task) if not xunitSuccess: logger.error("-------Failed %s -------" % xunitTest) break return xunitSuccess
def preBuildSetup(env, pipelineConfig):
  """
    Clone the YOMP repo if needed and get it set to the right remote, branch,
    and SHA.

    :param env: The environment variable which is set before building
    :param pipelineConfig: dict of the pipeline config values, e.g.:
      {
        "buildWorkspace": "/path/to/build/in",
        "YOMPRemote": "[email protected]:Numenta/numenta-apps.YOMP",
        "YOMPBranch": "master",
        "YOMPSha": "HEAD",
        "pipelineParams": "{dict of parameters}",
        "pipelineJson": "/path/to/json/file"
      }

    :returns: The updated pipelineConfig dict
    :rtype: dict
  """
  log.printEnv(env, g_logger)

  # Clone YOMP if needed, otherwise, setup remote
  with changeToWorkingDir(pipelineConfig["buildWorkspace"]):
    if not os.path.isdir(env["YOMP_HOME"]):
      YOMP.clone(pipelineConfig["YOMPRemote"], directory="products")

  with changeToWorkingDir(env["YOMP_HOME"]):
    if pipelineConfig["YOMPSha"]:
      g_logger.debug("Resetting to %s", pipelineConfig["YOMPSha"])
      YOMP.resetHard(pipelineConfig["YOMPSha"])
    else:
      # No explicit SHA given; resolve the branch tip and record it so the
      # rest of the pipeline sees a concrete SHA
      YOMPSha = YOMP.getShaFromRemoteBranch(pipelineConfig["YOMPRemote"],
                                            pipelineConfig["YOMPBranch"])
      pipelineConfig["YOMPSha"] = YOMPSha
      g_logger.debug("Resetting to %s", YOMPSha)
      YOMP.resetHard(YOMPSha)
  # Fix: the docstring promises the updated config is returned, but the
  # original never returned it (callers only saw in-place mutation)
  return pipelineConfig
def executeCommand(command, env=None, logger=None):
  """
  Execute a command and return the raw output

  @param command: String or list containing the exact command to execute.
  @param env: The environment required to execute the command which is
    passed. By default use os.environ
  @param logger: logger for additional debug info if desired

  @raises infrastructure.utilities.exceptions.CommandFailedError if the
    command fails

  @returns: A str representing the raw output of the command
  @rtype: string
  """
  try:
    env = os.environ if env is None else env
    if logger is not None:
      diagnostics.printEnv(env, logger)
      logger.debug("**********> %s", command)
    # Turn a string command into the argv list check_output expects
    if isinstance(command, basestring):
      command = command.strip().split(" ")
    rawOutput = check_output(command, env=env)
    return rawOutput.strip()
  except CalledProcessError as e:
    raise CommandFailedError(
      "Failed to execute: %s; original=%r" % (command, e,))
def buildRpms(env, YOMPSha, releaseVersion, artifactsDir, logger, config,
              YOMPRemote):
  """
    Builds an rpm for YOMP

    Takes the sha according to YOMP and checks that the sha.json file is
    present (also checks if the rpm is present on rpmbuild and in S3), if not
    it creates the rpm.

    :param env: The environment variables which is set.
    :param YOMPSha: The YOMP sha.
    :param releaseVersion: The product version which will be used in the
      name of RPM
    :param artifactsDir: In this directory the artifacts will be stored.
    :param logger: An initialized logger
    :param config: This is a dict of configuration data here we are using
      AWS secret and access.
    :param YOMPRemote: The git remote URL passed to the rpm-creator
      (--YOMP-url)

    :returns: syncRpmStatus(It is list which will help recongnize if RPM's
      rpm should be synced) and rpmNameDetails(It is a dict which contains
      the RPM name of YOMP)

    :raises: infrastructure.utilities.exceptions.MissingRPMError,
      when RPM is not found.
      infrastructure.utilities.exceptions.FailedToMoveRPM,
      if there is some error while moving RPM's to
      rpmbuild.YOMPsolutions.com
  """
  rpmNameDetails = {}
  rpmName = "YOMP"
  try:
    syncRpm = False
    sha = YOMPSha
    # Skip the expensive build entirely if this sha's rpm already exists
    rpmExists = checkRpmExists(rpmName, sha, rpmNameDetails, config, logger)
    with shell_env(**env):
      if not rpmExists:
        logger.info("Creating %s rpm.", rpmName)
        # Clean stale rpms
        with changeToWorkingDir(OPERATIONS_SCRIPTS):
          try:
            # Delete any previously created rpm
            for name in glob.glob("nta-products-YOMP-*.rpm"):
              os.remove(name)
            log.printEnv(env, logger)
            infrastuctureCommonPath = os.path.join(
              PRODUCTS_PATH, "infrastructure", "infrastructure")
            # Assemble the create-numenta-rpm invocation; each literal below
            # is one CLI flag of the rpm-creator tool
            command = (
              "%s/create-numenta-rpm" % infrastuctureCommonPath +
              " --rpm-flavor YOMP" +
              " --debug" +
              " --cleanup-script YOMP/YOMP/pipeline/scripts/rpm-creator" +
              "/clean-YOMP-tree-for-packaging" +
              " --whitelist YOMP" +
              " --whitelist nta.utils" +
              " --whitelist htmengine" +
              " --whitelist infrastructure" +
              " --whitelist install-YOMP.sh" +
              " --base-version " + releaseVersion +
              " --description YOMP-installed-from-products-repo" +
              " --rpm-name nta-products-YOMP" +
              " --tempdir /tmp/YOMPbuild" +
              " --setup-py-arguments develop" +
              " --log-level debug" +
              " --setup-py-dir nta.utils" +
              " --setup-py-dir htmengine" +
              " --setup-py-dir infrastructure" +
              " --extend-pythonpath YOMP/lib/python2.7/site-packages" +
              " --sha " + YOMPSha +
              " --artifact opt" +
              " --YOMP-url " + YOMPRemote)
            # Due to some environment issue's I have used local here,
            # we can change this later.
            # fixme https://jira.numenta.com/browse/TAUR-797
            from fabric.api import local
            local(command)
            # getting name of the RPM created
            nameOfRpmCreated = glob.glob(
              "nta-products-YOMP-*.rpm").pop()
            if not nameOfRpmCreated:
              raise exceptions.MissingRPMError(
                "%s rpm name not found exiting" % rpmName)
            # Creating artifact
            with open("%s.txt" % rpmName, "w") as fp:
              fp.write(nameOfRpmCreated)
            logger.info(
              "\n\n######### %s RPM created #########\n\n" % rpmName)
          except:
            # NOTE(review): bare except converts every failure (including
            # interrupts) into RPMBuildingError — confirm this is intended
            raise exceptions.RPMBuildingError(
              "Failed while creating %s RPM." % rpmName)
          else:
            syncRpm = True
            filename = os.path.join(OPERATIONS_SCRIPTS, "%s.txt" % rpmName)
            # updating rpm details
            rpmNameDetails.update({rpmName: nameOfRpmCreated})
            # moving the rpms name to artifacts directory
            move(filename, artifactsDir)
            shaFileName = createShaFile(nameOfRpmCreated, sha)
            # move rpmname to rpmbuild
            status = moveRpmsToRpmbuild(nameOfRpmCreated, config, logger)
            if status:
              uploadShaFiletoBucket(rpmName, shaFileName, logger)
              # deleting the rpm after copying to rpmbuild
              os.remove("%s/%s" % (OPERATIONS_SCRIPTS, nameOfRpmCreated))
            else:
              raise exceptions.FailedToMoveRPM("Failed to move rpms to "
                                               "rpmbuilder machine")
      else:
        logger.info(
          "RPM for %s with %s sha already exists,"
          "skipping creation of rpm!!", rpmName, sha)
    return syncRpm, rpmNameDetails
  except Exception:
    logger.exception("RPM building failed.")
    raise
def buildRpms(env, grokSha, releaseVersion, artifactsDir, logger, config,
              grokRemote):
  """
    Builds an rpm for grok that contains an embedded NuPIC wheel

    Takes the sha according to grok or nupic and checks that the sha.json
    file is present (also checks if the rpm is present on rpmbuild and in
    S3), if not it creates the rpm.

    :param env: The environment variables which is set.
    :param grokSha: The grok sha.
    :param releaseVersion: The product version which will be used in the
      name of RPM
    :param artifactsDir: In this directory the artifacts will be stored.
    :param logger: An initialized logger
    :param config: This is a dict of configuration data here we are using
      AWS secret and access.
    :param grokRemote: The git remote URL passed to the rpm-creator
      (--git-url)

    :returns: syncRpmStatus(It is list which will help recongnize if RPM's
      rpm should be synced) and rpmNameDetails(It is a dict which contains
      the RPM name of Grok and NuPIC)

    :raises: infrastructure.utilities.exceptions.MissingRPMError,
      when RPM is not found.
      infrastructure.utilities.exceptions.FailedToMoveRPM,
      if there is some error while moving RPM's to
      rpmbuild.groksolutions.com
  """
  rpmNameDetails = {}
  rpmName = "grok"
  try:
    syncRpm = False
    sha = grokSha
    # Skip the expensive build entirely if this sha's rpm already exists
    rpmExists = checkRpmExists(rpmName, sha, rpmNameDetails, config, logger)
    with shell_env(**env):
      if not rpmExists:
        logger.info("Creating %s rpm.", rpmName)
        # Download the NuPIC wheel
        # TODO: Make this use the real nupicSHA, not just default to tip of
        # stable
        wheelFilePath = s3.downloadNuPICWheel(sha=None, )
        # Clean stale rpms
        with changeToWorkingDir(OPERATIONS_SCRIPTS):
          try:
            # Delete any previously created rpm
            for name in glob.glob("nta-products-grok-*.rpm"):
              os.remove(name)
            log.printEnv(env, logger)
            infrastuctureCommonPath = os.path.join(PRODUCTS_PATH,
                                                   "infrastructure",
                                                   "infrastructure")
            # Assemble the create-numenta-rpm invocation; each literal below
            # is one CLI flag of the rpm-creator tool
            command = ("%s/create-numenta-rpm" % infrastuctureCommonPath +
                       " --rpm-flavor grok" +
                       " --install-wheel " + wheelFilePath +
                       " --debug" +
                       " --postinstall-script post_install_grok" +
                       " --cleanup-script grok/grok/pipeline/scripts/rpm-creator" +
                       "/clean-grok-tree-for-packaging" +
                       " --whitelist grok" +
                       " --whitelist nta.utils" +
                       " --whitelist htmengine" +
                       " --whitelist install-grok.sh" +
                       " --base-version " + releaseVersion +
                       " --description Grok-installed-from-products-repo" +
                       " --rpm-name nta-products-grok" +
                       " --tempdir /tmp/grokbuild" +
                       " --setup-py-arguments develop" +
                       " --log-level debug" +
                       " --setup-py-dir nta.utils" +
                       " --setup-py-dir htmengine" +
                       " --extend-pythonpath grok/lib/python2.7/site-packages" +
                       " --sha " + grokSha +
                       " --artifact opt" +
                       " --git-url " + grokRemote)
            # Due to some environment issue's I have used local here,
            # we can change this later.
            # fixme https://jira.numenta.com/browse/TAUR-797
            from fabric.api import local
            local(command)
            # getting name of the RPM created
            nameOfRpmCreated = glob.glob("nta-products-grok-*.rpm").pop()
            if not nameOfRpmCreated:
              raise exceptions.MissingRPMError("%s rpm name not found exiting"
                                               % rpmName)
            # Creating artifact
            with open("%s.txt" % rpmName, "w") as fp:
              fp.write(nameOfRpmCreated)
            logger.info("\n\n######### %s RPM created #########\n\n"
                        % rpmName)
          except:
            # NOTE(review): bare except converts every failure (including
            # interrupts) into RPMBuildingError — confirm this is intended
            raise exceptions.RPMBuildingError("Failed while creating %s RPM."
                                              % rpmName)
          else:
            syncRpm = True
            filename = os.path.join(OPERATIONS_SCRIPTS, "%s.txt" % rpmName)
            # updating rpm details
            rpmNameDetails.update({rpmName:nameOfRpmCreated})
            # moving the rpms name to artifacts directory
            move(filename, artifactsDir)
            shaFileName = createShaFile(nameOfRpmCreated, sha)
            # move rpmname to rpmbuild
            status = moveRpmsToRpmbuild(nameOfRpmCreated, config, logger)
            if status:
              uploadShaFiletoBucket(rpmName, shaFileName, logger)
              # deleting the rpm after copying to rpmbuild
              os.remove("%s/%s" % (OPERATIONS_SCRIPTS, nameOfRpmCreated))
            else:
              raise exceptions.FailedToMoveRPM("Failed to move rpms to "
                                               "rpmbuilder machine")
      else:
        logger.info("RPM for %s with %s sha already exists,"
                    "skipping creation of rpm!!", rpmName, sha)
    return syncRpm, rpmNameDetails
  except Exception:
    logger.exception("RPM building failed.")
    raise