Example #1
def readS3Contents(bucket, path, connection, logger):
  """
  Read an arbitrary S3 object and return the contents.

  :param bucket: Name of S3 bucket to read from. Required.

  :param path: path in S3. Required.

  :param connection: a boto connection object. Required.

  :param logger: An initialized logger object. Required.

  :returns: data for the S3 object pointed to by path

  :rtype: str
  """
  if not bucket:
    raise InvalidParametersError("No bucket object specified.")
  if not connection:
    raise InvalidParametersError("No boto connection argument")
  if not logger:
    raise InvalidParametersError("No logger specified")
  if not path:
    raise InvalidParametersError("No path specified")

  logger.debug("Connecting to S3 bucket %s", bucket)
  s3Bucket = connection.get_bucket(bucket, validate=False)

  key = boto.s3.key.Key(s3Bucket)
  key.key = path
  key.open()

  logger.debug("Reading %s/%s", bucket, path)
  s3value = key.read()
  logger.debug("value: %s", s3value)
  return s3value
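A minimal usage sketch for readS3Contents; the bucket name, object path, logger setup, and boto connection below are illustrative assumptions, not part of the example above:

# Hypothetical caller for readS3Contents.
import logging

import boto

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("readS3Contents-demo")

# boto.connect_s3() picks up AWS credentials from the environment or ~/.boto;
# pass aws_access_key_id / aws_secret_access_key explicitly if needed.
connection = boto.connect_s3()

data = readS3Contents(bucket="example-bucket",        # placeholder bucket
                      path="path/to/object.txt",      # placeholder key
                      connection=connection,
                      logger=logger)
print(len(data))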
Example #2
def getInstances(region="us-west-2",
                 awsAccessKeyId=None,
                 awsSecretAccessKey=None,
                 logger=None):
  """
    List all the instances.

    :param region: AWS region
    :param awsAccessKeyId: AWS access key ID
    :param awsSecretAccessKey: AWS secret access key
    :param logger: An initialized logger from the calling pipeline.

    :returns: A list of instances
    :rtype: list
  """
  if not logger:
    raise InvalidParametersError("getInstances: Missing logger")
  if not awsAccessKeyId:
    raise InvalidParametersError("getInstances: Missing awsAccessKeyId")
  if not awsSecretAccessKey:
    raise InvalidParametersError("getInstances: Missing awsSecretAccessKey")

  config = {}
  config["REGION"] = region
  config["AWS_ACCESS_KEY_ID"] = awsAccessKeyId
  config["AWS_SECRET_ACCESS_KEY"] = awsSecretAccessKey
  conn = getEC2Connection(config)
  instances = []
  reservations = conn.get_all_reservations()
  for reservation in reservations:
    for instance in reservation.instances:
      instances.append(instance)
  if not instances:
    logger.debug("No instances available for the given credentials.")
  return instances
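A usage sketch for getInstances; pulling the credentials from the environment and the logger setup are assumptions made only for illustration:

import logging
import os

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("getInstances-demo")

# Credentials are read from the environment here purely for illustration.
instances = getInstances(region="us-west-2",
                         awsAccessKeyId=os.environ.get("AWS_ACCESS_KEY_ID"),
                         awsSecretAccessKey=os.environ.get("AWS_SECRET_ACCESS_KEY"),
                         logger=logger)
for instance in instances:
  # boto 2 EC2 instance objects expose id and state attributes.
  logger.info("%s is %s", instance.id, instance.state)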
Example #3
def loadInstanceTags(instanceId,
                     logger,
                     awsAccessKeyId,
                     awsSecretAccessKey,
                     region=DEFAULT_REGION):
    """
  Read the tags from a given instanceID

  :param instanceId: Instance ID to read

  :param logger: Initialized logger object

  :param awsAccessKey: AWS access key. Required.

  :param awsSecretAccessKey: AWS secret access key. Required.

  :param region: AWS region the instance is in

  :returns: tags for an instance ID

  :rtype: boto instance tags object

  :raises CommandFailedError if it can't find the instanceID
  :raises InstanceNotFoundError if instanceId is not found in region
  :raises InvalidParametersError if the arguments fail sanity check
  """
    # Sanity check arguments
    if not logger:
        raise InvalidParametersError("loadInstanceTags requires a logger")
    if not awsAccessKeyId:
        raise InvalidParametersError("loadInstanceTags:Missing awsAccessKeyId")
    if not awsSecretAccessKey:
        raise InvalidParametersError(
            "loadInstanceTags:Missing awsSecretAccessKey")

    ec2Config = {
        "AWS_ACCESS_KEY_ID": awsAccessKeyId,
        "AWS_SECRET_ACCESS_KEY": awsSecretAccessKey,
        "REGION": region
    }

    conn = getEC2Connection(config=ec2Config)

    # Load info for the instance
    reservations = conn.get_all_reservations(
        filters={"instance-id": instanceId})

    try:
        instance = reservations[0].instances[0]
        return instance.tags
    except IndexError:
        raise InstanceNotFoundError("Could not find instance %s in %s" %
                                    (instanceId, region))
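A sketch of how loadInstanceTags might be called; the instance ID is a placeholder and the credential handling is assumed:

import logging
import os

logger = logging.getLogger("loadInstanceTags-demo")

tags = loadInstanceTags(instanceId="i-0123456789abcdef0",  # placeholder ID
                        logger=logger,
                        awsAccessKeyId=os.environ.get("AWS_ACCESS_KEY_ID"),
                        awsSecretAccessKey=os.environ.get("AWS_SECRET_ACCESS_KEY"),
                        region="us-west-2")
# boto 2 exposes instance.tags as a dict-like TagSet, e.g. {"Name": "web-1"}
for name, value in tags.items():
    logger.info("tag %s = %s", name, value)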
Example #4
    def __init__(self, config):
        # convert dict to object
        if isinstance(config, dict):
            tmpConfig = type('Config', (), {})()
            for k, v in config.items():
                setattr(tmpConfig, k, v)
            config = tmpConfig
        failmsg = None
        if config.sitePackagesTarball:
            if config.flavor != "grok":
                failmsg = "--site-packages is only used for grok packages."
        if config.flavor is None:
            failmsg = "You must set a type of rpm to create with --rpm-flavor"
        if config.artifacts == []:
            failmsg = "You must specify artifacts in the fakeroot to package."
            if config.flavor == "grok":
                failmsg = failmsg + " Grok rpms should specify opt"
            if config.flavor == "infrastructure":
                failmsg = failmsg + " Infrastructure rpms should specify opt"
            if config.flavor == "saltcellar":
                failmsg = failmsg + " Saltcellar rpms should specify srv"
        if failmsg:
            raise InvalidParametersError(failmsg)
        self.config = config
        self.environment = dict(os.environ)
        self.fakeroot = None
        self.logger = diagnostics.initPipelineLogger(name="create-numenta-rpm",
                                                     logLevel=config.logLevel)
        self.productsDirectory = None
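Based on the sanity checks in __init__ above, a config for this class might look like the sketch below; the field set is inferred only from the attributes the constructor reads, and the class name is a placeholder for illustration:

# Hypothetical config dict; __init__ converts it to an object via setattr.
config = {
    "flavor": "grok",                # rpm flavor, must not be None
    "artifacts": ["opt"],            # must be non-empty
    "sitePackagesTarball": None,     # only valid for grok packages
    "logLevel": "debug",             # passed to initPipelineLogger
}

rpmBuilder = NumentaRPMBuilder(config)  # class name assumed for this sketch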
Example #5
    def cleanScripts(self):
        """
    Cleans the grok directory before packaging.
    """

        productsDirectory = self.productsDirectory
        config = self.config
        environment = self.environment
        logger = self.logger

        logger.info("Running cleanup scripts...")
        for cleaner in config.cleanupScripts:
            cleanerPath = os.path.join(productsDirectory, cleaner)
            workDirectory = os.path.dirname(cleanerPath)
            logger.debug("Changing to %s", workDirectory)
            logger.debug("Attempting to run %s", cleanerPath)
            if not os.path.exists(cleanerPath):
                raise InvalidParametersError("%s does not exist!" %
                                             cleanerPath)
            with changeToWorkingDir(workDirectory):
                runWithOutput("%s %s" % (cleanerPath, "--destroy-all-my-work"),
                              env=environment)
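cleanScripts relies on a changeToWorkingDir context manager from the surrounding utilities; a minimal sketch of such a helper, written here only to illustrate the pattern rather than the project's actual implementation, could look like:

import os
from contextlib import contextmanager

@contextmanager
def changeToWorkingDir(directory):
    # Switch into directory for the duration of the with-block, then restore
    # the previous working directory even if the block raises.
    originalDir = os.getcwd()
    os.chdir(directory)
    try:
        yield directory
    finally:
        os.chdir(originalDir)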
Example #6
def downloadNuPICWheel(sha=None, downloadDirectory="/tmp", logger=None):
    """
    Downloads a NuPIC wheel from S3 for a given SHA.

    If no NuPIC SHA is provided then the wheel version is read from
    "stable_nupic_version/nupic-package-version.txt" and if a SHA is
    provided then the wheel version is read from "stable_nupic_version/<SHA>".

    :param sha: NuPIC SHA to download the wheel for; if None, the current
                stable wheel version is used
    :param downloadDirectory: The directory to download the wheel into
    :param logger: An initialized logger object
    :raises InvalidParametersError: If no NuPIC wheel exists for the given SHA
    :returns: The absolute path to the NuPIC wheel.
    :rtype: string
    """
    if sha:
        path = "stable_nupic_version/%s" % sha
    else:
        path = "stable_nupic_version/nupic-package-version.txt"
    bucketName = "builds.numenta.com"

    try:
        with open(downloadFileFromS3(bucketName, path, logger),
                  "r") as fHandle:
            contents = fHandle.readline().strip()
            nupicSHA = contents.split(":")[0].strip()
            wheelFile = contents.split(":")[1].strip()
    except AttributeError:
        if logger:
            logger.debug("NuPIC wheel for %s not found in S3", sha)
        raise InvalidParametersError("NuPIC wheel for %s not found" % sha)
    else:
        if logger:
            logger.debug("Downloading NuPIC wheel from S3: %s", wheelFile)
        with changeToWorkingDir(downloadDirectory):
            wheelFilePath = downloadFileFromS3(bucketName=bucketName,
                                               path="builds_nupic_wheel/%s" %
                                               wheelFile,
                                               logger=logger)

    return wheelFilePath
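A usage sketch for downloadNuPICWheel; the logger setup is an assumption, and S3 access is presumed to be configured elsewhere:

import logging

logger = logging.getLogger("downloadNuPICWheel-demo")

# With sha=None the wheel version comes from
# stable_nupic_version/nupic-package-version.txt (see docstring above).
wheelPath = downloadNuPICWheel(sha=None,
                               downloadDirectory="/tmp",
                               logger=logger)
logger.info("NuPIC wheel downloaded to %s", wheelPath)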
Example #7
    def constructGrokFakeroot(self):
        """
    Construct a Grok fakeroot directory tree.

    1. Add any directories specified with --extend-pythonpath to the PYTHONPATH
       we will be using for setup.py, build scripts and the cleanup scripts.

    2. Install any wheels that have been specied by --use-wheel

    3. Run setup.py in any directories that have been specified with
       --setup-py-dir. Uses the arguments specfied by --setup-py-arguments.

    4. Run any build scripts specified by --build-script

    5. Run any cleanup scripts specified by --cleanup-script

    6. Purge any files or directories at the top level of the checkout that were
       not whitelisted with --whitelist.

    :returns: (iteration, actualSHA) where iteration is the total commit count
    in the repository and fakerootSHA is the SHA in the fakeroot. If we're
    packaging a branch or tip of master, we're still going to want to know what
    the SHA was so we can include it in the RPM description.

    :rtype: tuple
    """

        config = self.config
        fakeroot = self.fakeroot
        logger = self.logger

        logger.info("Preparing Grok fakeroot in %s\n", fakeroot)

        actualSHA = self.installProductsIntoGrokFakeroot()

        productsDirectory = self.productsDirectory
        grokPath = os.path.join(productsDirectory, "grok")
        iteration = git.getCommitCount(productsDirectory)

        # Extend PYTHONPATH for setup.py, build & cleanup scripts
        # pythonpathExtensions
        logger.debug("**************************************************")
        logger.info("Phase 1: Preparing PYTHONPATH and installing wheels")
        # Set extra python path
        self.setPythonPath()
        environment = self.environment
        sitePackagesDirectory = "%s/grok/lib/python2.7/site-packages" % \
                                productsDirectory

        # Install wheels if any have been specified
        with changeToWorkingDir(grokPath):
            for wheel in config.wheels:
                logger.info("Installing %s", os.path.basename(wheel))
                if not os.path.exists(wheel):
                    raise InvalidParametersError("%s does not exist!" % wheel)
                pipCommand = "pip install %s --no-deps --target=%s" % \
                  (wheel, sitePackagesDirectory)
                logger.debug("pip command: %s", pipCommand)
                runWithOutput(pipCommand)
                logger.debug("wheel install complete")

        # Run setup.py if specified
        logger.info("Phase 2: Running setup.py commands")

        for pyDir in config.setupPyDirs:
            pyDirPath = "%s/%s" % (productsDirectory, pyDir)
            logger.debug("Changing to %s", pyDirPath)
            with changeToWorkingDir(pyDirPath):
                setupCommand = "python setup.py develop --prefix=%s/grok" % \
                               productsDirectory
                logger.debug("Running %s", setupCommand)
                runWithOutput(setupCommand, env=environment)

        # Run any build scripts. We assume that they should be run in the
        # directory they're in.
        logger.info("Phase 3: Running build scripts...")
        for builder in config.buildScripts:
            builderPath = "%s/%s" % (fakeroot, builder)
            logger.debug("Attempting to run %s", builderPath)
            if not os.path.exists(builderPath):
                raise InvalidParametersError("%s does not exist!" %
                                             builderPath)
            workDirectory = os.path.dirname(builderPath)
            logger.debug("Changing to %s", workDirectory)
            with changeToWorkingDir(workDirectory):
                runWithOutput(builderPath, env=environment)

        # Run any cleanup scripts. We assume that they should be run in the
        # directory they're in.
        logger.info("Phase 4: Running cleanup scripts...")
        # Clean Scripts
        self.cleanScripts()

        logger.info("Phase 5: Purge anything not whitelisted.")
        # Purge anything not whitelisted
        self.purgeBlacklistedStuff()

        return (iteration, actualSHA)
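constructGrokFakeroot appears to be a method on the same packaging class as the __init__ in Example #4; a sketch of a call site, with the instance name assumed:

# rpmBuilder is assumed to be an instance of the packaging class shown in
# Example #4, already configured with wheels, setupPyDirs, buildScripts, etc.
iteration, actualSHA = rpmBuilder.constructGrokFakeroot()
rpmBuilder.logger.info("Packaged commit %s (commit count %s)",
                       actualSHA, iteration)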
Example #8
def runCommandBySSH(dnsName,
                    command,
                    logger,
                    maxRetries=120,
                    sleepDelay=1,
                    user=DEFAULT_USER,
                    silent=False,
                    sshKeyFile=None):
  """
  Run a command on an instance, retrying multiple times.

  :param dnsName: DNS name to run the command on. Required.

  :param command: command to run. Required.

  :param logger: An already initialized logger object. Required.

  :param maxRetries: Maximum retries before giving up on running the command

  :param sleepDelay: Time in seconds between retries

  :param user: User to run the command as.

  :param silent: If True, suppress command output to console.

  :param sshKeyFile: SSH private key to use. Use the user's keys from their
                     ssh-agent keyring if unset.

  :raises: InstanceLaunchError if the command fails to run or has a failure
           during the run.

  :returns: fabric run result of the command

  :rtype: fabric run result
  """
  if not command:
    raise InvalidParametersError("runCommandBySSH requires a command")
  if not dnsName:
    raise InvalidParametersError("runCommandBySSH requires a dnsName")
  if not logger:
    raise InvalidParametersError("runCommandBySSH requires a logger")

  kwargs={ "host_string": dnsName,
           "user": user,
           "timeout": sleepDelay,
           "connection_attempts": maxRetries }
  # We only need to specify a key_filename to settings when sshKeyFile is
  # not None
  if sshKeyFile:
    kwargs["key_filename"] = sshKeyFile

  # Force fabric not to abort if the command fails or we won't be able
  # to retry
  kwargs["warn_only"] = True

  with settings(**kwargs):
    logger.debug("Running %s on %s as %s", command, dnsName, user)
    tries = 0
    while tries < maxRetries:
      tries = tries + 1
      try:
        if silent:
          with hide("output", "running"):
            result = run(command)
        else:
          result = run(command)
        if result.return_code == 0:
          logger.debug("%s completed successfully", command)
          return result
        if tries >= maxRetries:
          raise InstanceLaunchError("%s failed to run" % command)
        else:
          logger.debug("Try %s failed, retrying in %s seconds", tries,
                       sleepDelay)
          sleep(sleepDelay)
      except NetworkError:
        # If we can't connect to SSH, fabric raises NetworkError
        if tries >= maxRetries:
          raise InstanceLaunchError("%s failed to run after %s tries" %
                                    (command, tries))
        logger.debug("Network error for try %s, retrying in %s seconds",
                     tries, sleepDelay)
        sleep(sleepDelay)
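A usage sketch for runCommandBySSH; the DNS name, user, and retry values below are placeholders chosen only for illustration:

import logging

logger = logging.getLogger("runCommandBySSH-demo")

# Retries for roughly maxRetries * sleepDelay seconds while the instance's
# sshd comes up, then runs the command as the given user.
result = runCommandBySSH(dnsName="ec2-203-0-113-10.us-west-2.compute.amazonaws.com",
                         command="uptime",
                         logger=logger,
                         maxRetries=30,
                         sleepDelay=2,
                         user="ec2-user",
                         sshKeyFile=None)  # fall back to the ssh-agent keyring
print(result.return_code)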