Example #1
    def _invokeCommand(self, cmdList=[], cmdString=""):
        """
      Invoke a single command as specified by cmdList and cmdString
      
      Helper for invokeCommands()
      
      NOTE: From experiments, it was determined that the call() method
      needs to be invoked with shell=True.
    """
        methodName = "_invokeCommand"

        if (not cmdList):
            raise MissingArgumentException(
                "A non-empty command list must be provided.")
        #endIf

        if (not cmdString):
            raise MissingArgumentException(
                "A non-empty command string must be provided.")
        #endIf

        TR.info(methodName, "Invoking: %s" % cmdString)
        #retcode = call(cmdList)
        retcode = call(cmdString, shell=True)
        if (retcode != 0):
            raise Exception("Invoking: '%s' Return code: %s" %
                            (cmdString, retcode))
    def createYamlFile(self, filePath, x):
        """
      Dump a yaml representation of the given object (x) to the given filePath. 
    """

        if (not filePath):
            raise MissingArgumentException(
                "The file path (filePath) cannot be empty or None.")
        #endIf

        if (not x):
            raise MissingArgumentException(
                "The object (x) to be dumped to the file cannot be empty or None."
            )
        #endIf

        destDirPath = os.path.dirname(filePath)

        if (not os.path.exists(destDirPath)):
            os.makedirs(destDirPath)
        #endIf

        with open(filePath, 'w') as yamlFile:
            yaml.dump(x, yamlFile, default_flow_style=False)
        #endWith

        return filePath
    def getStackOutput(self, stackId, outputKey):
        """
      For the given stack ID return the value of the given output key.
    """
        methodName = "getStackOutput"

        if (not stackId):
            raise MissingArgumentException(
                "A stack resource ID must be provided.")
        #endIf

        if (not outputKey):
            raise MissingArgumentException("An output key must be provided.")
        #endIf

        response = self.cfnClient.describe_stacks(StackName=stackId)
        if (not response):
            raise AWSStackResourceException(
                "Empty result for CloudFormation describe_stacks for stack: %s"
                % stackId)
        #endIf

        stacks = response.get('Stacks')
        if (len(stacks) != 1):
            raise AWSStackResourceException(
                "Unexpected number of stacks: %d, from describe_stacks for stack: %s"
                % (len(stacks), stackId))
        #endIf

        myStack = stacks[0]
        outputs = myStack.get('Outputs')
        if (not outputs):
            raise AWSStackResourceException(
                "Expecting output with key: %s, defined for stack: %s, but no outputs defined."
                % (outputKey, stackId))
        #endIf

        result = None
        for output in outputs:
            key = output.get('OutputKey')
            if (key == outputKey):
                result = output.get('OutputValue')
                if (TR.isLoggable(Level.FINEST)):
                    TR.finest(
                        methodName, "Got output for key: %s with value: %s" %
                        (outputKey, result))
                #endIf
                break
            #endIf
        #endFor

        # Output value types are strings or other simple types.
        # The actual value of an output will never be None.
        if (result == None):
            TR.warning(
                methodName, "For stack: %s, no output found for key: %s" %
                (stackId, outputKey))
        #endIf

        return result
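
# A minimal standalone sketch of the same output lookup done directly with boto3,
# assuming a hypothetical stack name "my-stack" and output key "VpcId";
# describe_stacks() returns a 'Stacks' list whose entries carry an 'Outputs' list
# of {'OutputKey': ..., 'OutputValue': ...} dictionaries.
import boto3

cfnClient = boto3.client('cloudformation')
stacks = cfnClient.describe_stacks(StackName="my-stack").get('Stacks', [])
outputs = stacks[0].get('Outputs', []) if stacks else []
vpcId = next((o['OutputValue'] for o in outputs if o['OutputKey'] == 'VpcId'), None)
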
Example #4
  def __init__(self, stackId=None, intrinsicVariables=None, configPath=None):
    """
      stackId - the resource ID of the CloudFormation stack to be used for processing security groups.
                Typically, all security groups are defined in a particular CloudFormation stack used 
                to define security profiles, roles and security groups.
                 
      intrinsicVariables - dictionary of key-value pairs that are predefined for the given deployment
                           and may be used for substitution macros in the security configuration file(s).
     
      Optional parameters:
        configPath - file system path to a security configuration YAML file or directory of YAML files
    """
    object.__init__(self)
        
    if (not stackId):
      raise MissingArgumentException("A stack ID must be provided.")
    #endIf
    
    self.stackId = stackId
    
    if (not intrinsicVariables):
      raise MissingArgumentException("A collection of intrinsic variables must be provided.")
    #endIf

    self.IntrinsicVariables = intrinsicVariables.copy()
    self.IntrinsicVariableNames = self.IntrinsicVariables.keys()
    
    self.configPath = configPath
    
    self.cfnClient = boto3.client('cloudformation')
    self.ec2Resource = boto3.resource('ec2')
    self.ec2Client = boto3.client('ec2')
  def getLoadBalancerDNSName(self,stackIds,elbName=None):
    """
      Return the DNSName for the Elastic Load Balancer V2 with the given name as the value
      of its Name tag.
      
      The stackIds parameter holds the list of all the stacks in the CFN deployment.  
      It is assumed there is only 1 ELB in all of those stacks with the given name.
      (The DNSName of the first one found with the given name gets returned.)
      
      The boto3 API for ELBs is rather baroque.
      
      The tags are retrieved using the describe_tags() method.  We need to look at the tags
      in order to find the ELB whose Name tag value matches the given name (elbName).  The 
      response from the describe_tags() call also includes the ARN (resource Id) for 
      the ELB with the set of tags.
      
      Once we have the ELB ARN, we can get its DNSName with a call to describe_load_balancers()
      followed by getting the DNSName attribute from the load balancer description.
      
    """
    
    if (not stackIds):
      raise MissingArgumentException("A list of stack IDs (stackIds) is required.")
    #endIf
    
    if (not elbName):
      raise MissingArgumentException("The ELB name must be provided.")
    #endIf

    dnsName = ""
    
    for stackId in stackIds:
      elbIId = self.getELBResourceIdForName(stackId, elbName=elbName)
      
      if (elbIId):
        response = self.elbv2Client.describe_load_balancers(LoadBalancerArns=[elbIId])
        if (not response):
          raise AWSStackResourceException("Empty response for ELBv2 Client describe_load_balancers() call for ELB with ARN: %s" % elbIId)
        #endIf
    
        loadBalancers = response.get('LoadBalancers')
        if (not loadBalancers):
          raise AWSStackResourceException("No LoadBalancers in response for ELBv2 Client describe_load_balancers() call for ELB with ARN: %s" % elbIId)
        #endIf
    
        if (len(loadBalancers) != 1):
          raise AWSStackResourceException("Unexpected number of LoadBalancers from ELBv2 Client describe_load_balancers() call for ELB with ARN: %s" % elbIId)
        #endIf
    
        loadBalancer = loadBalancers[0]
    
        dnsName = loadBalancer.get('DNSName')
        if (not dnsName):
          raise AWSStackResourceException("Empty DNSName attribute for ELB with ARN: %s" % elbIId)
        #endIf
        break
      #endIf
    #endFor
    
    return dnsName
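
# A minimal standalone sketch of the describe_tags()/describe_load_balancers() flow
# described above, assuming a hypothetical ELBv2 ARN list and the Name tag value
# "my-elb"; all names here are illustrative.
import boto3

elbv2Client = boto3.client('elbv2')
elbArns = ["arn:aws:elasticloadbalancing:us-east-1:123456789012:loadbalancer/net/my-elb/0123456789abcdef"]

dnsName = ""
for tagDescription in elbv2Client.describe_tags(ResourceArns=elbArns).get('TagDescriptions', []):
    tags = tagDescription.get('Tags', [])
    if any(tag.get('Key') == 'Name' and tag.get('Value') == 'my-elb' for tag in tags):
        loadBalancers = elbv2Client.describe_load_balancers(
            LoadBalancerArns=[tagDescription.get('ResourceArn')]).get('LoadBalancers', [])
        dnsName = loadBalancers[0].get('DNSName') if loadBalancers else ""
        break
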
    def fillInDefaultValues(self,
                            parameterNames=None,
                            defaultValues=None,
                            **restArgs):
        """
      Return a dictionary with a value for each parameter in parameterNames, taken
      from restArgs when present. If the parameter is not defined in restArgs, the 
      default value from defaultValues is used.
    """

        result = {}

        if (not parameterNames):
            raise MissingArgumentException(
                "A parameter names list must be provided.")
        #endIf

        if (not defaultValues):
            raise MissingArgumentException(
                "A dictionary of default values must be provided.")
        #endIf

        for parmName in parameterNames:
            parmValue = restArgs.get(parmName, defaultValues.get(parmName))
            if (parmValue):
                result[parmName] = parmValue
            #endIf
        #endFor

        return result
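
# A small usage sketch, assuming `helper` is an instance of the class this method
# belongs to: "Memory" is supplied through restArgs, while "Replicas" falls back
# to its default because the caller does not provide it.
parameterNames = ["Memory", "Replicas"]
defaultValues = {"Memory": "512Mi", "Replicas": 1}
# values = helper.fillInDefaultValues(parameterNames=parameterNames,
#                                     defaultValues=defaultValues,
#                                     Memory="1024Mi")
# values -> {'Memory': '1024Mi', 'Replicas': 1}
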
    def publishReadiness(self, stackName, fqdn):
        """
      Put a parameter in /stackName/fqdn indicating readiness for ICP installation to proceed.
    """
        methodName = "publishReadiness"

        if (not stackName):
            raise MissingArgumentException(
                "The stack name (stackName) must be provided and cannot be empty."
            )
        #endIf

        if (not fqdn):
            raise MissingArgumentException(
                "The FQDN for this node must be provided and cannot be empty.")
        #endIf

        parameterKey = "/%s/%s" % (stackName, fqdn)

        TR.info(methodName,
                "Putting READY to SSM parameter: %s" % parameterKey)
        self.ssm.put_parameter(Name=parameterKey,
                               Description="Cluster node: %s is READY" % fqdn,
                               Value="READY",
                               Type='String',
                               Overwrite=True)
        TR.info(methodName, "Node: %s is READY has been published." % fqdn)
Example #8
  def getS3Object(self, bucket=None, s3Path=None, destPath=None):
    """
      Return destPath which is the local file path provided as the destination of the download.
      
      A pre-signed URL is created and used to download the object from the given S3 bucket
      with the given S3 key (s3Path) to the given local file system destination (destPath).
      
      The destination path is assumed to be a full path to the target destination for 
      the object. 
      
      If the directory of the destPath does not exist it is created.
      It is assumed the objects to be gotten are large binary objects.
      
      For details on how to download a large file with the requests package see:
      https://stackoverflow.com/questions/16694907/how-to-download-large-file-in-python-with-requests-py
    """
    methodName = "getS3Object"
    
    if (not bucket):
      raise MissingArgumentException("An S3 bucket name (bucket) must be provided.")
    #endIf
    
    if (not s3Path):
      raise MissingArgumentException("An S3 object key (s3Path) must be provided.")
    #endIf
    
    if (not destPath):
      raise MissingArgumentException("A file destination path (destPath) must be provided.")
    #endIf
    
    TR.info(methodName, "STARTED download of object: %s from bucket: %s, to: %s" % (s3Path,bucket,destPath))
    
    s3url = self.s3.generate_presigned_url(ClientMethod='get_object',Params={'Bucket': bucket, 'Key': s3Path})
    if (TR.isLoggable(Level.FINE)):
      TR.fine(methodName,"Getting S3 object with pre-signed URL: %s" % s3url)
    #endIf
    
    destDir = os.path.dirname(destPath)
    if (not os.path.exists(destDir)):
      os.makedirs(destDir)
      TR.info(methodName,"Created object destination directory: %s" % destDir)
    #endIf
    
    r = requests.get(s3url, stream=True)
    with open(destPath, 'wb') as destFile:
      shutil.copyfileobj(r.raw, destFile)
    #endWith

    TR.info(methodName, "COMPLETED download from bucket: %s, object: %s, to: %s" % (bucket,s3Path,destPath))
    
    return destPath
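
# A standalone sketch of the pre-signed URL download pattern used above, with a
# hypothetical bucket, key and destination path; the streaming copy follows the
# Stack Overflow reference cited in the docstring.
import shutil

import boto3
import requests

s3 = boto3.client('s3')
s3url = s3.generate_presigned_url(ClientMethod='get_object',
                                  Params={'Bucket': 'my-bucket', 'Key': 'binaries/archive.tar.gz'})
r = requests.get(s3url, stream=True)
with open('/tmp/archive.tar.gz', 'wb') as destFile:
    shutil.copyfileobj(r.raw, destFile)
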
  def getELBResourceIdForName(self,stackId,elbName=None):
    """
      Return the Elastic Load Balancer ARN with the given name as the value of its Name tag.
      
      If no ELB is found with the given name in its Name tag then the empty string is returned.
    """
    if (not stackId):
      raise MissingArgumentException("A stack ID (stackId) is required.")
    #endIf

    if (not elbName):
      raise MissingArgumentException("An Elastic Load Balancer Name (elbName) must be provided.")
    #endIf
    
    elbResourceId = ""
    
    elbIIds = self.listELBResoures(stackId)
    
    if (elbIIds):
      for elbIId in elbIIds:
        response = self.elbv2Client.describe_tags(ResourceArns=[elbIId])
        if (not response):
          raise AWSStackResourceException("Empty response for ELBv2 Client describe_tags() for Elastic Load Balancer with ARN: %s" % elbIId)
        #endIf
      
        tagDescriptions = response.get('TagDescriptions')
        if (len(tagDescriptions) != 1):
          raise AWSStackResourceException("Unexpected number of TagDescriptions in describe_tags() response from ELB with ARN: %s" % elbIId)
        #endIf
        
        tagDescription = tagDescriptions[0]
        tags = tagDescription.get('Tags')
        if (not tags):
          raise AWSStackResourceException("All Elastic Load Balancers must have at least a Name tag.  No tags found for ELB with ARN: %s" % elbIId)
        #endIf
        
        for tag in tags:
          if (tag.get('Key') == 'Name'):
            if (tag.get('Value') == elbName):
              elbResourceId = tagDescription.get('ResourceArn')
              break
            #endIf
          #endIf
        #endFor
        
        if (elbResourceId): break
      #endFor
    #endIf
    
    return elbResourceId
Example #10
  def _processSecurityGroup(self, sg):
    """
      Process an object of type: EC2_SecurityGroup
      
      If there is a security group defined with the same name as the given security
      group, then the given security group is checked for any new ingress or egress 
      rules to be added to the existing security group.
      
      If there is no security group already defined with the name of the given 
      security group then a new security group is created as defined by the given
      security group.
    """
    methodName = "_processSecurityGroup"

    if (not sg):
      raise MissingArgumentException("A security group object (securityGroup) must be provided.")
    #endIf

    if TR.isLoggable(Level.FINER):
      TR.finer(methodName,"Processing security group: %s" % sg.group_name)
    #endIf    
    
    # existing_sg is an instance of the boto3 EC2.SecurityGroup
    existing_sg = self.getSecurityGroup(self.stackId, sg.group_name)
    if (not existing_sg):
      self._createSecurityGroup(sg)
    else:
      self._updateSecurityGroup(existing_sg,sg)
  def __init__(self, commandPath=None, intrinsicVariables=None, **restArgs):
    """
      Constructor
      
      commandPath - path to directory that holds the command definition yaml files
      intrinsicVariables - dictionary of name-value pairs for variables defined by the framework

      restArgs:
      
        restArgs may have values for custom variables that override the 
        custom variable values in the variables yaml
         
    """    
    object.__init__(self)
    
    if (not commandPath):
      raise MissingArgumentException("The command path must be provided.")
    #endIf

    self.commandPath = commandPath
    
    if (intrinsicVariables):
      self.IntrinsicVariables = intrinsicVariables.copy()
      self.IntrinsicVariableNames = self.IntrinsicVariables.keys()
    #endIf
    
    sensitiveVariables = restArgs.get('sensitiveVariables')
    if (sensitiveVariables):
      self.SensitiveVariables = sensitiveVariables
    #endIf
    
    self.home = os.path.expanduser('~')
      
    self.variableValues = self.__initVariableValues(self.commandPath, **restArgs)
    self.metadata = self.__initMetaData(self.commandPath)
  def listELBResoures(self,stackId):
    """
      Return a list of ELB resource instance IDs from the given stack.
      
      An empty list is returned if there are no ELB instances in the given stack.
    """
    
    if (not stackId):
      raise MissingArgumentException("A stack ID (stackId) is required.")
    #endIf

    response = self.cfnClient.list_stack_resources(StackName=stackId)
    if (not response):
      raise AWSStackResourceException("Empty result for CloudFormation list_stack_resources for stack: %s" % stackId)
    #endIf
    
    stackResources = response.get('StackResourceSummaries')
    if (not stackResources):
      raise AWSStackResourceException("Empty StackResourceSummaries in response from CloudFormation list_stack_resources for stack: %s." % stackId)
    #endIf

    elbIIDs = []
    for resource in stackResources:
      resourceType = resource.get('ResourceType')
      if (resourceType == 'AWS::ElasticLoadBalancingV2::LoadBalancer'):
        elbInstanceId = resource.get('PhysicalResourceId')
        elbIIDs.append(elbInstanceId)        
      #endIf
    #endFor

    return elbIIDs
 def getConfigParameters(self, parameters):
   """
     Return a dictionary with configuration parameter name-value pairs extracted
     from the given parameters dictionary.
     
     Only parameters with names in the ConfigurationParameterNames list
     are included in the result set.
   
   """
   methodName = "getConfigParameters"
   
   if (not parameters):
     raise MissingArgumentException("The dictionary of parameters from which to get the configuration parameters must be provided.")
   #endIf
   
   if (TR.isLoggable(Level.FINEST)):
     TR.finest(methodName,"ConfigurationParameterNames: %s" % ConfigurationParameterNames)
   #endIf
   
   result = {}
   
   for parmName in parameters.keys():
     if (parmName in ConfigurationParameterNames):
       result[parmName] = parameters[parmName]
     #endIf
   #endFor
   
   return result
    def __init__(self,
                 commandSetsPath=None,
                 intrinsicVariables=None,
                 sensitiveVariables=None):
        """
      Constructor
    """
        object.__init__(self)

        if (not commandSetsPath):
            raise MissingArgumentException(
                "A file system path to the command sets directory must be provided."
            )
        #endIf
        self.commandSetsPath = commandSetsPath

        if (intrinsicVariables):
            self.IntrinsicVariables = intrinsicVariables.copy()
            self.IntrinsicVariableNames = self.IntrinsicVariables.keys()
        #endIf

        if (sensitiveVariables):
            self.SensitiveVariables = sensitiveVariables
        #endIf

        self.rc = 0
    def mountEFSVolumes(self, volumes):
        """
      Mount the EFS storage volumes for the audit log and the Docker registry.
      
      volumes is either a singleton instance of EFSVolume or a list of instances
      of EFSVolume.  EFSVolume has everything needed to mount the volume on a
      given mount point.
      
      NOTE: It is assumed that nfs-utils (RHEL) or nfs-common (Ubuntu) has been
      installed on the nodes where EFS mounts are implemented.

      Depending on what EFS example you look at the options to the mount command vary.
      The options used in this method are from this AWS documentation:
      https://docs.aws.amazon.com/efs/latest/ug/wt1-test.html
      Step 3.3 has the mount command template and the options are:
      nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport
      
      We explicitly add the following default mount options:
      rw,suid,dev,exec,auto,nouser
    """
        methodName = "mountEFSVolumes"

        if (not volumes):
            raise MissingArgumentException(
                "One or more EFS volumes must be provided.")
        #endIf

        if (type(volumes) != type([])):
            volumes = [volumes]
        #endIf

        # See method doc above for AWS source for mount options used in the loop body below.
        options = "rw,suid,dev,exec,auto,nouser,nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport"

        for volume in volumes:
            if (not os.path.exists(volume.mountPoint)):
                os.makedirs(volume.mountPoint)
                TR.info(
                    methodName, "Created directory for EFS mount point: %s" %
                    volume.mountPoint)
            elif (not os.path.isdir(volume.mountPoint)):
                raise Exception(
                    "EFS mount point path: %s exists but is not a directory." %
                    volume.mountPoint)
            else:
                TR.info(
                    methodName,
                    "EFS mount point: %s already exists." % volume.mountPoint)
            #endIf
            retcode = call("mount -t nfs4 -o %s %s:/ %s" %
                           (options, volume.efsServer, volume.mountPoint),
                           shell=True)
            if (retcode != 0):
                raise Exception(
                    "Error return code: %s mounting to EFS server: %s with mount point: %s"
                    % (retcode, volume.efsServer, volume.mountPoint))
            #endIf
            TR.info(
                methodName, "%s mounted on EFS server: %s:/ with options: %s" %
                (volume.mountPoint, volume.efsServer, options))
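
# For reference, the command the loop above issues per volume, shown with a
# hypothetical EFS server DNS name and mount point substituted into the
# "mount -t nfs4 -o <options> <efsServer>:/ <mountPoint>" template:
#
#   mount -t nfs4 -o rw,suid,dev,exec,auto,nouser,nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport fs-12345678.efs.us-east-1.amazonaws.com:/ /var/lib/registry
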
Example #16
    def __init__(self, **restArgs):
        """
      Constructor
      
      Required arguments:
        ClusterDNSName    - the DNS name used for the CN of the cluster cert and used to access the ICP admin console
        HelmKeyPath       - the path to the user key file, e.g., ~/.kube/kubecfg.key or <icp_home>/cluster/cfc-certs/helm/admin.key
        HelmCertPath      - the path to the user cert file, e.g., ~/.kube/kubecfg.cert or <icp_home>/cluster/cfc-certs/helm/admin.crt
        ClusterCertPath   - the path to the file that holds the CA cert.  The cluster CA cert is usually used for this.
    """
        object.__init__(self)

        self.ClusterDNSName = restArgs.get('ClusterDNSName')
        if (not self.ClusterDNSName):
            raise MissingArgumentException(
                "The cluster DNS name to be used to access the ICP master must be provided."
            )
        #endIf

        self.HelmKeyPath = restArgs.get('HelmKeyPath')
        if (not self.HelmKeyPath):
            raise MissingArgumentException(
                "The file path to the Helm user key must be provided.")
        #endIf

        self.HelmCertPath = restArgs.get('HelmCertPath')
        if (not self.HelmCertPath):
            raise MissingArgumentException(
                "The file path to the Helm user certificate must be provided.")
        #endIf

        self.ClusterCertPath = restArgs.get('ClusterCertPath')
        if (not self.ClusterCertPath):
            raise MissingArgumentException(
                "The file path to the cluster certificate must be provided.")
        #endIf

        HelmHome = restArgs.get('HelmHome')
        if (not HelmHome):
            HelmHome = os.path.join(os.path.expanduser('~'), ".helm")
        #endIf
        self.HelmHome = HelmHome

        self.UserKeyPath = os.path.join(self.HelmHome, "key.pem")
        self.UserCertPath = os.path.join(self.HelmHome, "cert.pem")
        self.CACertPath = os.path.join(self.HelmHome, "ca.pem")
    def loadInstallMap(self, version=None, region=None):
        """
      Return a dictionary that holds all the installation image information needed to 
      retrieve the installation images from S3. 
      
      Which install images to use is driven by the ICP version.
      Which S3 bucket to use is driven by the AWS region of the deployment.
      
      The source of the information is icp-install-artifact-map.yaml packaged with the
      bootstrap script package.  The yaml file holds the specifics regarding which bucket
      to use and the S3 path for the ICP and Docker images as well as the Docker image
      name and the inception commands to use for the installation.          
    """
        methodName = "loadInstallMap"

        if (not version):
            raise MissingArgumentException("The ICP version must be provided.")
        #endIf

        if (not region):
            raise MissingArgumentException("The AWS region must be provided.")
        #endIf

        installDocPath = os.path.join(self.home, "maps",
                                      "icp-install-artifact-map.yaml")

        with open(installDocPath, 'r') as installDocFile:
            installDoc = yaml.safe_load(installDocFile)
        #endWith

        if (TR.isLoggable(Level.FINEST)):
            TR.finest(methodName, "Install doc: %s" % installDoc)
        #endIf

        installMap = installDoc.get(version)
        if (not installMap):
            raise ICPInstallationException(
                "No ICP or Docker installation images defined for ICP version: %s"
                % version)
        #endIf

        # The version is needed to get to the proper folder in the region bucket.
        installMap['version'] = version
        installMap['s3bucket'] = self.ICPArchiveBucketName

        return installMap
Example #18
    def scpGetFile_broken(self, fromPath=None, toPath=None):
        """
      Use scp to copy a file from the master node to the local file system
      
      WARNING: This never worked due to some issue with paramiko and pycryptodome.
               I didn't have time to sort it out.  I think paramiko needs to use
               PyCrypto, but PyCrypto is no longer maintained and has security holes.
    """
        methodName = "scpGetFile"

        if (not self.masterNode):
            raise FileTransferException(
                "The master host IP address or host name must be defined when the instance is created."
            )
        #endIf

        if (not fromPath):
            raise MissingArgumentException(
                "The source path (fromPath) on the remote file system must be provided."
            )
        #endIf

        if (not toPath):
            raise MissingArgumentException(
                "The destination path (toPath) on the local file system must be provided."
            )
        #endIf

        if (TR.isLoggable(Level.FINEST)):
            TR.finest(methodName,
                      "Creating an ssh connection to: %s" % self.masterNode)
        #endIf
        self.ssh.connect(self.masterNode)

        scp = SCPClient(self.ssh.get_transport())

        if (TR.isLoggable(Level.FINER)):
            TR.finer(
                methodName, "From host: %s, copying: %s to %s" %
                (self.masterNode, fromPath, toPath))
        #endIf

        scp.get(fromPath, local_path=toPath)

        if (TR.isLoggable(Level.FINEST)):
            TR.finest(methodName, "Copy completed")
Example #19
 def __init__(self,region=None, bucket=None, keyPrefix='logs', role=None, fqdn=None):
   """
     Constructor
     
     region - the AWS region name
     bucket - the S3 bucket name.  The bucket gets created if it does not exist.
     keyPrefix - the S3 key prefix to be used for each log export to S3, 
       e.g., logs/<stackname> where <stackname> is the name of the CloudFormation
             stack associated with the root template for a given deployment.
             The root stack name is unique.
             Using logs as the beginning of the prefix keeps all logs in a 
             separate "folder" of the bucket.
     role - the role of the node (VM) exporting the logs
     fqdn - fully qualified domain name of the node exporting the logs
            The FQDN provides uniqueness as there may be more than one node 
            with a given role.
   """
   object.__init__(self)
   
   if (not region):
     raise MissingArgumentException("The AWS region name must be provided.")
   #endIf
   self.region = region
   
   if (not bucket):
     raise MissingArgumentException("The S3 bucket name for the exported logs must be provided.")
   #endIf
   self.bucket = bucket
   
   self.keyPrefix = keyPrefix
   
   if (not role):
     raise MissingArgumentException("The role of the node exporting the logs must be provided.")
   #endIf
   self.role = role
   
   if (not fqdn):
     raise MissingArgumentException("The FQDN of the node exporting the logs must be provided.")
   #endIf
   self.fqdn = fqdn
   
   self.s3Helper = S3Helper(region=region)
   
   if (not self.s3Helper.bucketExists(bucket)):
     self.s3Helper.createBucket(bucket,region=region)
Example #20
    def scpGetFile(self, fromPath=None, toPath=None):
        """
      Use scp to copy a file from the master node to the local file system
      
      This implementation uses a call out to bash to do the scp.
    """
        methodName = "scpGetFile"

        if (not self.masterNode):
            raise FileTransferException(
                "The master host IP address or host name must be defined when the instance is created."
            )
        #endIf

        if (not fromPath):
            raise MissingArgumentException(
                "The source path (fromPath) on the remote file system must be provided."
            )
        #endIf

        if (not toPath):
            raise MissingArgumentException(
                "The destination path (toPath) on the local file system must be provided."
            )
        #endIf

        if (TR.isLoggable(Level.FINER)):
            TR.finer(
                methodName, "From host: %s, copying: %s to %s" %
                (self.masterNode, fromPath, toPath))
        #endIf

        retcode = call(["scp", "%s:%s" % (self.masterNode, fromPath), toPath])
        if (retcode != 0):
            raise Exception("Error calling scp. Return code: %s" % retcode)
        #endIf

        if (TR.isLoggable(Level.FINEST)):
            TR.finest(methodName, "Copy completed")
    def __init__(self, region=None, stackId=None, **restArgs):
        """
      Constructor
      
      The region input is the AWS region where the EFS provisioner is running.
      
      The stackId input parameter is expected to be an AWS stack resource ID.
      The stackId is used to get the stack parameters among which is:
         EFSDNSName
         ApplicationStorageMountPoint
         EFSFileSystemId
         ClusterDNSName
      
      The restArgs keyword arguments include the following required parameters:
        playbookPath         - the path to the playbook to use to configure EFS
        varTemplatePath      - the path to the EFS configuration variable template
        manifestTemplatePath - the path to the EFS provisioner manifest YAML
        rbacTemplatePath     - the path to the EFS provisioner RBAC YAML
        serviceAccountPath   - the path to the EFS service account YAML
    """
        object.__init__(self)

        if (not region):
            raise MissingArgumentException(
                "The AWS region name must be provided.")
        #endIf
        self.AWSRegion = region

        if (not stackId):
            raise MissingArgumentException(
                "The CloudFormation boot stack ID (stackId) must be provided.")
        #endIf

        self.stackId = stackId
        self.cfnResource = boto3.resource('cloudformation')
        self.home = os.path.expanduser("~")
        self._init(stackId, **restArgs)
Example #22
  def configureSecurity(self, configPath=None):
    """
      Process one or more security configuration files, either given in the configPath 
      or associated with the instance at the time it was created.
    """
    
    if (not configPath):
      configPath = self.configPath
    else:
      self.configPath = configPath
    #endIf
    
    if (not configPath):
      raise MissingArgumentException("The security configuration file or directory path must be provided either at instance creation or on the method call.")
    #endIf
    
    if (os.path.isdir(configPath)):
      templateFiles = self.getYaml(configPath)
      if (not templateFiles):
        raise InvalidArgumentException("No .yaml files found in directory path: %s" % configPath)
      #endIf
    else:
      if (not os.path.isfile(configPath)):
        raise InvalidArgumentException("The given path is not a file: %s" % configPath)
      #endIf
      templateFiles = [configPath]
    #endIf
    
    stagingDir = os.path.join(os.getcwd(),'staging')
    # Configuration files get created in the staging directory.
    if (not os.path.exists(stagingDir)):
      os.mkdir(stagingDir)
    #endIf
    
    for template in templateFiles:
      baseName = os.path.basename(template)
      rootName,ext = os.path.splitext(baseName)
      configFilePath = os.path.join(stagingDir,"%s-config%s" % (rootName,ext))
      
      self.createConfigFile(configFilePath=configFilePath,
                            templateFilePath=template,
                            parameters=self.IntrinsicVariables
                            )
      
      self._configureSecurity(configFilePath)
    #endFor
  #endDef
  
#endClass
    def invokeCommands(self, cmdDocs, start, **kwargs):
        """
      Process command docs, invoking each command in sequence as long as the doc is of kind s3.  

      Processing of cmdDocs stops as soon as a doc kind that is not s3 is encountered.

      All cmdDocs that are processed are marked with a status attribute with the value PROCESSED.
             
      cmdDocs - a list of 1 or more YAML documents loaded from yaml.load_all()
      by the caller.
      
      start - index where to start processing in the cmdDocs list.
      
      NOTE: The method for each command is responsible for pulling out the arguments for the 
      underlying S3 method.  The S3 client methods only accept the arguments in the signature.
      Extraneous keyword arguments cause an exception to be raised.
    """
        if (not cmdDocs):
            raise MissingArgumentException(
                "A non-empty list of command documents (cmdDocs) must be provided."
            )
        #endIf

        for i in range(start, len(cmdDocs)):
            doc = cmdDocs[i]

            kind = doc.get('kind')
            if (not kind or kind != 's3'):
                break
                # done

            command = doc.get('command')
            if (not command):
                raise InvalidArgumentException(
                    "A helm command document: %s, must have a command attribute."
                    % doc)
            #endIf

            getattr(self, command)(**doc)

            doc['status'] = 'PROCESSED'
        #endFor

    #endDef


#endClass
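
# A hypothetical command document for invokeCommands() above; per the NOTE in the
# docstring, the method named by 'command' receives the whole document as keyword
# arguments and pulls out what it needs.  The command name and keys below are
# illustrative only.
import yaml

cmdDocs = list(yaml.safe_load_all("""
kind: s3
command: copyObject
bucket: my-bucket
s3Path: binaries/archive.tar.gz
destPath: /tmp/archive.tar.gz
"""))
# s3Helper.invokeCommands(cmdDocs, 0)    # assuming s3Helper is an instance of the class above
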
Example #24
    def invokeCommands(self, cmdDocs, start, **kwargs):
        """
      Process command docs, invoking each command in sequence as long as the doc is of kind docker.  

      Processing of cmdDocs stops as soon as a doc kind that is not docker is encountered.

      All cmdDocs that are processed are marked with a status attribute with the value PROCESSED.
             
      cmdDocs - a list of 1 or more YAML documents loaded from yaml.load_all()
      by the caller.
      
      start - index where to start processing in the cmdDocs list.
      
    """
        if (not cmdDocs):
            raise MissingArgumentException(
                "A non-empty list of command documents (cmdDocs) must be provided."
            )
        #endIf

        for i in range(start, len(cmdDocs)):
            doc = cmdDocs[i]

            kind = doc.get('kind')
            if (not kind or kind != 'docker'):
                break
                # done

            command = doc.get('command')
            if (not command):
                raise InvalidArgumentException(
                    "A docker command document: %s, must have a command attribute."
                    % doc)
            #endIf

            getattr(self, command)(**doc)

            doc['status'] = 'PROCESSED'

        #endFor

    #endDef


#endClass
Example #25
    def __init__(self, user='******', clusterName=None, masterNode=None):
        """
      Constructor
    """

        object.__init__(self)

        self.user = user
        self.masterNode = masterNode
        self.home = os.path.expanduser('~')
        self.kube = os.path.join(self.home, ".kube")

        if (not clusterName):
            raise MissingArgumentException(
                "The cluster name must be provided.")
        #endIf

        self.clusterName = clusterName
  def _getVIPs(self, stackId):
    """
      Return a dictionary where the key of each entry is the VIP name
      and the value associated with each key is the private IP address.
      
      The stack with the given stackId is expected to have two 
      EC2::NetworkInterface resources that have a private IP address 
      assigned to them.  One IP address is used for the master VIP 
      and the other is used for the proxy VIP.
      
      Each NetworkInterface has a Name tag that identifies which IP
      is to be used for the MasterVIP and which for the ProxyVIP.
      The specific IP that is used for a given VIP does not matter, but
      for the purpose of knowing which is being used for what, the 
      NetworkInterface resources are named.
    """

    vips = {}
    
    if (not stackId):
      raise MissingArgumentException("A stack ID (stackId) is required.")
    #endIf
    
    response = self.cfnClient.list_stack_resources(StackName=stackId)
    if (not response):
      raise AWSStackResourceException("Empty result for CloudFormation list_stack_resources for stack: %s" % stackId)
    #endIf
    
    stackResources = response.get('StackResourceSummaries')
    if (not stackResources):
      raise AWSStackResourceException("Empty StackResourceSummaries in response from CloudFormation list_stack_resources for stack: %s." % stackId)
    #endIf

    for resource in stackResources:
      resourceType = resource.get('ResourceType')
      if (resourceType == 'AWS::EC2::NetworkInterface'):
        interfaceId = resource.get('PhysicalResourceId')
        vipName,vipAddress = self._getVIPNameAndAddress(interfaceId)
        vips[vipName] = vipAddress        
      #endIf
    #endFor

    return vips
    def _getRequiredArgs(self, method, **kwargs):
        """
      Return a list of required arguments for the given method 
    """
        requiredArgs = []
        argNames = S3ClientMethodRequiredArgs.get(method)
        if (argNames):
            for argName in argNames:
                argValue = kwargs.get(argName)
                if (argValue == None):
                    raise MissingArgumentException(
                        "The S3 client method: '%s' requires a '%s' argument."
                        % (method, argName))
                #endIf
                requiredArgs.append(argValue)
            #endFor
        #endIF

        return requiredArgs
 def runAnsiblePlaybook(self, playbook=None, extraVars=None, inventory="/etc/ansible/hosts"):
   """
     Invoke a shell script to run an Ansible playbook with the given arguments.
     
     extraVars can be a list of argument values or a single string with space separated argument values.
       list example: [ "target_nodes=icp", "host_addres=9.876.54.32", "host_name=mycluster.example.com" ]
       string example: "target_nodes=icp host_addres=9.876.54.32 host_name=mycluster.example.com" 
     
   """
   methodName = "runAnsiblePlaybook"
   
   if (not playbook):
     raise MissingArgumentException("The playbook path must be provided.")
   #endIf
   
   try:
     if (extraVars):
       if (type(extraVars) != type([])):
         # Assume extraVars is a string with space separated values
         extraVars = extraVars.split()
       #endIf
       
       cmd = ["ansible-playbook", playbook, "--inventory", inventory]
       for var in extraVars:
         cmd.extend(["-e", "%s" % var])
       #endFor
       TR.info(methodName, "Executing: cmd: %s" % cmd)
       retcode = call(cmd)
     else:
       TR.info(methodName, 'Executing: ansible-playbook %s, --inventory %s.' % (playbook,inventory))
       retcode = call(["ansible-playbook", playbook, "--inventory", inventory ] )
     #endIf        
     if (retcode != 0):
       raise Exception("Error calling ansible-playbook. Return code: %s" % retcode)
     else:
       TR.info(methodName,"ansible-playbook: %s completed." % playbook)
     #endIf
   except Exception as e:
     TR.error(methodName,"Error calling ansible-playbook: %s" % e, e)
     raise
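
# Hypothetical usage, assuming `helper` is an instance of this class; extraVars may
# be a list or a single space separated string, as noted in the docstring.
# helper.runAnsiblePlaybook(playbook="/root/playbooks/configure-efs.yaml",
#                           extraVars=["target_nodes=icp", "host_name=mycluster.example.com"])
# The call above runs:
#   ansible-playbook /root/playbooks/configure-efs.yaml --inventory /etc/ansible/hosts -e target_nodes=icp -e host_name=mycluster.example.com
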
    def createBucket(self, bucketName, region=None):
        """
      Return an instance of S3 bucket either for a bucket that already
      exists or for a newly created bucket in the given region.
      
      NOTE: Region is required, either on the method call or to the S3Helper instance. 
      
    """
        methodName = "createBucket"

        bucket = None
        if (self.bucketExists(bucketName)):
            bucket = self.s3Resource.Bucket(bucketName)
        else:
            if (region):
                response = self.s3Client.create_bucket(
                    Bucket=bucketName,
                    CreateBucketConfiguration={'LocationConstraint': region})
            elif (self.region):
                response = self.s3Client.create_bucket(
                    Bucket=bucketName,
                    CreateBucketConfiguration={
                        'LocationConstraint': self.region
                    })
            else:
                raise MissingArgumentException(
                    "The AWS region name for the bucket must be provided either to the S3Helper instance or in the createBucket() arguments."
                )
            #endIf

            if (TR.isLoggable(Level.FINE)):
                TR.fine(
                    methodName, "Bucket: %s created in region: %s" %
                    (bucketName, response.get('Location')))
            #endIf
            bucket = self.s3Resource.Bucket(bucketName)
        #endIf

        return bucket
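
# Hypothetical usage, assuming S3Helper is the class this method belongs to (it is
# instantiated elsewhere in these examples as S3Helper(region=region)); the bucket
# is created only if it does not already exist.
# s3Helper = S3Helper(region='us-west-2')
# bucket = s3Helper.createBucket('my-deployment-logs', region='us-west-2')
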
def mountEFSVolumes(volumes, options=""):
    """
    Mount the EFS storage volumes.
    
    volumes is either a singleton instance of EFSVolume or a list of instances
    of EFSVolume.  EFSVolume has everything needed to mount the volume on a
    given mount point.

    NOTE: The default instance mount options are very likely correct.
    Don't mess with the mount options unless you know what you are doing.

  """

    if (not volumes):
        raise MissingArgumentException(
            "One or more EFS volumes must be provided.")
    #endIf

    if (type(volumes) != type([])):
        volumes = [volumes]
    #endIf

    for volume in volumes:
        volume.mount(options)
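
# Hypothetical usage of the module level helper, assuming `volumes` is a list of
# EFSVolume instances exposing the mount(options) method used above; per the NOTE
# in the docstring, the EFSVolume default mount options are normally left alone.
# mountEFSVolumes(volumes)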