Example #1
0
 def _executeExitOnError(self, cmd_str):
     """Run *cmd_str* locally and report an error when it exits non-zero."""
     args = cmd_str.split(' ')
     rc, output = Util.execute(args,
                               withOutput=True,
                               verboseLevel=self.verboseLevel,
                               verboseThreshold=Util.VERBOSE_LEVEL_DETAILED)
     if rc:
         printError('Failed running: %s\n%s' % (cmd_str, output))
Example #2
0
 def _configureFileSharingClient(self):
     """Configure node-side file sharing according to the share type."""
     share = self.shareType
     if share == 'nfs':
         self._configureNfsClient()
         return
     if share in ('ssh', 'stratuslab'):
         self.node.configureSshClient(self.cloudVarLibDir)
         return
     printError('Unable to determine node share type. Got %s' % share)
 def _configureFileSharingServer(self):
     """Configure frontend-side file sharing according to the share type."""
     kind = self.shareType
     if kind == "nfs":
         self._configureNfsServer()
         return
     if kind in ("ssh", "stratuslab"):
         self.frontend.configureSshServer()
         return
     printError("Unable to determine frontend share type. Got %s" % kind)
Example #4
0
 def _sunstoneService(self, action):
     """Run 'sunstone-server <action>' as the ONE admin user.

     A failure to start is fatal; failures of other actions only warn.
     """
     command = 'su - %s -c "sunstone-server %s"' % (self.oneUsername, action)
     rc, output = self.system.executeCmdWithOutput(command, shell=True)
     if rc == 0:
         return
     message = "Failed to %s sunstone-server: %s" % (action, output)
     reporter = printError if action == 'start' else printWarning
     reporter(message)
    def _checkVolumeSize(self):
        """Validate the -s/--size option unless an origin/rebase UUID is given."""
        opts = self.options
        if opts.originUuid or opts.rebaseUuid:
            # Size is derived from the origin volume in these modes.
            return

        if not opts.volumeSize:
            printError('Missing mandatory -s/--size option')

        if not (self.VOLUME_SIZE_MIN <= opts.volumeSize <= self.VOLUME_SIZE_MAX):
            printError('Volume size must be a valid integer between %d and %d' %
                       (self.VOLUME_SIZE_MIN, self.VOLUME_SIZE_MAX))
 def doWork(self):
     configHolder = ConfigHolder(self.options.__dict__, self.config or {})
     configHolder.pdiskProtocol = "https"
     pdisk = VolumeManagerFactory.create(configHolder)
     for uuid in self.uuids:
         try:
             target = pdisk.hotDetach(self.options.instance, uuid)
             print 'DETACHED %s from VM %s on /dev/%s' % (uuid, self.options.instance, target)
         except Exception, e:
             printError('DISK %s: %s' % (uuid, e), exit=False)
 def doWork(self):
     configHolder = ConfigHolder(self.options.__dict__, self.config or {})
     configHolder.pdiskProtocol = "https"
     pdisk = VolumeManagerFactory.create(configHolder)
     for uuid in self.uuids:
         free, _ = pdisk.getVolumeUsers(uuid)
         if free < 1:
             printError('DISK %s: Disk not available\n' % uuid, exit=False)
         else:
             target = pdisk.hotAttach(self.node, self.options.instance, uuid)
             print 'ATTACHED %s in VM %s on /dev/%s' % (uuid, self.options.instance, target)
Example #8
0
 def getFQNHostname(hostname):
     """
     Return the fully qualified domain name for the given hostname.

     When resolution fails, a non-fatal error is printed and the
     original hostname is returned unchanged.
     """
     try:
         fqdn = getfqdn(hostname)
     except Exception:
         printError('Unable to translate endpoint "%s" to an IP address' % hostname,
                    exit=False)
         return hostname
     return fqdn
 def checkOptions(self):
     super(MainProgram, self).checkOptions()
     if not self.uuids:
         printError('Please provide at least one persistent disk UUID to attach')
     if self.options.instance < 0:
         printError('Please provide a VM ID on which to attach disk')
     try:
         self._setupCloudConnection()
         if not self.options.cloud.isVmRunning(self.options.instance):
             printError('VM %s is not in running state' % self.options.instance)
         self.node = self.options.cloud.getVmNode(self.options.instance)
     except OneException, e:
         printError(e)
Example #10
0
    def _manageExtraContext(self):
        """Assemble KEY=VALUE contextualization pairs and append them to
        the instance template.

        Pairs are collected, in order, from the extra-context file, the
        extra-context CLI data and (when requested or configured as the
        default method) cloud-init arguments.  printError() is called on
        any non-empty line that is not of the form KEY=VALUE.
        """
        extraContext = {}
        contextElems = []

        if self.extraContextFile:
            # 'with' guarantees the handle is closed even if read() raises
            # (the original open/read/close leaked the handle on error).
            with open(self.extraContextFile, "rb") as contextFile:
                contextFileData = contextFile.read()
            contextElems.extend(contextFileData.split("\n"))

        if self.extraContextData:
            contextElems.extend(self.extraContextData.split(Util.cliLineSplitChar))

        # cloud-init context applies when requested explicitly or when it is
        # configured as the default contextualization method.
        if self.cloudInit or (
            hasattr(self, "defaultContextMethod")
            and self.defaultContextMethod
            and self.defaultContextMethod == "cloud-init"
        ):
            if self.cloudInit is None:
                self.cloudInit = ""

            cloudInitArgs = self.cloudInit.split(Util.cliLineSplitChar)
            cloudInitData = CloudInitUtil.context_file(cloudInitArgs, default_public_key_file=self.userPublicKeyFile)
            contextElems.extend(cloudInitData.split("\n"))

        for line in contextElems:
            if not line:
                continue

            contextLine = line.split("=")

            if len(contextLine) < 2:
                printError("Error while parsing contextualization file.\n" "Syntax error in line `%s`" % line)

            # Values may themselves contain '=': re-join everything after
            # the first separator.
            extraContext[contextLine[0]] = "=".join(contextLine[1:])

        contextData = ['%s = "%s",' % (key, value) for key, value in extraContext.items()]

        self._appendContextData(contextData)
 def checkOptions(self):
     super(MainProgram, self).checkOptions()
     if not self.uuids:
         printError('Please provide at least one persistent disk UUID to detach')
     if self.options.instance < 0:
         printError('Please provide a VM ID on which to detach disk')
     try:
         self._retrieveVmNode()
     except OneException, e:
         printError(e)
Example #12
0
 def _checkPersistentDiskAvailable(self):
     """Verify the persistent disk has enough free mount slots for this run."""
     self.pdisk = VolumeManagerFactory.create(self.configHolder)
     try:
         free_slots, _ = self.pdisk.getVolumeUsers(self.persistentDiskUUID)
         if self.instanceNumber > free_slots:
             printError("disk cannot be attached; it is already mounted (%s/%s)" % (free_slots, self.instanceNumber))
     except AttributeError:
         # Client not fully configured -> treat as service unreachable.
         printError("Persistent disk service unavailable", exit=False)
         raise
     except Exception as e:
         printError(e, exit=False)
         raise
Example #13
0
    def deploy(self):
        """Instantiate, wait for, and configure every VM in the cluster.

        Returns 0 on success, or 128 when some nodes could not be
        instantiated and failures are not tolerated.
        """
        ssh = SSHUtil(self._runner.userPrivateKeyFile, self.cluster_admin)

        # Wait until all the images are up and running
        vmNetworkDetails = []
        vmStartTimeout = 600  # seconds; per-VM start/connect timeout

        # wait until the each machine is up or timeout after 15 minutes
        printStep("Waiting for all cluster VMs to be instantiated...")
        if self._is_heterogeneous:
            # Heterogeneous clusters start the master separately from workers.
            printStep("Waiting for master")
            self._runner.waitUntilVmRunningOrTimeout(self._master_vmid, vmStartTimeout)
            vmNetworkDetails.append(self._runner.getNetworkDetail(self._master_vmid))

        for vmId in self._runner.vmIds:
            printDebug('Waiting for instance to start running %s' % str(vmId))
            self._runner.waitUntilVmRunningOrTimeout(vmId, vmStartTimeout)
            vmNetworkDetails.append(self._runner.getNetworkDetail(vmId))

        vm_cpu, vm_ram, vm_swap = self._runner.getInstanceResourceValues()

        # Build a Host record for every VM that exposes a public interface.
        for vmNetwork in vmNetworkDetails:
            if vmNetwork[0] == 'public':
                host = Host()
                host.public_ip = vmNetwork[1]

                try:
                    host.public_dns = socket.gethostbyaddr(host.public_ip)[0]
                except:
                    # Reverse DNS is best-effort; fall back to the raw IP.
                    host.public_dns = host.public_ip

                host.cores = vm_cpu
                host.ram = vm_ram
                host.swap = vm_swap
                self.hosts.append(host)

        printStep("Waiting for all instances to become accessible...")

        failedHosts = []

        # Probe each host for SSH connectivity; collect the unreachable ones.
        for host in self.hosts:
            hostReady = False
            hostFailed = False

            while not hostReady and not hostFailed:
                if not ssh.waitForConnectivity(host, vmStartTimeout):
                    printError('Timed out while connecting to %s.  Removing from target config. list.' % host.public_ip)
                    failedHosts.append(host)
                    hostFailed = True
                else:
                    hostReady = True

        if len(failedHosts) > 0:
            if self.tolerate_failures:
                # Drop unreachable hosts and continue with the remainder.
                for host in failedHosts:
                    self.hosts.remove(host)
            else:
                printError('Error instantiating some or all of the nodes. Bailing out...')
                if self.clean_after_failure:
                    self._runner.killInstances(self._runner.vmIds)
                return 128

        # First reachable host acts as the master; all others are workers.
        master_node = self.hosts[0]

        worker_nodes = list(self.hosts)

        worker_nodes.remove(master_node)

        printInfo('\tMaster is %s' % master_node.public_dns)

        for node in worker_nodes:
            printInfo('\tWorker: %s' % node.public_dns)

        # Configure the hosts
        printAction('Configuring nodes')

        # Try to install the missing packages
        if self.add_packages:
            self.doAddPackages(ssh)

        # For MPI clusters prepare the machinefile for mpirun
        if self.mpi_machine_file:
            self.doPrepareMPImachineFile(ssh, worker_nodes)

        if self.cluster_user:
            # Create a new user and prepare the environments for password-less ssh
            self.doCreateClusterUser(ssh, master_node)

        # Initialize the shared storage in NFS
        if self.shared_folder:
            self.doPrepareNFSSharedFolder(ssh, master_node, worker_nodes)

        if self.ssh_hostbased:
            self.doSetupSSHHostBasedCluster(ssh)

        # Update /etc/profile with StratusLab specific environment variables
        self.doUpdateEnvironmentVariables(ssh, master_node, worker_nodes)

        # Store the list of cluster nodes in a file under /tmp
        self.doPrepareNodeList(ssh, worker_nodes)

        # Update the /etc/hosts file for all hosts
        self.doUpdateHostsFile(ssh, master_node, worker_nodes)

        # Start any services defined in rc.cluster-services
        self.doStartClusterServices(ssh, master_node)

        return 0
 def _getTemplateFile(self, tpl, name):
     """Resolve *tpl* through Util's template lookup.

     Returns the resolved template path, or *tpl* unchanged after
     reporting an error when the lookup fails.
     """
     try:
         return Util.get_template_file([tpl])
     except Exception:
         # 'except Exception' instead of the original bare 'except:' so
         # SystemExit/KeyboardInterrupt still propagate.
         printError("%s template does not exist" % name)
         return tpl
Example #15
0
 def _installRubyGems(self):
     """Install the configured Ruby gems (no docs); report failure."""
     command = ['gem', 'install', '--no-rdoc', '--no-ri'] + self.ruby_gems
     rc, output = self.system.executeCmdWithOutput(command)
     if rc:
         printError("Failed to install Ruby gems: %s" % output)
 def _checkUuids(self):
     """Require exactly one disk UUID and remember it as self.uuid."""
     if not self.uuids:
         printError('A disk UUID must be supplied')
     if len(self.uuids) != 1:
         printError('Only one disk UUID can be specified')
     self.uuid = self.uuids[0]
 def _checkTagLength(self):
     """Reject volume tags longer than TAG_LENGTH_MAX characters."""
     tag = self.options.volumeTag
     if len(tag) > self.TAG_LENGTH_MAX:
         printError('Tags must have less than %d characters' % self.TAG_LENGTH_MAX)
 def _checkUuid(self):
     """Report an error for each syntactically invalid disk UUID."""
     for candidate in self.uuids:
         if VolumeManager.isValidUuid(candidate):
             continue
         printError('Invalid UUID %s' % candidate)
 def _getUuid(self):
     """Report an error when no disk UUID was supplied."""
     if len(self.uuids) == 0:
         printError('At least one disk UUID is required')
Example #20
0
 def _executeOnNodeExitOnError(self, cmd_str):
     """Run *cmd_str* on the node and report an error on a non-zero exit."""
     words = cmd_str.split(' ')
     rc, output = self.system._nodeShell(words,
                                         withOutput=True,
                                         shell=True)
     if rc:
         printError('Failed running: %s\n%s' % (cmd_str, output))
Example #21
0
 def _checkNodeConnectivity(self):
     """Abort when the target node does not respond."""
     if self._nodeAlive():
         return
     printError('Unable to connect the node %s' % self.nodeAddr)