예제 #1
0
파일: Testor.py 프로젝트: StratusLab/client
    def exceedCpuQuotaTest(self):
        """Start x instances, where x is the cpu quota +1, then stop them.

        The ONE scheduler is expected to reject the request with a CPU-quota
        error; the test now also FAILS when no exception is raised at all
        (i.e. when the quota is not enforced).
        """
        printInfo("Current cpu quota: %s, starting as many +1" % self.quotaCpu)
        try:
            self._startVm(instanceNumber=int(self.quotaCpu) + 1)
        except OneException as ex:
            # Exact quota-violation message format emitted by OpenNebula.
            message = "CPU quota exceeded (Quota: %s.0, Used: %s.0, Requested: 1.0)." % (self.quotaCpu, self.quotaCpu)
            # str(ex) replaces the deprecated ex.message attribute.
            self.assertTrue(message in str(ex), "Quota not working, got %s expected %s" % (str(ex), message))
        else:
            # BUG FIX: the original silently passed when _startVm succeeded,
            # masking a non-enforced quota.
            self.fail("Quota not enforced: starting %s VMs raised no OneException." % (int(self.quotaCpu) + 1))
예제 #2
0
파일: Testor.py 프로젝트: StratusLab/client
    def oneReportsErrorViaXmlRpcTest(self):
        """Test if ONE reports error messages via XML RPC.

        Starts and stops an intentionally invalid image, then verifies that
        the returned VM info carries a non-empty 'template_error_message'
        attribute.
        """
        info, vmId = self._startStopInvalidImage()

        try:
            errorMessage = info.attribs["template_error_message"]
        except KeyError:
            # Attribute missing entirely: ONE did not report the error.
            self.fail("No error message set.")
        else:
            # assertTrue replaces the deprecated failUnless alias (consistent
            # with the other tests in this file); behavior is identical.
            self.assertTrue(errorMessage, "Empty error message.")
            printInfo("VM %s failed with error message:\n%s" % (vmId, errorMessage))
예제 #3
0
파일: Testor.py 프로젝트: StratusLab/client
 def _localMount(self, device, mountPoint, options=None):
     """Mount *device* on *mountPoint*, creating the mount point if needed.

     :param device: block device path to mount.
     :param mountPoint: directory to mount onto (created best-effort).
     :param options: optional list of mount options joined with ','.
     :return: True if mount exited with status 0, False otherwise.
     """
     if options is None:
         # BUG FIX: options=[] was a shared mutable default argument.
         options = []
     Util.printStep(
         "Mounting device %s on %s with options %s" % (device, mountPoint, (", ".join(options) or "<no options>"))
     )
     # NOTE: with an empty options list this still passes -o "" to mount,
     # exactly as the original code did (kept for backward compatibility).
     mountOptions = ["-o", ",".join(options)]
     try:
         mkdir(mountPoint)
     except OSError:
         # Mount point already exists (or cannot be created); a real problem
         # will surface as a mount failure below. Narrowed from bare except.
         pass
     rc, output = Util.executeGetStatusOutput(
         ["mount"] + mountOptions + [device, mountPoint], verboseLevel=self.verboseLevel
     )
     if output:
         printInfo(output)
     # Direct boolean comparison replaces the 'and True or False' idiom.
     return rc == 0
예제 #4
0
파일: Testor.py 프로젝트: StratusLab/client
        # -- Fragment of an image-creation test (the enclosing "def" is not
        # -- visible here): run the creator, then poll until bundling ends
        # -- or a timeout expires.
        try:
            creator.create()
        except Exception, e:
            # Best-effort cleanup: try to stop the machine before re-raising
            # the original error; a failure to stop must not mask it.
            try:
                creator._stopMachine()
            except:
                pass
            raise e
        finally:
            # The temporary script is removed whether creation succeeded or not.
            os.unlink(script_file)

        # Poll the VM state every t_step seconds, giving up after `timeout`
        # seconds in total.
        timeout = 1000
        t_stop = time.time() + timeout
        t_step = 10
        printInfo("Waiting %i sec for image bundling." % timeout)

        while time.time() < t_stop:
            if creator.getVmState() in ("Done", "Failed"):
                break
            printInfo("Time remaining: %d" % (t_stop - time.time()))
            time.sleep(t_step)

        # Assert instance state
        vm_state = creator.getVmState()
        if vm_state != "Done":
            # On failure, build a Monitor with the test credentials so the
            # VM details can be retrieved for diagnosis.
            self.configHolder.username = self.testUsername
            self.configHolder.password = self.testPassword
            self.configHolder.endpoint = self.endpoint
            monitor = Monitor(self.configHolder)
            info = monitor._vmDetail(creator.getVmId())
예제 #5
0
 def _printInstallCompleted(self, stdoutFilename, stderrFilename):
     """Report that installation finished and where its output was captured."""
     printStep('Installation completed')
     details = '\tInstallation details: %s, %s' % (stdoutFilename, stderrFilename)
     printInfo(details)
예제 #6
0
    def deploy(self):
        """Wait for all cluster VMs, collect reachable hosts and configure them.

        :return: 0 on success; 128 when node instantiation failed and
                 failures are not tolerated, or when no node remains usable.
        """
        ssh = SSHUtil(self._runner.userPrivateKeyFile, self.cluster_admin)

        # Wait until all the images are up and running
        vmNetworkDetails = []
        vmStartTimeout = 600

        # wait until the each machine is up or timeout after 15 minutes
        printStep("Waiting for all cluster VMs to be instantiated...")
        if self._is_heterogeneous:
            printStep("Waiting for master")
            self._runner.waitUntilVmRunningOrTimeout(self._master_vmid, vmStartTimeout)
            vmNetworkDetails.append(self._runner.getNetworkDetail(self._master_vmid))

        for vmId in self._runner.vmIds:
            printDebug('Waiting for instance to start running %s' % str(vmId))
            self._runner.waitUntilVmRunningOrTimeout(vmId, vmStartTimeout)
            vmNetworkDetails.append(self._runner.getNetworkDetail(vmId))

        vm_cpu, vm_ram, vm_swap = self._runner.getInstanceResourceValues()

        # Build a Host record for every VM that received a public address.
        for vmNetwork in vmNetworkDetails:
            if vmNetwork[0] == 'public':
                host = Host()
                host.public_ip = vmNetwork[1]

                try:
                    host.public_dns = socket.gethostbyaddr(host.public_ip)[0]
                except socket.error:
                    # No reverse DNS: fall back to the bare IP. Narrowed from
                    # a bare 'except:' that also swallowed KeyboardInterrupt
                    # and SystemExit (herror/gaierror subclass socket.error).
                    host.public_dns = host.public_ip

                host.cores = vm_cpu
                host.ram = vm_ram
                host.swap = vm_swap
                self.hosts.append(host)

        printStep("Waiting for all instances to become accessible...")

        failedHosts = []

        for host in self.hosts:
            # Simplified from a redundant while/flag loop that could only
            # ever execute a single iteration per host.
            if not ssh.waitForConnectivity(host, vmStartTimeout):
                printError('Timed out while connecting to %s.  Removing from target config. list.' % host.public_ip)
                failedHosts.append(host)

        if failedHosts:
            if self.tolerate_failures:
                for host in failedHosts:
                    self.hosts.remove(host)
            else:
                printError('Error instantiating some or all of the nodes. Bailing out...')
                if self.clean_after_failure:
                    self._runner.killInstances(self._runner.vmIds)
                return 128

        # BUG FIX: if every host failed connectivity and failures were
        # tolerated, self.hosts[0] below raised IndexError. Bail out cleanly.
        if not self.hosts:
            printError('Error instantiating some or all of the nodes. Bailing out...')
            return 128

        master_node = self.hosts[0]

        worker_nodes = list(self.hosts)
        worker_nodes.remove(master_node)

        printInfo('\tMaster is %s' % master_node.public_dns)

        for node in worker_nodes:
            printInfo('\tWorker: %s' % node.public_dns)

        # Configure the hosts
        printAction('Configuring nodes')

        # Try to install the missing packages
        if self.add_packages:
            self.doAddPackages(ssh)

        # For MPI clusters prepare the machinefile for mpirun
        if self.mpi_machine_file:
            self.doPrepareMPImachineFile(ssh, worker_nodes)

        if self.cluster_user:
            # Create a new user and prepare the environments for password-less ssh
            self.doCreateClusterUser(ssh, master_node)

        # Initialize the shared storage in NFS
        if self.shared_folder:
            self.doPrepareNFSSharedFolder(ssh, master_node, worker_nodes)

        if self.ssh_hostbased:
            self.doSetupSSHHostBasedCluster(ssh)

        # Update /etc/profile with StratusLab specific environment variables
        self.doUpdateEnvironmentVariables(ssh, master_node, worker_nodes)

        # Store the list of cluster nodes in a file under /tmp
        self.doPrepareNodeList(ssh, worker_nodes)

        # Update the /etc/hosts file for all hosts
        self.doUpdateHostsFile(ssh, master_node, worker_nodes)

        # Start any services defined in rc.cluster-services
        self.doStartClusterServices(ssh, master_node)

        return 0