Example #1
    def findDisks(self):
        """	pass a scan script to the node, returning a list of unused disks in xml format """

        cfg.LOGGER.debug('%s getDisks scanning %s', time.asctime(),
                         self.nodeName)

        scriptName = os.path.join(cfg.PGMROOT, 'scripts/findDevs.py')

        cfg.LOGGER.debug('%s getDisks using script from %s', time.asctime(),
                         scriptName)

        # If this is the local node, use issueCMD directly rather than ssh
        if self.localNode:
            (rc, diskOut) = issueCMD(scriptName)
        else:
            sshTarget = SSHsession(self.userName, self.nodeName,
                                   self.userPassword)
            (rc, diskOut) = sshTarget.sshPython(scriptName)

        self.diskScanned = True

        if rc == 0:

            diskData = str(diskOut[0])
            xmlDoc = ETree.fromstring(diskData)
            freeDisks = xmlDoc.findall('disk')
            sysInfo = xmlDoc.find('sysinfo')

            # Process the sysinfo data, and update the node's attributes
            self.kernelVersion = sysInfo.attrib['kernel']
            self.dmthinp = (sysInfo.attrib['dmthinp'] == 'yes')
            self.btrfs = (sysInfo.attrib['btrfs'] == 'yes')
            self.glusterVersion = sysInfo.attrib['glustervers']
            self.memGB = int(sysInfo.attrib['memsize']) / 1024**2  # memsize appears to be in KB
            self.cpuCount = int(sysInfo.attrib['cpucount'])
            self.raidCard = sysInfo.attrib['raidcard']
            self.osVersion = sysInfo.attrib['osversion']

            self.tunedProfiles = sysInfo.attrib['tunedprofiles'].split(',')

            # Process the disk information
            for disk in freeDisks:
                deviceName = disk.attrib['device']
                sizeMB = int(disk.attrib['sizeKB']) / 1024

                brick = Brick(self.nodeName, deviceName, sizeMB)
                if self.localNode:
                    brick.localDisk = True
                self.diskList[deviceName] = brick

        else:
            # Insert Scan failed logic here!
            pass

        cfg.LOGGER.debug('%s getDisks found %d devices on %s', time.asctime(),
                         len(self.diskList), self.nodeName)
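
A note on the payload: findDevs.py itself is not shown here, but the parsing above implies an XML document with one 'sysinfo' element plus one 'disk' element per free device. A minimal sketch of such a document, with the element and attribute names taken from the parsing code; the root tag and all values are invented:

import xml.etree.ElementTree as ETree

# Hypothetical scan output - attribute names mirror what findDisks() reads;
# the root tag and every value here are assumptions.
diskData = """
<data>
  <sysinfo kernel="2.6.32-431.el6.x86_64" dmthinp="yes" btrfs="no"
           glustervers="3.4.2" memsize="16777216" cpucount="8"
           raidcard="none" osversion="RHEL-6.5"
           tunedprofiles="default,rhs-high-throughput"/>
  <disk device="sdb" sizeKB="104857600"/>
  <disk device="sdc" sizeKB="104857600"/>
</data>
"""

xmlDoc = ETree.fromstring(diskData)
sysInfo = xmlDoc.find('sysinfo')
print(sysInfo.attrib['kernel'])     # 2.6.32-431.el6.x86_64
for disk in xmlDoc.findall('disk'):
    # same KB -> MB conversion findDisks() applies
    print('%s %d' % (disk.attrib['device'], int(disk.attrib['sizeKB']) // 1024))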
Example #2
def getHostIP():
    """	Get a list of IPs the host has defined """

    hostIP = []

    (rc, ipInfo) = issueCMD("ip addr show")

    for dataLine in ipInfo:
        if 'inet ' in dataLine:
            interface = dataLine.split()[-1]
            if interface.startswith(cfg.NICPREFIX):
                dataLine = dataLine.replace('/', ' ')
                hostIP.append(dataLine.split()[1])

    return hostIP
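
The slicing in getHostIP relies on the layout of 'inet' lines from 'ip addr show'. A worked line (address and device name invented) showing what each expression picks out:

# Typical line: the last token is the device label, the second is addr/prefix
dataLine = "    inet 192.168.122.15/24 brd 192.168.122.255 scope global eth0"

print(dataLine.split()[-1])                   # 'eth0' - checked against cfg.NICPREFIX
print(dataLine.replace('/', ' ').split()[1])  # '192.168.122.15' - prefix stripped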
Example #3
def getSubnets():
    """getSubnets returns a list of the IPv4 subnets available of the host"""

    subnetList = []

    (rc, ipInfo) = issueCMD("ip addr show")

    for dataLine in ipInfo:
        if 'inet ' in dataLine:
            interface = dataLine.split()[-1]
            if interface.startswith(cfg.NICPREFIX):
                IPdata = dataLine.split()[1]
                thisSubnet = calcSubnet(IPdata)
                subnetList.append(thisSubnet)

    return subnetList
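
calcSubnet is a project helper that does not appear in these examples. Given that it receives the 'address/prefix' string sliced out above, a plausible stand-in that reduces it to the network address in CIDR form might look like this (an assumption about its behaviour, not the project's actual code):

def calcSubnet(IPdata):
    """Reduce 'a.b.c.d/nn' to its network address, e.g. '192.168.122.0/24' (hypothetical stand-in)."""
    addr, prefix = IPdata.split('/')
    prefix = int(prefix)
    octets = [int(o) for o in addr.split('.')]
    ipValue = (octets[0] << 24) | (octets[1] << 16) | (octets[2] << 8) | octets[3]
    mask = (0xFFFFFFFF << (32 - prefix)) & 0xFFFFFFFF
    net = ipValue & mask
    return '%d.%d.%d.%d/%d' % ((net >> 24) & 255, (net >> 16) & 255,
                               (net >> 8) & 255, net & 255, prefix)

print(calcSubnet('192.168.122.15/24'))    # 192.168.122.0/24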
Example #4
    def joinCluster(self):
        """	run peer probe against this node """

        (rc, probeOutput) = issueCMD("gluster peer probe " + self.nodeName)

        if rc > 0:
            # update the clusterState properties
            cfg.LOGGER.debug("%s peer probe for %s failed (RC=%d)",
                             time.asctime(), self.nodeName, rc)
            logErrorMsgs(probeOutput)
            self.inCluster = False
        else:
            # update the clusterState properties
            cfg.LOGGER.debug("%s peer probe for %s succeeded", time.asctime(),
                             self.nodeName)
            self.inCluster = True
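
Since joinCluster only records the probe outcome in the inCluster flag, a caller has to check that flag itself. A hedged usage sketch (candidateNodes is a hypothetical list of node objects; cfg and time come from the surrounding examples):

for node in candidateNodes:    # hypothetical list of node objects
    node.joinCluster()
    if not node.inCluster:
        cfg.LOGGER.info('%s %s failed the peer probe - skipping it',
                        time.asctime(), node.nodeName)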
Example #5
def sshKeyOK():
    """Ensure local ssh key is in place, if not create it 
	"""

    keyOK = False

    if os.path.exists('/root/.ssh/id_rsa.pub'):
        keyOK = True
        cfg.LOGGER.info('%s root has an ssh key ready to push out',
                        time.asctime())
    else:

        # Run ssh-keygen in shell mode (the 'True' parameter) to generate the key
        (rc, genOut) = issueCMD("ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa",
                                True)

        for line in genOut:
            if 'Your public key has been saved' in line:
                cfg.LOGGER.info('%s SSH key has been generated successfully',
                                time.asctime())
                keyOK = True
                break

    return keyOK
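
sshKeyOK only guarantees that a key pair exists locally; distributing the public key to the other nodes is a separate step. A minimal sketch using ssh-copy-id through the same issueCMD helper (the function name, and the assumption that password logins still work at this point, are mine):

def pushKey(nodeName):
    """Sketch: copy root's public key to a node via ssh-copy-id (hypothetical helper)."""
    (rc, copyOut) = issueCMD("ssh-copy-id -i /root/.ssh/id_rsa.pub root@" + nodeName, True)
    return rc == 0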
Example #6
    def createVolume(self):
        """	function to define a gluster volume given an xml volume definition """

        cmdQueue = []

        # protocols
        NFSstate = 'nfs.disable off' if self.protocols['nfs'] == 'off' else 'nfs.disable on'
        CIFSstate = 'user.cifs enable' if self.protocols['cifs'] == 'on' else 'user.cifs disable'

        # create volume syntax
        createCMD = "gluster vol create " + self.volName + self.replicaParm + " transport tcp "
        for brick in self.bricks:
            createCMD += brick + "/" + self.volDirectory + " "

        #createCMD += " force"		# added to allow the root of the brick to be used (glusterfs 3.4)

        cmdQueue.append(createCMD)

        # Post Processing Options
        cmdQueue.append('gluster vol set ' + self.volName + ' ' + NFSstate)
        cmdQueue.append('gluster vol set ' + self.volName + ' ' + CIFSstate)

        if self.useCase == 'hadoop':
            # Added based on work done by Jeff Vance @ Red Hat
            cmdQueue.append('gluster vol set ' + self.volName +
                            ' quick-read off')
            cmdQueue.append('gluster vol set ' + self.volName +
                            ' cluster.eager-lock on')
            cmdQueue.append('gluster vol set ' + self.volName +
                            ' performance.stat-prefetch off')

        elif self.useCase == 'virtualisation':

            # look to see what type of virt target it is
            if self.virtTarget == 'glance':
                cmdQueue.append('gluster vol set ' + self.volName +
                                ' storage.owner-gid 161')
                cmdQueue.append('gluster vol set ' + self.volName +
                                ' storage.owner-uid 161')
            elif self.virtTarget == 'cinder':
                cmdQueue.append('gluster vol set ' + self.volName +
                                ' storage.owner-gid 165')
                cmdQueue.append('gluster vol set ' + self.volName +
                                ' storage.owner-uid 165')
            elif self.virtTarget == 'rhev':
                cmdQueue.append('gluster vol set ' + self.volName +
                                ' storage.owner-gid 36')
                cmdQueue.append('gluster vol set ' + self.volName +
                                ' storage.owner-uid 36')

            if self.virtTarget in ('cinder', 'rhev'):

                # Is the virt group available to use
                if os.path.isfile('/var/lib/glusterd/groups/virt'):
                    cmdQueue.append('gluster vol set ' + self.volName +
                                    ' group virt')
                else:
                    # Fallback settings if local virt group definition is not there
                    cmdQueue.append('gluster vol set ' + self.volName +
                                    ' quick-read off')
                    cmdQueue.append('gluster vol set ' + self.volName +
                                    ' read-ahead off')
                    cmdQueue.append('gluster vol set ' + self.volName +
                                    ' io-cache off')
                    cmdQueue.append('gluster vol set ' + self.volName +
                                    ' stat-prefetch off')
                    cmdQueue.append('gluster vol set ' + self.volName +
                                    ' eager-lock enable')
                    cmdQueue.append('gluster vol set ' + self.volName +
                                    ' remote-dio enable')

        self.settings = list(cmdQueue)

        # Add volume start to the command sequence
        cmdQueue.append('gluster vol start ' + self.volName)

        # log the number of commands that will be run
        numCmds = len(cmdQueue)
        cfg.LOGGER.debug("%s Creating volume %s - %d steps", time.asctime(),
                         self.volName, numCmds)

        cfg.MSGSTACK.pushMsg(" ")
        cfg.MSGSTACK.pushMsg("Processing '%s'" % (self.volName))

        # Process the command sequence
        retCode = 0
        stepNum = 1

        # Pre CREATE phase ##############################################################
        # Volumes are defined on directories on the brick, so the first thing to do
        # is prepare the brick with the required directory

        for brick in self.bricks:
            (hostName, brickPath) = brick.split(':')
            node = cfg.CLUSTER.node[hostName]

            cfg.MSGSTACK.pushMsg("Creating directory on %s" % (hostName))
            cfg.LOGGER.info("%s Creating %s/%s directory on node %s",
                            time.asctime(), brickPath, self.volDirectory,
                            hostName)

            s = SSHsession(node.userName, node.nodeName)
            (rc, mkdirOut) = s.ssh('mkdir %s/%s' % (brickPath, self.volDirectory))

            if rc > 0:

                cfg.MSGSTACK.pushMsg("Directory preparation failed")
                cfg.LOGGER.debug("%s directory creation on %s failed",
                                 time.asctime(), hostName)

                retCode = 12
                break

        if retCode == 0:
            for cmd in cmdQueue:

                cmdType = ' '.join(cmd.split()[1:3]) + ' ...'
                cfg.MSGSTACK.pushMsg("Step %d/%d starting (%s)" %
                                     (stepNum, numCmds, cmdType))

                (rc, cmdOutput) = issueCMD(cmd)

                self.createMsgs += cmdOutput

                if rc == 0:  # retcode is 1st element, so check it's 0

                    # push this cmd to the queue for reporting in the UI
                    cfg.MSGSTACK.pushMsg("Step %d/%d completed" %
                                         (stepNum, numCmds))

                    # Log the cmd being run as successful
                    cfg.LOGGER.info("%s step %d/%d successful", time.asctime(),
                                    stepNum, numCmds)
                    cfg.LOGGER.debug("%s Command successful : %s",
                                     time.asctime(), cmd)

                else:
                    cfg.LOGGER.info("%s vol create step failed",
                                    time.asctime())

                    cfg.LOGGER.debug("%s command failure - %s", time.asctime(),
                                     cmd)

                    cfg.MSGSTACK.pushMsg(
                        "Step %d/%d failed - sequence aborted" %
                        (stepNum, numCmds))

                    # problem executing the command, log the response and return
                    retCode = 8
                    break

                stepNum += 1

        # post CREATE phase ###################################################
        #
        if retCode == 0:

            if self.useCase == 'hadoop':
                msg = "Mounting '%s' on all nodes" % (self.volName)
                cfg.LOGGER.debug(
                    "%s Post configuration for hadoop volume use case",
                    time.asctime())
                cfg.MSGSTACK.pushMsg("'hadoop' use case post processing...")
                cfg.MSGSTACK.pushMsg(msg)

                retCode = self.mountVolume()  # Mounts this volume across all nodes

        self.state = "Succeeded" if retCode == 0 else "Failed"

        self.retCode = retCode

        cfg.MSGSTACK.pushMsg("'%s' creation - %s" % (self.volName, self.state))
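
To make the sequence concrete, here is cmdQueue traced by hand for a hypothetical two-brick replica volume (all input values invented; the nfs/cifs lines simply follow the NFSstate/CIFSstate logic above):

# Inputs: volName='vol01', replicaParm=' replica 2', volDirectory='vol01',
#         bricks=['server1:/gluster/brick1', 'server2:/gluster/brick1'],
#         protocols={'nfs': 'off', 'cifs': 'on'}, useCase=None
#
# cmdQueue then holds, in execution order:
#   gluster vol create vol01 replica 2 transport tcp server1:/gluster/brick1/vol01 server2:/gluster/brick1/vol01
#   gluster vol set vol01 nfs.disable off
#   gluster vol set vol01 user.cifs enable
#   gluster vol start vol01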