def action_post_install(self, *args, **kwargs):
    """
    Post-install kit action for the Ganglia kit: cache Ganglia packages
    from EPEL into the kit repository, rebuild the repo metadata, and
    install the default gmond component configuration file.
    """
    super().action_post_install(*args, **kwargs)

    #
    # Cache Ganglia packages from EPEL
    #
    os.system(os.path.join(self.install_path, 'bin', "dl.sh"))

    kit = KitManager().getKit('ganglia')

    kit_repo_dir = os.path.join(
        self.config_manager.getReposDir(),
        kit.getKitRepoDir()
    )

    cmd = 'rsync -a {}/ {}'.format('/var/cache/tortuga/pkgs/ganglia',
                                   kit_repo_dir)
    tortugaSubprocess.executeCommandAndIgnoreFailure(cmd)

    # BUG FIX: the original used a '%s' placeholder with str.format(),
    # which performs no substitution and ran the literal command
    # "cd %s; createrepo ." -- use a '{}' placeholder so the repo
    # directory is actually interpolated.
    cmd = 'cd {}; createrepo .'.format(kit_repo_dir)
    tortugaSubprocess.executeCommandAndIgnoreFailure(cmd)

    #
    # Copy default configuration file into place
    #
    dst_path = os.path.join(self.config_manager.getKitConfigBase(),
                            'ganglia')

    if not os.path.exists(dst_path):
        os.makedirs(dst_path)

    shutil.copyfile(
        os.path.join(self.files_path, 'gmond-component.conf'),
        os.path.join(dst_path, 'gmond-component.conf')
    )
def action_post_install(self, *args, **kwargs):
    """
    Post-install kit action for the Munin kit: download Munin packages
    and their EPEL dependencies, then publish them into the kit's
    package repository (regenerating repo metadata).
    """
    super().action_post_install(*args, **kwargs)

    #
    # Cache Munin packages and dependencies from EPEL
    #
    os.system(os.path.join(self.install_path, 'bin', "dl.sh"))

    kit = KitManager().getKit('munin')

    kit_repo_dir = os.path.join(
        self.config_manager.getReposDir(), kit.getKitRepoDir())

    #
    # If cache directory exists, copy to 'kitRepoDir' (the default
    # location for kit packages)
    #
    pkg_cache_dir = '/var/cache/tortuga/pkgs/munin'

    if not os.path.isdir(pkg_cache_dir):
        return

    tortugaSubprocess.executeCommandAndIgnoreFailure(
        'rsync -a {}/ {}'.format(pkg_cache_dir, kit_repo_dir))

    tortugaSubprocess.executeCommandAndIgnoreFailure(
        'createrepo {}'.format(kit_repo_dir))
def runCommand(self):
    """
    CLI entry point: remove the installer's provisioning NIC named by
    the --nic argument, deleting its associated network, network-device
    and hardware-profile association rows, then regenerate dependent
    service configuration (dhcpd/DNS) as needed.

    Raises NicNotFound if the named NIC is not a provisioning NIC on
    the installer's hardware profile.
    """
    self.parseArgs()

    with DbManager().session() as session:
        dbNode = NodesDbHandler().getNode(session, self._cm.getInstaller())

        # Validate device name
        NetworkDevicesDbHandler().getNetworkDevice(session, self.getArgs().nic)

        # Ensure it is a provisioning NIC that is being deleted
        dbInstallerNic: Nic = None

        for dbInstallerNic in dbNode.hardwareprofile.nics:
            if dbInstallerNic.networkdevice.name == self.getArgs().nic:
                break
        else:
            raise NicNotFound('NIC [%s] is not a provisioning NIC' % (
                self.getArgs().nic))

        # Refuse to delete when any *other* hardware profile still
        # references this NIC's network for provisioning.
        hardwareProfiles = [
            entry.hardwareprofile
            for entry in dbInstallerNic.network.hardwareprofilenetworks
            if entry.hardwareprofile != dbNode.hardwareprofile
        ]

        if hardwareProfiles:
            raise Exception('Hardware profile(s) are associated with this'
                            ' provisioning NIC: [%s]' % (
                                ' '.join([hp.name
                                          for hp in hardwareProfiles])))

        # Delete dependent rows first, then the NIC itself, then the
        # now-orphaned network and network device (order matters for
        # referential integrity).
        session.query(
            HardwareProfileNetwork).filter(
                HardwareProfileNetwork.network ==
                dbInstallerNic.network).delete()

        session.query(HardwareProfileProvisioningNic).filter(
            HardwareProfileProvisioningNic.nic == dbInstallerNic).delete()

        # Capture ids before the NIC row is deleted
        dbNetworkId = dbInstallerNic.network.id
        networkDeviceId = dbInstallerNic.networkdevice.id

        session.delete(dbInstallerNic)

        session.query(Network).filter(Network.id == dbNetworkId).delete()

        self._deleteNetworkDevice(session, networkDeviceId)

        session.commit()

        bUpdated = self._updateNetworkConfig(session, dbNode)

        if bUpdated and self.getArgs().bSync:
            print('Applying changes to Tortuga...')

            # Trigger a one-off puppet run to propagate config changes;
            # failures are deliberately ignored (best effort).
            cmd = 'puppet agent --onetime --no-daemonize >/dev/null 2>&1'
            tortugaSubprocess.executeCommandAndIgnoreFailure(cmd)
def _uninstall_puppet_module(self, module_name):
    """
    Uninstalls a puppet module from the kit puppet_modules directory.

    Failures are ignored (best-effort cleanup).

    :param module_name: The name of the puppet module to uninstall.
    """
    tortugaSubprocess.executeCommandAndIgnoreFailure(
        '/opt/puppetlabs/bin/puppet module uninstall'
        ' --color false --ignore-changes {}'.format(module_name)
    )
def install_wheel_matching_filespec(self, whl_pathspec):
    """
    Install the first Python wheel matching the given glob pattern.

    :param whl_pathspec: glob pattern used to locate candidate .whl files
    :raises FileNotFound: if no file matches the pattern
    """
    # Find an whl matching the filespec
    matches = glob.glob(whl_pathspec)

    if not matches:
        raise FileNotFound('No files found matching spec %s' % (
            whl_pathspec))

    # Use the first whl file found
    pip_cmd = '%s/pip install %s' % (self._config.getBinDir(), matches[0])
    tortugaSubprocess.executeCommandAndIgnoreFailure(pip_cmd)
def prepSudo(self):
    """
    Setup sudo.
    """
    self._logger.info('Setting up \'sudo\'')

    # TODO: these scripts have 'apache' hardcoded right now
    init_script = \
        self._osObjectFactory.getOsSysManager().getSudoInitScript()

    result = tortugaSubprocess.executeCommandAndIgnoreFailure(init_script)

    self._logger.debug('sudo init output:\n%s' % result.getStdOut())
def hookAction(self, action, nodes, args=None):
    '''
    Invoke the configured hook script with the given action, optional
    extra arguments, and node name(s).

    WARNING: this method may be subject to scalability concerns if
    batch node operations are implemented.
    '''
    script = self.hookScript
    if not script:
        # No hook configured; nothing to do
        return

    # A list of node names is collapsed into a comma-separated string
    node_arg = ','.join(nodes) if isinstance(nodes, list) else nodes

    parts = [script, action]
    if args:
        parts.append('%s' % (args))
    parts.append('%s' % (node_arg))

    tortugaSubprocess.executeCommandAndIgnoreFailure(' '.join(parts))
def _updateNetworkConfig(self, session, dbInstallerNode): """ Returns True if configuration files were changed. """ bUpdated = False bin_dir = os.path.dirname(sys.argv[0]) # Update dhcpd configuration if self._componentEnabled(session, dbInstallerNode.softwareprofile, 'dhcpd'): print('Updating dhcpd configuration...') tortugaSubprocess.executeCommand('{} {}'.format( os.path.join(bin_dir, 'genconfig'), 'dhcpd')) tortugaSubprocess.executeCommand('service dhcpd restart') bUpdated = True # Update DNS configuration after adding a provisioning NIC if self._componentEnabled(session, dbInstallerNode.softwareprofile, 'dns'): print('Updating DNS configuration...') tortugaSubprocess.executeCommand('{} {}'.format( os.path.join(bin_dir, 'genconfig'), 'dns')) # Because the entire configuration changes between before and # after there was a provisioning NIC installed, it is necessary # to restart the server. An 'rndc reload' will *NOT* suffice. tortugaSubprocess.executeCommandAndIgnoreFailure( 'service named restart') bUpdated = True return bUpdated
def _schedule_update(self):
    """
    Trigger a one-off puppet run on this host via MCollective,
    ignoring any failure of the 'mco' invocation.
    """
    # Use the short (unqualified) hostname as the mco identity filter
    short_hostname = socket.gethostname().split('.')[0]

    tortugaSubprocess.executeCommandAndIgnoreFailure(
        'mco puppet --json --no-progress runonce -I {}'.format(
            short_hostname))
def executeAndIgnoreFailure(self, cmd):
    """
    Run *cmd* via tortugaSubprocess, ignoring a non-zero exit status.

    :param cmd: shell command to execute
    :return: the tortugaSubprocess result object for the command
    """
    result = tortugaSubprocess.executeCommandAndIgnoreFailure(cmd)

    return result
def _addNic(self, session, nicName):
    """
    Register NIC *nicName* on the installer node as a provisioning NIC.

    Determines the NIC's IP address/netmask/network via facter (with
    command-line overrides as fallback), creates any missing Network,
    NetworkDevice, Nic and HardwareProfileNetwork records, commits the
    session, then regenerates dependent service configuration.

    :raises InvalidCliRequest: when facter cannot supply a value and no
        command-line override was given
    """
    # Get IP address and netmask using facter
    # facter fact names use '_' where the device name has ':' or '.'
    facterNicName = nicName.replace(':', '_').replace('.', '_')

    entries = [
        'ipaddress_%s' % (facterNicName),
        'netmask_%s' % (facterNicName),
        'macaddress_%s' % (facterNicName),
        'network_%s' % (facterNicName)
    ]

    d = self._getMultipleFacterEntries(entries)

    # For each of ipaddress/netmask/network: prefer the facter value,
    # fall back to the command-line override, otherwise abort.
    if not 'ipaddress_%s' % (facterNicName) in d or \
            not d['ipaddress_%s' % (facterNicName)]:
        if not self.getArgs().ipaddress:
            raise InvalidCliRequest(
                'Unable to determine IP address, use command-line'
                ' override')

        ipaddress = self.getArgs().ipaddress
    else:
        ipaddress = d['ipaddress_%s' % (facterNicName)]

    if not 'netmask_%s' % (facterNicName) in d or \
            not d['netmask_%s' % (facterNicName)]:
        if not self.getArgs().netmask:
            raise InvalidCliRequest(
                'Unable to determine netmask, use command-line'
                ' override')

        netmask = self.getArgs().netmask
    else:
        netmask = d['netmask_%s' % (facterNicName)]

    if not 'network_%s' % (facterNicName) in d or \
            not d['network_%s' % (facterNicName)]:
        if not self.getArgs().network:
            raise InvalidCliRequest(
                'Unable to determine network, use command-line'
                ' override')

        network = self.getArgs().network
    else:
        network = d['network_%s' % (facterNicName)]

    # Check if nic is the default gateway as well...
    self._check_default_gateway_nic(nicName)

    dbNetwork = None

    # Attempt to find matching network
    try:
        dbNetwork = session.query(Network).filter(
            and_(Network.address == network,
                 Network.netmask == netmask)).one()

        print('Found network [%s/%s]' % (
            dbNetwork.address, dbNetwork.netmask))
    except NoResultFound:
        # Network is not known to Tortuga, add it
        pass

    if dbNetwork is None:
        print('Adding network [%s/%s]' % (network, netmask))

        dbNetwork = self._addNetwork(nicName, network, netmask, session)

    # Attempt to find entry in NetworkDevices
    dbNetworkDevice = self._getNetworkDevice(nicName, session)

    if not dbNetworkDevice:
        # Create network device
        print('Adding network device [%s] as provisioning NIC' % (nicName))

        dbNetworkDevice = self._addNetworkDevice(nicName, session)
    else:
        print('Found existing network device [%s]' % (nicName))

    dbNode = NodesDbHandler().getNode(session, self._cm.getInstaller())

    # Attempt to find Nics entry
    # NOTE(review): comparison uses nicName.lower(), which presumably
    # assumes device names are stored lower-cased -- confirm.
    for dbNic in dbNode.nics:
        if dbNic.networkdevice.name == nicName.lower():
            print('Found existing NIC entry for [%s]' % (
                dbNic.networkdevice.name))
            break
    else:
        print('Creating NIC entry for [%s]' % (dbNetworkDevice.name))

        dbNic = Nic()
        dbNic.networkdevice = dbNetworkDevice
        dbNic.ip = ipaddress
        dbNic.boot = True
        dbNic.network = dbNetwork

        dbNode.nics.append(dbNic)

    # Attempt to find NIC association with hardware profile (commonly
    # known as hardware profile provisioning NIC)
    for dbHwProfileNic in dbNode.hardwareprofile.nics:
        if dbHwProfileNic == dbNic:
            break
    else:
        print('Adding NIC [%s] to hardware profile [%s]' % (
            dbNic.networkdevice.name, dbNode.hardwareprofile.name))

        dbNode.hardwareprofile.nics.append(dbNic)

    # Attempt to find 'HardwareProfileNetworks' entry
    for dbHardwareProfileNetwork in \
            dbNode.hardwareprofile.hardwareprofilenetworks:
        if dbHardwareProfileNetwork.network == dbNetwork and \
                dbHardwareProfileNetwork.networkdevice == dbNetworkDevice:
            print('Found existing hardware profile/network association')
            break
    else:
        dbHardwareProfileNetwork = HardwareProfileNetwork()
        dbHardwareProfileNetwork.network = dbNetwork
        dbHardwareProfileNetwork.networkdevice = dbNetworkDevice

        dbNode.hardwareprofile.hardwareprofilenetworks.append(
            dbHardwareProfileNetwork)

    session.commit()

    bUpdated = self._updateNetworkConfig(session, dbNode)

    if bUpdated and self.getArgs().bSync:
        print('Applying changes to Tortuga...')

        # One-off puppet run to propagate config changes; best effort
        cmd = ('/opt/puppetlabs/bin/puppet agent --onetime'
               ' --no-daemonize >/dev/null 2>&1')
        tortugaSubprocess.executeCommandAndIgnoreFailure(cmd)
def uninstall_wheel(self, wheel_name):
    """
    Uninstall a previously installed Python wheel, ignoring failures.

    :param wheel_name: name of the installed package/wheel to remove
    """
    # BUG FIX: without '--yes', 'pip uninstall' prompts for interactive
    # confirmation and fails (or hangs) when run without a controlling
    # terminal, so the uninstall silently never happened.
    cmd = 'pip uninstall --yes %s' % (wheel_name)
    tortugaSubprocess.executeCommandAndIgnoreFailure(cmd)
def uninstallPuppetModule(self, moduleName):
    """
    Remove the named puppet module (discarding local changes),
    ignoring any failure of the puppet command.

    :param moduleName: name of the puppet module to uninstall
    """
    tortugaSubprocess.executeCommandAndIgnoreFailure(
        '/opt/puppetlabs/bin/puppet module uninstall'
        ' --color false --ignore-changes %s' % (moduleName))