def __init__(self, req):
    self.__req = req
    self.__pmgr = PlaybookMgr( req['cluster_name'] )

    schema = {
        "type": "object",
        "properties": {
            "infinidbconfig": {
                "items": {
                    "type": "object",
                    "properties": {
                        "xml_section":   { "type": "string", "required": False },
                        "xml_parameter": { "type": "string", "required": False },
                        "default":       { "type": "string", "required": False },
                        "description":   { "type": "string", "blank": False },
                        "em_category":   { "type": "string", "required": False },
                        "em_parameter":  { "type": "string", "required": False },
                        "value":         { "type": "string", "required": False }
                    }
                },
                "minItems": 1
            }
        }
    }

    props = properties.Properties()
    filepath = '%s/config.json' % props['emtools.confdir']
    #print filepath
    try:
        mapfile = open( filepath )
        self.__data = json.load( mapfile )
        validator = validictory.validator.SchemaValidator(blank_by_default=True)
        validator.validate( self.__data, schema )
    except Exception, exc:
        msg = 'Error loading config.json: %s' % exc
        e = errormsg.ErrorMsg_from_parms(msg=msg)
        print e
        sys.exit(1)
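For reference, a hedged example of a config.json entry that would satisfy the schema above. Only the key names come from the schema; the section/parameter names and description here are placeholders:

{
    "infinidbconfig": [
        {
            "xml_section": "SystemConfig",
            "xml_parameter": "SystemName",
            "default": "",
            "description": "Name of the InfiniDB system",
            "em_category": "system",
            "em_parameter": "system_name",
            "value": ""
        }
    ]
}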
def testRun(self):
    p = PlaybookMgr( 'testbook' )
    p.write_inventory( 'testinv', { 'all' : ['localhost'] } )

    f = open( props['emtools.test.sshkeyfile'] )
    keytext = ''.join(f.readlines())
    p.config_ssh( props['emtools.test.user'], keytext )

    rc = p.run_module('testinv', 'all', 'setup', sudo=False)
    self.assertTrue( rc['contacted'].has_key( 'localhost' ))

    rc, results, out, err = p.run_playbook('smokecheck.yml', 'testinv')
    self.assertEquals(rc, 0)

    shutil.rmtree( p.get_rootdir() )
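The dict returned by run_module() mirrors ansible's runner output: hosts that responded appear under 'contacted' and unreachable hosts under 'dark' (FactGetter.run_host below relies on both keys). A minimal sketch of inspecting such a result, assuming the 'testbook' playbook directory, its 'testinv' inventory and the ssh configuration created in the test above:

from emtools.playbookmgr import PlaybookMgr

p = PlaybookMgr('testbook')
reslt = p.run_module('testinv', 'all', 'setup', sudo=False)
for h in reslt['dark']:
    # unreachable hosts carry an error message
    print 'unreachable: %s (%s)' % (h, reslt['dark'][h]['msg'])
for h in reslt['contacted']:
    # contacted hosts carry the module's returned fields/facts
    print 'contacted: %s' % h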
class ConsoleRunner(object):
    def __init__(self, cmd):
        self.__cmd = cmd
        self.__pmgr = PlaybookMgr(cmd['cluster_name'])

    def run(self):
        cmdstr = ''
        if self.__cmd['command'] == 'gettablelocks':
            cmdstr = cmdstr + '{{ infinidb_installdir }}/bin/viewtablelock'
        else:
            cmdstr = cmdstr + '{{ infinidb_installdir }}/bin/calpontConsole %s' % self.__cmd['command']

        try:
            reslt = self.__pmgr.run_module('infinidb', 'pm1', 'command', cmdstr, sudo=False)
        except errormsg.ErrorMsg, exc:
            replydict = {
                "cluster_name": self.__cmd['cluster_name'],
                "command": self.__cmd['command'],
                "console_host": '',
                "rc": exc['rc'],
                "stdout": exc['stdout'],
                "stderr": exc['stderr'],
                "msg": exc['msg'],
                "ansible_cmd": exc['cmd']
            }
            return commandreply.CommandReply_from_dict(replydict)

        host = reslt['contacted'].keys()[0]
        replydict = {
            "cluster_name": self.__cmd['cluster_name'],
            "command": self.__cmd['command'],
            "console_host": host,
            "rc": reslt['contacted'][host]['rc'],
            "stdout": reslt['contacted'][host]['stdout'],
            "stderr": reslt['contacted'][host]['stderr']
        }
        try:
            fn = getattr(console, self.__cmd['command'])
            replydict['results'] = fn(reslt['contacted'][host]['stdout'])
        except:
            pass
        return commandreply.CommandReply_from_dict(replydict)
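A sketch of the command dict ConsoleRunner consumes; 'cluster_name' and 'command' are the only keys read above, and the cluster name here is a placeholder:

cmd = {
    'cluster_name': 'testcluster',   # placeholder cluster name
    'command': 'gettablelocks',      # 'gettablelocks' or any calpontConsole subcommand
}
reply = ConsoleRunner(cmd).run()
print reply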
def __init__(self, req):
    self.__req = req
    self.__pmgr = PlaybookMgr( req['cluster_name'] )
    # TODO - add support for ssh_pass
    ssh_port = None
    if req.has_key('ssh_port'):
        ssh_port = req['ssh_port']
    if req.has_key('ssh_key'):
        self.__pmgr.config_ssh( req['ssh_user'], req['ssh_key'], ssh_port=ssh_port )
    else:
        self.__pmgr.config_ssh( req['ssh_user'], ssh_pass=req['ssh_pass'], ssh_port=ssh_port )
    self.__pmgr.write_inventory( 'default', { 'all' : req['hostnames'] })
    self.__role_info = {}
    self.__instance_info = {}
    self.__parsed_idbxml = False
    self.__user = req['ssh_user']
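A sketch of the request dict this constructor expects, based only on the keys read above; the credentials and host names are placeholders. 'ssh_key' takes precedence when present, otherwise 'ssh_pass' is used, and 'ssh_port' is optional:

req = {
    'cluster_name': 'testcluster',                       # placeholder
    'ssh_user': 'root',
    'ssh_key': '-----BEGIN RSA PRIVATE KEY-----\n...',   # or supply 'ssh_pass' instead
    'hostnames': ['host1.example.com', 'host2.example.com'],
}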
def run_cmd(self):
    '''
    Prepare and run the ansible playbook command for the operation type
    specified in the constructor
    '''
    self._rundir = self._cluster.get_vmi()._rundir

    emboxtype = self._cluster.config()['em']['boxtype']
    if not vagboxes.em_support(emboxtype):
        # Don't even try to install if the boxtype does not support an EM
        print 'supported is %s' % vagboxes.list_all(flags=vagboxes.FLAG_EM)
        Log.error("boxtype %s does not support the EM!" % emboxtype)
        return 1

    self._pkgtype = vagboxes.get_default_pkgtype(emboxtype)
    vmgr = emvmgr.EMVersionManager()
    (emversion, self._pkgfile) = vmgr.retrieve(self._cluster.config()['em']['version'], self._pkgtype)
    self._pkgname = os.path.split( self._pkgfile )[1]

    (ansible_yml, cmdargs) = self._prepare_playbook_install()

    extra_playdir = self._cluster.get_extra_playbook_dir()
    p = PlaybookMgr( os.path.basename(self._rundir), extra_playdir )

    # create ansible inventory file with list of hosts
    emrole = self._cluster.config()['em']['role']
    iplist = [ self._cluster.machine(emrole)['ip'] ]
    ipdict = { 'all' : iplist }
    p.write_inventory( 'emdefault', ipdict )

    # create ansible.cfg file
    self._idbuser = self._cluster.config()['idbuser']
    f = open( common.props['vmi.vagrantvmi.sshkey'] )
    keytext = ''.join(f.readlines())
    p.config_ssh( self._idbuser, keytext )

    # execute playbook thru PlaybookMgr
    Log.info("Running %s EM pkg install playbook; --extra-vars=%s" % (ansible_yml, cmdargs))
    rc, results, out, err = p.run_playbook(ansible_yml, 'emdefault', playbook_args=cmdargs)
    return rc
def main(argv):
    '''
    main function
    '''
    try:
        opts, args = getopt.getopt(argv, "hvi", ['json='])
    except getopt.GetoptError:
        usage()
        sys.exit(2)

    # defaults
    use_stdin = False
    json_file = ''

    for o, a in opts:
        if o == '-h':
            usage()
            sys.exit(2)
        elif o == '-v':
            print 'runplaybook.py Version: %s' % version
            sys.exit(1)
        elif o == '-i':
            use_stdin = True
        elif o == '--json':
            json_file = a
        else:
            print 'unsupported option: %s' % o
            usage()
            sys.exit(2)

    if (use_stdin and json_file) or (not use_stdin and not json_file):
        print 'ERROR: Must specify exactly one of -i or --json'
        usage()
        sys.exit(2)

    jsonstr = None
    if use_stdin:
        lines = sys.stdin.readlines()
        jsonstr = ''.join(lines)
    elif json_file:
        f = open( json_file )
        lines = f.readlines()
        jsonstr = ''.join(lines)

    # debug only
    # print '%s' % req

    try:
        req = playbookreq.PlaybookRequest( jsonstr )
        Log = logutils.getLogger('runplaybook')
        Log.info('request: %s' % req.json_dumps())

        pmgr = PlaybookMgr( req['cluster_name'] )
        rc, results, out, err = pmgr.run_playbook(req['playbook_info']['name'],
                                                  'infinidb',
                                                  req['playbook_info']['hostspec'],
                                                  playbook_args=req['playbook_info']['extravars'])

        preply_dict = {
            'cluster_name'  : req['cluster_name'],
            'playbook_info' : req['playbook_info'],
            'rc'            : rc,
            'stdout'        : out,
            'stderr'        : err,
            'recap_info'    : results
        }
        preply = playbookreply.PlaybookReply_from_dict(preply_dict)
        Log.info('reply: %s' % preply.json_dumps())
        print preply
        return rc
    except:
        import traceback
        print errormsg.ErrorMsg_from_parms( msg=json.dumps( traceback.format_exc() ) )
        sys.exit(1)
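A hedged example of the JSON payload runplaybook.py accepts on stdin (-i) or via --json. The field names match the reads above ('cluster_name' plus 'playbook_info' with 'name', 'hostspec' and 'extravars'); the values themselves are placeholders, with the playbook name borrowed from the smokecheck test earlier in this section:

{
    "cluster_name": "testcluster",
    "playbook_info": {
        "name": "smokecheck.yml",
        "hostspec": "all",
        "extravars": ""
    }
}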
def testSuccess(self):
    p = PlaybookMgr( 'testbook' )
    p.write_inventory( 'testinv', { 'all' : ['foo.calpont.com', 'bar.calpont.com'] } )
    inv = p.read_inventory( 'testinv' )
    self.assertEquals(inv['all'][0], 'foo.calpont.com')
    self.assertEquals(inv['all'][1], 'bar.calpont.com')

    ref_file = '%s/test_testinv' % os.path.dirname(__file__)
    self.assertTrue( testutils.file_compare(ref_file, '%s/testinv' % p.get_rootdir()))

    p.config_ssh( 'root', 'some_key_data1234567890' )
    ref_file = '%s/test_ansible.cfg' % os.path.dirname(__file__)
    self.assertTrue( testutils.file_compare(ref_file, '%s/ansible.cfg' % p.get_rootdir()))
    ref_file = '%s/test_private_key' % os.path.dirname(__file__)
    self.assertTrue( testutils.file_compare(ref_file, '%s/.ssh/private_key' % p.get_rootdir()))

    shutil.rmtree( p.get_rootdir() )
#!/usr/bin/env python
'''
playbooksync.py

synchronize a playbook from the current template
'''
import getopt
import os, sys
from emtools.playbookmgr import PlaybookMgr

#-------------------------------------------------------------------------------
# main entry point
#-------------------------------------------------------------------------------
if __name__ == '__main__':
    if len(sys.argv) < 2:
        print "usage: playbooksync.py <cluster-name>"
        sys.exit(1)

    cluster = sys.argv[1]

    pmgr = PlaybookMgr( cluster )
    print 'Playbook for %s is synced' % cluster
    sys.exit(0)
def run_cmd(self):
    '''
    Prepare and run the ansible playbook command for the operation type
    specified in the constructor
    '''
    self._rundir = self._cluster.get_rundir()
    self._pkgdir = self._cluster.get_pkgdir()
    self._pkgfile = self._cluster.get_pkgfile()
    self._idbuser = self._cluster.config()['idbuser']
    eflag = self._cluster.config()['enterprise']
    if eflag:
        self._entflag = "true"
    else:
        self._entflag = "false"
    self._version = self._pkgfilenameparser.get_pkg_version(self._pkgfile)
    self._hadoop = self._cluster.config()['hadoop']
    self._hdfsflag = "false"
    if self._hadoop:
        self._hdfsflag = "true"
    self._upgfile = self._cluster.get_upgfile()
    self._upgversion = None
    if self._upgfile:
        self._upgversion = self._pkgfilenameparser.get_pkg_version(self._upgfile)
    m = self._cluster.machine('pm1')
    self._pm1_ip = m['ip']
    self._postconfig_opts = self._cluster.get_postconfig_opts()

    # Add -em to postconfig flags for version 4.6 and up
    if self._optype == 'pkginstall':
        if ConfigSpec._version_greaterthan(self._version, '4.6.0-0'):
            self._postconfig_opts += " -em"
        (ansible_yml, cmdargs) = self._prepare_playbook_pkginstall()
    elif self._optype == 'pkgupgrade':
        if ConfigSpec._version_greaterthan(self._upgversion, '4.6.0-0'):
            self._postconfig_opts += " -em"
        (ansible_yml, cmdargs) = self._prepare_playbook_pkgupgrade()
    elif self._optype == 'bininstall':
        if ConfigSpec._version_greaterthan(self._version, '4.6.0-0'):
            self._postconfig_opts += " -em"
        (ansible_yml, cmdargs) = self._prepare_playbook_bininstall()
    elif self._optype == 'binupgrade':
        if ConfigSpec._version_greaterthan(self._upgversion, '4.6.0-0'):
            self._postconfig_opts += " -em"
        (ansible_yml, cmdargs) = self._prepare_playbook_binupgrade()
    else:
        raise Exception("Unsupported ansible playbook type to run: %s" % self._optype)

    extra_playdir = self._cluster.get_extra_playbook_dir()
    p = PlaybookMgr(os.path.basename(self._rundir), extra_playdir)

    # create ansible inventory file with list of hosts;
    # should already exist for an EM-triggered install.
    full_inv_file = '%s/%s' % (p.get_rootdir(), self._inventory_filename)
    if not os.path.exists(full_inv_file):
        machines = self._cluster.machines()
        iplist = []
        infnodelist = []
        for key in machines:
            m = machines[key]
            iplist.append(m['ip'])
            # if we are using the EM in invm mode we don't want that
            # node to participate in the normal InfiniDB install
            if key != 'em1':
                #f.write("key: %s.calpont.com; ip: %s\n" % (key,m['ip']))
                infnodelist.append(m['ip'])
        ipdict = {'all': iplist, 'infinidb_nodes': infnodelist}
        p.write_inventory(self._inventory_filename, ipdict)

    # create ansible.cfg file;
    # should already exist for an EM-triggered install.
    full_ans_file = '%s/%s' % (p.get_rootdir(), 'ansible.cfg')
    if not os.path.exists(full_ans_file):
        keytext = self._cluster.get_sshkey_text()
        p.config_ssh(self._idbuser, keytext)

    # execute playbook thru PlaybookMgr
    self._ansible_file = ansible_yml
    self._extra_vars = cmdargs
    rc, results, out, err = p.run_playbook(ansible_yml, self._inventory_filename,
                                           playbook_args=cmdargs)
    return rc, results, out, err
def __init__(self, cmd):
    self.__cmd = cmd
    self.__pmgr = PlaybookMgr(cmd['cluster_name'])
'''
Created on Feb 3, 2014

@author: bwilkinson
'''
from emtools.playbookmgr import PlaybookMgr
import emtools.factreq as factreq
import emtools.factreply as factreply
import emtools.idbxml as idbxml
import os

if __name__ == '__main__':
    p = PlaybookMgr( 'cdh-head' )

    ssh_key = '''
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEA0NQO/C7ir5tgqrQVX6utjGxrhCqywzWuB80JOivOugVCzDqejHmI/nu8EPu3
F+3LmpI34jXh8+AhyL8Dn7Cdw43++fx8zAGPSalOygfRDENEWptCey6R5xQw4GxoyxSy8DfmG/yj
7IXWNojjrR5xkewKSS7xgDjiNXcFdC0euCsRihTG27d3PZBW9ayEEwMeXThjAZEuT/SW+6WdPWsb
9w9w5vvYWhQtr1kKeaHojnwMqxfnjiTVIs66ck2dwH6mjN0i/PI+zFZW5YF5zuPczr92fiPMnsum
8iAbGUubrqM/SIjkoO4P5vp7hYTXaVM3Kn41Sqj4z8SlDJ4WTBgqDQIDAQABAoIBAFa8zeCXRNa1
zef5VqtfLn2WBu5locyNPlTFKCD+UyZWyxDzBCnKzUkOceYH91u8DIaOVyHhSZG3NbEhDctFW7H/
B7oj0l4WA8MPzMcDiiPyyLBtrqZliHqXm1mMDdbUKSK3xR84x4mVaY1LPG4KqBd5GCifk/WzKtoU
LrK7jvflQ3oqjVTrIifvZYIh4C4NaamotjztIZPtwsTDiIYk0SjxqJQytswIiQt/wPHOR3uHpyQ8
Ak6SNzeWtV3ghftjLJDCCyb32fE2j15Tw4bNimhS2Z34kG5INLSy5plwfPVRBPP2kUJk3KVZtpo9
foG9VxFDiwAve0jmDQKpacdjYS0CgYEA72Bdj+xWrUElSm+OPuhTuo7/Y/lod4RrnBGwPeyng6E2
4v/aZwBc8eIxZNtb43VZm4YO0iJFr+bf4i5I7/ptvp2iskRAskcUpUkCymhFr4DxDcpamJjdV2vr
AdD8bLvd2NVmrOpCWHvbKVrQTyNeKeBJE8mnRFsaCcRZuokaJXsCgYEA31Sb/kclXHq0v3LywKrY
def main(argv):
    '''
    main function
    '''
    try:
        opts, args = getopt.getopt(argv, "hvi", ['json='])
    except getopt.GetoptError:
        usage()
        sys.exit(2)

    # defaults
    use_stdin = False
    json_file = ''

    for o, a in opts:
        if o == '-h':
            usage()
            sys.exit(2)
        elif o == '-v':
            print 'launch.py Version: %s' % version
            sys.exit(1)
        elif o == '-i':
            use_stdin = True
        elif o == '--json':
            json_file = a
        else:
            print 'unsupported option: %s' % o
            usage()
            sys.exit(2)

    if (use_stdin and json_file) or (not use_stdin and not json_file):
        print 'ERROR: Must specify exactly one of -i or --json'
        usage()
        sys.exit(2)

    jsonstr = None
    if use_stdin:
        lines = sys.stdin.readlines()
        jsonstr = ''.join(lines)
    elif json_file:
        f = open( json_file )
        lines = f.readlines()
        jsonstr = ''.join(lines)

    # debug only
    # print '%s' % req

    try:
        req = inventoryreq.InventoryRequest( jsonstr )
        Log = logutils.getLogger('writeinventory')
        Log.info('request: %s' % req.json_dumps())

        pmgr = PlaybookMgr( req['cluster_name'] )

        # the write_inventory method expects a list for each role
        role_map = {}
        for r in req['role_info'].iterkeys():
            if type( req['role_info'][r] ) == list:
                role_map[r] = req['role_info'][r]
            elif type( req['role_info'][r] ) == unicode:
                role_map[r] = [ req['role_info'][r] ]
            else:
                raise Exception("writeinventory ERROR: unsupported type in role info %s : %s" %
                                (r, req['role_info'][r]))

        pmgr.write_inventory('infinidb', role_map )
    except:
        import traceback
        print errormsg.ErrorMsg_from_parms( msg=json.dumps( traceback.format_exc() ) )
        sys.exit(1)

    reply_json = '{ "rc" : 0 }'
    Log.info('reply: %s' % reply_json)
    print reply_json
    return 0
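A hedged example of the JSON request this writeinventory entry point handles. 'cluster_name' and 'role_info' are the fields read above; role values may be a single host (string) or a list, and the role names ('pm1', 'pm2') mirror those used elsewhere in these tools. The host names are placeholders:

{
    "cluster_name": "testcluster",
    "role_info": {
        "all": ["host1.example.com", "host2.example.com"],
        "pm1": "host1.example.com",
        "pm2": ["host2.example.com"]
    }
}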
class FactGetter(object):
    def __init__(self, req):
        self.__req = req
        self.__pmgr = PlaybookMgr( req['cluster_name'] )
        # TODO - add support for ssh_pass
        ssh_port = None
        if req.has_key('ssh_port'):
            ssh_port = req['ssh_port']
        if req.has_key('ssh_key'):
            self.__pmgr.config_ssh( req['ssh_user'], req['ssh_key'], ssh_port=ssh_port )
        else:
            self.__pmgr.config_ssh( req['ssh_user'], ssh_pass=req['ssh_pass'], ssh_port=ssh_port )
        self.__pmgr.write_inventory( 'default', { 'all' : req['hostnames'] })
        self.__role_info = {}
        self.__instance_info = {}
        self.__parsed_idbxml = False
        self.__user = req['ssh_user']

    def write_envsetup(self):
        fname = '%s/env-setup' % self.__pmgr.get_rootdir()
        f = open( fname, 'w' )
        f.write('#!/bin/bash\n')
        envvars = ['PYTHONPATH', 'ANSIBLE_LIBRARY', 'INFINIDB_EM_TOOLS_HOME']
        for var in envvars:
            if os.environ.has_key(var):
                f.write('export %s=%s\n' % (var, os.environ[var]))

    def run_host(self, hostname):
        # debug only
        #print 'Running host %s' % hostname
        reslt = self.__pmgr.run_module( 'default', hostname, 'setup', no_raise=True, sudo=False )

        fqdn = ''
        for h in reslt['dark']:
            fqdn = h
            self.__instance_info[fqdn] = dict( valid=False, reason=reslt['dark'][h]['msg'] )

        for h in reslt['contacted']:
            if reslt['contacted'][h].has_key('failed'):
                # module execution failed, we need to report the error
                self.__instance_info[h] = dict( valid=False, reason=reslt['contacted'][h]['msg'] )
            elif reslt['contacted'][h].has_key('ansible_facts'):
                host_facts = reslt['contacted'][h]['ansible_facts']
                fqdn = host_facts['ansible_fqdn']

                # do a test here to make sure that the server running emtools
                # can resolve the FQDN that ansible found.  This guards against
                # a local, unrouteable hostname.  Note that we already know the
                # original IP was ok because ansible was able to contact the host
                try:
                    # try a lookup
                    host = socket.gethostbyname(fqdn)
                except:
                    # this is bad - it means the hostname is non-routable from the server
                    self.__instance_info[fqdn] = dict( valid=False, reason='non-routeable FQDN: %s' % h)
                    continue

                # we made contact so let's run our site_facts module
                try:
                    site_reslt = self.__pmgr.run_module( 'default', hostname, 'site_facts', sudo=False )
                except errormsg.ErrorMsg, exc:
                    self.__instance_info[h] = dict( valid=False, reason=exc['msg'] )
                    continue

                site_facts = site_reslt['contacted'][h]['ansible_facts']

                self.__instance_info[fqdn] = dict()
                # this first group of checks will determine whether the node is valid from
                # an EM perspective
                self.__instance_info[fqdn]['homedir'] = site_facts['homedir']
                self.__instance_info[fqdn]['python_version'] = host_facts['ansible_python_version']
                self.__instance_info[fqdn]['sudo'] = site_facts['sudo']
                valid = True if ( self.__instance_info[fqdn]['sudo'] and
                                  ( self.__instance_info[fqdn]['python_version'][0:3] == '2.6' or
                                    self.__instance_info[fqdn]['python_version'][0:3] == '2.7' ) ) else False
                self.__instance_info[fqdn]['valid'] = valid
                if not valid:
                    reason = 'no passwordless sudo' if not self.__instance_info[fqdn]['sudo'] else 'unsupported python version'
                else:
                    reason = ''
                self.__instance_info[fqdn]['reason'] = reason
                self.__instance_info[fqdn]['ip_address'] = host_facts['ansible_all_ipv4_addresses'][0]
                self.__instance_info[fqdn]['hostname'] = host_facts['ansible_hostname']
                self.__instance_info[fqdn]['os_family'] = host_facts['ansible_distribution']
                self.__instance_info[fqdn]['gluster_version'] = site_facts['gluster_version']
                self.__instance_info[fqdn]['hadoop_version'] = site_facts['hadoop_version']
                self.__instance_info[fqdn]['pdsh_version'] = site_facts['pdsh_version']
                self.__instance_info[fqdn]['infinidb_version'] = site_facts['infinidb_version']
                self.__instance_info[fqdn]['infinidb_installdir'] = site_facts['infinidb_installdir']
                self.__instance_info[fqdn]['infinidb_user'] = site_facts['infinidb_user']
                # ansible does not report ansible_processor_vcpus on Mac OS
                if host_facts.has_key('ansible_processor_vcpus'):
                    self.__instance_info[fqdn]['processor_count'] = host_facts['ansible_processor_vcpus']
                else:
                    self.__instance_info[fqdn]['processor_count'] = host_facts['ansible_processor_cores']
                self.__instance_info[fqdn]['memory_available'] = host_facts['ansible_memtotal_mb']
                # ansible does not report ansible_swaptotal_mb on Mac OS
                if host_facts.has_key('ansible_swaptotal_mb'):
                    self.__instance_info[fqdn]['swap_configured'] = host_facts['ansible_swaptotal_mb']
                self.__instance_info[fqdn]['em_components'] = {}
                self.__instance_info[fqdn]['em_components']['collectd'] = site_facts['collectd_version']
                self.__instance_info[fqdn]['em_components']['python-stack'] = site_facts['python-stack_version']
                self.__instance_info[fqdn]['em_components']['graphite'] = site_facts['graphite_version']
                self.__instance_info[fqdn]['em_components']['tools'] = site_facts['tools_version']
                self.__instance_info[fqdn]['em_components']['oam-server'] = ''
                self.__instance_info[fqdn]['deployment_type'] = ''
                self.__instance_info[fqdn]['storage_type'] = ''
                self.__instance_info[fqdn]['system_name'] = ''
                self.__instance_info[fqdn]['port3306available'] = site_facts['port3306available']
            else:
                # no clue what happened - didn't see Failed but no ansible facts,
                # return the whole result in the reason field
                self.__instance_info[h] = dict( valid=False, reason='%s' % reslt['contacted'][h] )

            if not self.__parsed_idbxml and self.__instance_info[fqdn]['valid'] and \
               self.__instance_info[fqdn]['infinidb_version'] and self.__instance_info[fqdn]['sudo']:
                self.__parsed_idbxml = True
                # we found infinidb software.  Run our getinfo playbook to retrieve Calpont.xml
                rc, results, out, err = self.__pmgr.run_playbook('getinfo.yml', 'default', host_subset=hostname)
                if rc == 0:
                    xml = idbxml.IdbXml( '%s/cluster_files/Calpont.xml' % (self.__pmgr.get_rootdir()) )
                    for r in xml.get_all_roles():
                        # each role here has role= and ip_address= but we need to
                        # map role to hostname for our reply.
                        role = r['role']
                        ip = r['ip_address']

                        for i in self.__instance_info.iterkeys():
                            self.__instance_info[i]['deployment_type'] = xml.get_parm('Installation', 'ServerTypeInstall')
                            self.__instance_info[i]['storage_type'] = xml.get_parm('Installation', 'DBRootStorageType')
                            self.__instance_info[i]['system_name'] = xml.get_parm('SystemConfig', 'SystemName')

                        host = ''
                        try:
                            # try a lookup
                            host = socket.gethostbyaddr(ip)[0]
                            if host == 'localhost':
                                host = fqdn
                        except:
                            # this is bad - it means that the Calpont.xml we found uses IP addresses that don't
                            # resolve to hostnames on the server.
                            self.__instance_info[fqdn]['valid'] = False
                            self.__instance_info[fqdn]['reason'] = 'Calpont.xml contained IP address %s that does not resolve to a hostname' % ip
                            break

                        self.__role_info[role] = host

                        # check to see if this host needs to be added to the request
                        if not self.__instance_info.has_key(host):
                            self.__req['hostnames'].append(host)
                            self.__pmgr.write_inventory( 'default', { 'all' : self.__req['hostnames'] })
                            self.run_host(host)

                    if self.__instance_info[fqdn]['valid']:
                        # only do these updates if the node is still considered valid - may get set to False
                        # above if unknown IPs in Calpont.xml
                        invdict = { 'all' : self.__req['hostnames'],
                                    'pm:children' : [ 'pm1' ],
                                    'pm1' : [ self.__role_info['pm1'] ] }
                        if self.__role_info.has_key('pm2'):
                            invdict['pm2'] = [ self.__role_info['pm2'] ]
                        self.__pmgr.write_inventory( 'infinidb', invdict )
                        varlist = [ ('em_server', socket.gethostbyname(socket.gethostname())),
                                    ('infinidb_installdir', self.__instance_info[fqdn]['infinidb_installdir']),
                                    ('infinidb_user', self.__instance_info[fqdn]['infinidb_user']) ]
                        self.__pmgr.write_vars('pm', varlist)
                else:
                    # some kind of problem running getinfo.yml...Will reset the entire host info
                    self.__instance_info[h] = { 'valid' : False,
                                                'reason': 'Failed to run playbook getinfo.yml: rc=%s, stdout=%s, stderr=%s' % \
                                                          ( rc, out, err ) }