def testNegative1(self):
    '''test malformed json as well as non-map root objects'''
    with self.assertRaisesRegexp(ValueError, "Extra data.*"):
        ConfigSpec('"key" : "value"')
    with self.assertRaisesRegexp(Exception, ".*not a map.*"):
        ConfigSpec('["key", "value"]')
    # each bad input needs its own context manager; a second call inside
    # the block above would never execute once the first one raises
    with self.assertRaisesRegexp(Exception, ".*not a map.*"):
        ConfigSpec('"somevalue"')
    with self.assertRaisesRegexp(ValueError, "Expecting.*"):
        ConfigSpec('["key" : "value"')

def testConstructOverride(self):
    f = open('/tmp/cspec', 'w')
    s = """{
        "name" : "d-cluster",
        "rolespec" : {
            "pm" : {
                "count" : 4,
                "memory" : 1024,
                "dbroots_list" : [[1,2],[3,4],[5,6],[7,8]]
            }
        }
    }
    """
    f.write(s)
    f.close()
    cfg = ConfigSpec(jsonfile='/tmp/cspec', idbversion='some-old-version', boxtype='some-old-boxtype')
    self.assertEqual( cfg['name'], "d-cluster" )
    self.assertEqual( cfg['idbversion'], "some-old-version" )
    self.assertEqual( cfg['boxtype'], "some-old-boxtype" )
    self.assertEqual( len(cfg['rolespec']), 1 )
    self.assertEqual( cfg['rolespec']['pm']['count'], 4 )
    self.assertEqual( cfg['rolespec']['pm']['memory'], 1024 )
    self.assertEqual( len(cfg['rolespec']['pm']['dbroots_list']), 4 )
    self.assertEqual( len(cfg['rolespec']['pm']['dbroots_list'][0]), 2 )
    self.assertEqual( cfg['rolespec']['pm']['dbroots_list'][0][0], 1 )
    self.assertEqual( cfg['rolespec']['pm']['dbroots_list'][0][1], 2 )

def testConstruct2(self):
    s = """{
        "name" : "b-cluster",
        "idbversion" : "3.5.1-4",
        "boxtype" : "cal-precise64",
        "rolespec" : {
            "pm" : {
                "count" : 4,
                "memory" : 1024,
                "dbroots_list" : [[1,2],[3,4],[5,6],[7,8]]
            }
        }
    }
    """
    cfg = ConfigSpec(s)
    self.assertEqual( cfg['name'], "b-cluster" )
    self.assertEqual( cfg['idbversion'], "3.5.1-4" )
    self.assertEqual( cfg['boxtype'], "cal-precise64" )
    self.assertEqual( len(cfg['rolespec']), 1 )
    self.assertEqual( cfg['rolespec']['pm']['count'], 4 )
    self.assertEqual( cfg['rolespec']['pm']['memory'], 1024 )
    self.assertEqual( len(cfg['rolespec']['pm']['dbroots_list']), 4 )
    self.assertEqual( len(cfg['rolespec']['pm']['dbroots_list'][0]), 2 )
    self.assertEqual( cfg['rolespec']['pm']['dbroots_list'][0][0], 1 )
    self.assertEqual( cfg['rolespec']['pm']['dbroots_list'][0][1], 2 )

def testConstruct1(self):
    s = """{
        "name" : "a-cluster",
        "idbversion" : "3.5.1-4",
        "boxtype" : "cal-precise64",
        "rolespec" : {
            "pm" : {
                "count" : 4,
                "memory" : 1024,
                "dbroots_per" : 2
            },
            "um" : {
                "count" : 1,
                "memory" : 1024
            }
        }
    }
    """
    cfg = ConfigSpec(s)
    self.assertEqual( cfg['name'], "a-cluster" )
    self.assertEqual( cfg['idbversion'], "3.5.1-4" )
    self.assertEqual( cfg['boxtype'], "cal-precise64" )
    self.assertEqual( len(cfg['rolespec']), 2 )
    self.assertEqual( cfg['rolespec']['pm']['count'], 4 )
    self.assertEqual( cfg['rolespec']['pm']['memory'], 1024 )
    self.assertEqual( cfg['rolespec']['pm']['dbroots_per'], 2 )
    self.assertEqual( cfg['rolespec']['um']['count'], 1 )
    self.assertEqual( cfg['rolespec']['um']['memory'], 1024 )

def testConstructFromFile(self):
    f = open('/tmp/cspec', 'w')
    s = """{
        "name" : "c-cluster",
        "idbversion" : "3.5.1-4",
        "boxtype" : "cal-precise64",
        "rolespec" : {
            "pm" : {
                "count" : 4,
                "memory" : 1024,
                "dbroots_list" : [[1,2],[3,4],[5,6],[7,8]]
            }
        }
    }
    """
    f.write(s)
    f.close()
    cfg = ConfigSpec(jsonfile='/tmp/cspec')
    self.assertEqual( cfg['name'], "c-cluster" )
    self.assertEqual( cfg['idbversion'], "3.5.1-4" )
    self.assertEqual( cfg['boxtype'], "cal-precise64" )
    self.assertEqual( len(cfg['rolespec']), 1 )
    self.assertEqual( cfg['rolespec']['pm']['count'], 4 )
    self.assertEqual( cfg['rolespec']['pm']['memory'], 1024 )
    self.assertEqual( len(cfg['rolespec']['pm']['dbroots_list']), 4 )
    self.assertEqual( len(cfg['rolespec']['pm']['dbroots_list'][0]), 2 )
    self.assertEqual( cfg['rolespec']['pm']['dbroots_list'][0][0], 1 )
    self.assertEqual( cfg['rolespec']['pm']['dbroots_list'][0][1], 2 )

def testNegative2(self):
    '''test missing idbversion'''
    s = """{
        "name" : "a-cluster",
        "boxtype" : "cal-precise64",
        "rolespec" : {}
    }
    """
    with self.assertRaisesRegexp(Exception, "ConfigSpec did not specify.*idbversion"):
        ConfigSpec(s)

def testNegative3(self):
    '''test missing name'''
    s = """{
        "idbversion" : "3.5.1-4",
        "boxtype" : "cal-precise64",
        "rolespec" : {}
    }
    """
    with self.assertRaisesRegexp(Exception, "ConfigSpec did not specify.*name"):
        ConfigSpec(s)

def testNegative5(self):
    '''test missing rolespec'''
    s = """{
        "name" : "a-cluster",
        "idbversion" : "3.5.1-4",
        "boxtype" : "cal-precise64"
    }
    """
    with self.assertRaisesRegexp(Exception, "ConfigSpec did not specify.*rolespec"):
        ConfigSpec(s)

def testNegative6(self):
    '''test a wrong type for rolespec'''
    s = """{
        "name" : "a-cluster",
        "idbversion" : "3.5.1-4",
        "boxtype" : "cal-precise64",
        "rolespec" : "myrolespecs"
    }
    """
    with self.assertRaisesRegexp(Exception, "ConfigSpec has wrong type.*rolespec"):
        ConfigSpec(s)

def testPmQuery(self):
    # test the default value when not specified
    s = """{
        "name" : "e-cluster",
        "idbversion" : "3.5.1-4",
        "boxtype" : "cal-precise64",
        "rolespec" : {
            "pm" : {
                "count" : 2,
                "memory" : 1024,
                "dbroots_per" : 2
            }
        }
    }
    """
    cfg = ConfigSpec(s)
    self.assertTrue( cfg.has_key('pm_query') )
    self.assertEqual( cfg['pm_query'], False )

    # test with pm_query specified on a version that supports it
    s = """{
        "name" : "e-cluster",
        "idbversion" : "4.5.0-1",
        "boxtype" : "cal-precise64",
        "pm_query" : true,
        "rolespec" : {
            "pm" : {
                "count" : 2,
                "memory" : 1024,
                "dbroots_per" : 2
            }
        }
    }
    """
    cfg = ConfigSpec(s)
    self.assertTrue( cfg.has_key('pm_query') )
    self.assertEqual( cfg['pm_query'], True )

    # now test an invalid config - pm_query on a version that predates it
    s = """{
        "name" : "e-cluster",
        "idbversion" : "3.5.1-4",
        "boxtype" : "cal-precise64",
        "pm_query" : true,
        "rolespec" : {
            "pm" : {
                "count" : 2,
                "memory" : 1024,
                "dbroots_per" : 2
            }
        }
    }
    """
    cfg = ConfigSpec(s)
    with self.assertRaisesRegexp(Exception, "PM local query option only supported.*"):
        cfg.validate()

def testNonroot(self):
    # test the default value when not specified
    s = """{
        "name" : "a-cluster",
        "idbversion" : "3.5.1-4",
        "boxtype" : "cal-precise64",
        "rolespec" : {
            "pm" : {
                "count" : 4,
                "memory" : 1024,
                "dbroots_per" : 2
            },
            "um" : {
                "count" : 1,
                "memory" : 1024
            }
        }
    }
    """
    cfg = ConfigSpec(s)
    self.assertTrue( cfg.has_key('idbuser') )
    self.assertEqual( cfg['idbuser'], 'root' )
    self.assertEqual( cfg.infinidb_install_dir(), '/usr/local/Calpont' )

    # test with a non-root idbuser specified
    s = """{
        "name" : "a-cluster",
        "idbversion" : "3.5.1-4",
        "boxtype" : "cal-precise64",
        "idbuser" : "calpont",
        "rolespec" : {
            "pm" : {
                "count" : 4,
                "memory" : 1024,
                "dbroots_per" : 2
            },
            "um" : {
                "count" : 1,
                "memory" : 1024
            }
        }
    }
    """
    cfg = ConfigSpec(s)
    self.assertTrue( cfg.has_key('idbuser') )
    self.assertEqual( cfg['idbuser'], 'calpont' )
    self.assertEqual( cfg.infinidb_install_dir(), '/home/calpont/Calpont' )

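# The assertions above pin down the behavior of infinidb_install_dir():
# a root install lands in /usr/local/Calpont, a non-root install in the
# idbuser's home directory. The method itself does not appear in this
# section; the sketch below is inferred from the assertions and is an
# assumption, not the actual ConfigSpec implementation.
def infinidb_install_dir(self):
    # sketch only: derive the install dir from the configured idbuser
    if self['idbuser'] == 'root':
        return '/usr/local/Calpont'
    return '/home/%s/Calpont' % self['idbuser']
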
def testNegative9(self):
    '''Test missing dbroots_* in pm role'''
    s = """{
        "name" : "a-cluster",
        "idbversion" : "3.5.1-4",
        "boxtype" : "cal-precise64",
        "rolespec" : {
            "pm" : {
                "count" : 1
            }
        }
    }
    """
    with self.assertRaisesRegexp(Exception, "Must specify either dbroots_per or dbroots_list.*"):
        ConfigSpec(s)

def testNegative8(self):
    '''Test missing count in pm role'''
    s = """{
        "name" : "a-cluster",
        "idbversion" : "3.5.1-4",
        "boxtype" : "cal-precise64",
        "rolespec" : {
            "pm" : {
                "dbroots_per" : 1
            }
        }
    }
    """
    with self.assertRaisesRegexp(Exception, "ConfigSpec did not specify required attribute.*count"):
        ConfigSpec(s)

def _write_multi(self, wf):
    '''Writes input for multi-node configurations.'''
    wf.write('2\n')    # 2 = multi
    # EC2 prompt is version 3.0 and later
    if not self._using22:
        wf.write('n\n')    # not EC2
    combined = 2
    if self._cluster.config()['rolespec'].has_key('um'):
        combined = 1
    wf.write('%d\n' % combined)    # 2 = combined, 1 = separate
    #if combined == 1 and ConfigSpec._version_greaterthan(self._cluster.config()['idbversion'],'4.5.0-0'):
    #    if self._cluster.config()['pm_query']:
    #        wf.write('y\n')
    #    else:
    #        wf.write('n\n')
    # pm with user = n; applies to version 4.5 and up
    wf.write('%s\n' % self._cluster.config()['name'])
    wf.write('pm1\n')    # local module name always pm1 by convention
    if self._cluster.config()['datdup'] and ConfigSpec._version_greaterthan('4.0.0-0', self._cluster.config()['idbversion']):
        wf.write('y\n')    # y = use datdup
    else:
        self._write_storage_type(wf)
    if self._using22:
        # this is 2.2 - there is a # dbroots prompt here; we need to sum up across all nodes
        dbrootcnt = 0
        for m in self._cluster.machines():
            dbrootcnt += len(self._cluster.machines()[m]['dbroots'])
        wf.write('%d\n' % dbrootcnt)
    if combined == 1:
        self._write_nodes(wf, 'um')
    self._write_nodes(wf, 'pm', nodbroot=self._using22)
    if not self._nonroot:
        wf.write('y\n')    # y = disable SNMP trap
    else:
        wf.write('n\n')    # n = keep SNMP trap disabled (non-root always defaults off)
    wf.write('n\n')    # n = no external devices
    # note that as it stands right now we cannot run the install in this method
    # because postConfigure does a getpass() which does not read from STDIN
    # if we want to do this, postConfigure needs an enhancement to bypass that option
    wf.write('y\n')    # perform system install
    wf.write('%s\n' % self._ptype)    # package type
    if self._cluster.config()['datdup']:
        self._write_datdup(wf)
    wf.write('y\n')    # startup system

def testNegative7(self):
    '''Test missing pm rolespec'''
    s = """{
        "name" : "a-cluster",
        "idbversion" : "3.5.1-4",
        "boxtype" : "cal-precise64",
        "rolespec" : {
            "um" : {
                "count" : 1,
                "memory" : 1024
            }
        }
    }
    """
    with self.assertRaisesRegexp(Exception, "rolespec does not specify a pm role.*"):
        ConfigSpec(s)

def testNegative13(self):
    '''Test invalid dbroots_list'''
    s = """{
        "name" : "a-cluster",
        "idbversion" : "3.5.1-4",
        "boxtype" : "cal-precise64",
        "rolespec" : {
            "pm" : {
                "count" : 1,
                "dbroots_list" : [1]
            }
        }
    }
    """
    with self.assertRaisesRegexp(Exception, "dbroots_list.* is not a list.*"):
        ConfigSpec(s)

def testNegative11(self):
    '''Test invalid dbroots_per'''
    s = """{
        "name" : "a-cluster",
        "idbversion" : "3.5.1-4",
        "boxtype" : "cal-precise64",
        "rolespec" : {
            "pm" : {
                "count" : 1,
                "dbroots_per" : 0
            }
        }
    }
    """
    with self.assertRaisesRegexp(Exception, "Must have at least 1 dbroot per pm.*"):
        ConfigSpec(s)

def testNegative10(self):
    '''Test multiple dbroots_* in pm role'''
    s = """{
        "name" : "a-cluster",
        "idbversion" : "3.5.1-4",
        "boxtype" : "cal-precise64",
        "rolespec" : {
            "pm" : {
                "count" : 1,
                "dbroots_per" : 4,
                "dbroots_list" : [[1,2,3,4]]
            }
        }
    }
    """
    with self.assertRaisesRegexp(Exception, "Cannot specify both dbroots_.*"):
        ConfigSpec(s)

def testUpdate(self):
    s = """{
        "name" : "b-cluster",
        "idbversion" : "3.5.1-4",
        "boxtype" : "cal-precise64",
        "rolespec" : {
            "pm" : {
                "count" : 4,
                "memory" : 1024,
                "dbroots_list" : [[1,2],[3,4],[5,6],[7,8]]
            }
        }
    }
    """
    cfg = ConfigSpec(s)
    cfg['newattr'] = 'newvalue'
    self.assertEqual( cfg['newattr'], "newvalue" )

def testMemUpdate(self):
    # test the default value when not specified
    s = """{
        "name" : "e-cluster",
        "idbversion" : "3.5.1-4",
        "boxtype" : "cal-precise64",
        "rolespec" : {
            "pm" : {
                "count" : 2,
                "dbroots_per" : 2
            }
        }
    }
    """
    cfg = ConfigSpec(s)
    self.assertFalse( cfg['rolespec']['pm'].has_key('memory') )
    cfg['rolespec']['pm']['memory'] = 1024
    self.assertTrue( cfg['rolespec']['pm'].has_key('memory') )
    self.assertEqual( cfg['rolespec']['pm']['memory'], 1024 )
    cfg['rolespec']['pm']['memory'] = 4096
    self.assertEqual( cfg['rolespec']['pm']['memory'], 4096 )

def _write_storage_type(self, wf):
    if not self._using22:
        # versions 3.0 or later: 1 = internal, 2 = external
        # starting at version 4.0 or later: 3 = glusterfs, 4 = hdfs
        if self._cluster.config()['hadoop']:
            wf.write('4\n')    # 4 = hdfs storage
            # TODO - may have to support plugin configuration - not sure if we can
            # trust postConfigure to present the right one as default. Regardless,
            # not relevant until we add alternate hadoop version support
            wf.write('\n')    # accept default plugin
        elif self._cluster.config()['datdup'] and ConfigSpec._version_greaterthan(self._cluster.config()['idbversion'], '4.0.0-0'):
            wf.write('3\n')    # 3 = glusterfs storage
        elif self._cluster.config()['storage'] == 'external':
            wf.write('2\n')    # 2 = external storage
        else:
            wf.write('1\n')    # 1 = internal storage
    else:
        # versions earlier than 3.0: 1 = external, 2 = internal
        if self._cluster.config()['storage'] == 'external':
            wf.write('1\n')    # 1 = external storage
        else:
            wf.write('2\n')    # 2 = internal storage

def testNegative14(self):
    '''Test unknown role in rolespec'''
    s = """{
        "name" : "a-cluster",
        "idbversion" : "3.5.1-4",
        "boxtype" : "cal-precise64",
        "rolespec" : {
            "pm" : {
                "count" : 1,
                "dbroots_per" : 8
            },
            "um" : {
                "count" : 1
            },
            "umpm" : {
                "count" : 1
            }
        }
    }
    """
    with self.assertRaisesRegexp(Exception, "Unknown role.*"):
        ConfigSpec(s)

def write_input(self, fname, cluster, ptype):
    '''Writes the postConfigure input file.

    @param fname   - output file location
    @param cluster - the Cluster instance we want to postConfigure
    @param ptype   - the package type to install
    '''
    self._cluster = cluster
    self._ptype = ptype
    if self._cluster.config()['idbuser'] != 'root':
        self._nonroot = True
    else:
        self._nonroot = False
    # there are a number of differences for 2.2. cache a boolean to tell us
    # whether or not we are in that case
    self._using22 = not ConfigSpec._version_greaterthan(self._cluster.config()['idbversion'], '3.0.0-0')
    wf = open(fname, 'w')
    if len(self._cluster.machines()) > 1:
        self._write_multi(wf)
    else:
        self._write_single(wf)
    wf.close()

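# For context, a minimal usage sketch of PostConfigureHelper. The cluster
# object and the output path here are hypothetical. The generated file is
# a scripted answer sequence for postConfigure's prompts, which is why the
# note in _write_multi about getpass() not reading from STDIN matters.
h = PostConfigureHelper()
h.write_input('/tmp/postconfigure.in', my_cluster, 'binary')    # my_cluster is hypothetical
# the framework then feeds this file to postConfigure, conceptually:
#   postConfigure < /tmp/postconfigure.in
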
def testHadoop(self):
    # test a success path
    s = """{
        "name" : "e-cluster",
        "idbversion" : "3.5.1-4",
        "boxtype" : "cal-precise64-hadoop",
        "rolespec" : {
            "pm" : {
                "count" : 2,
                "memory" : 1024,
                "dbroots_per" : 2
            }
        },
        "hadoop" : {
            "instance-templates" : "1 hadoop-namenode+hadoop-jobtracker,2 hadoop-datanode+hadoop-tasktracker"
        }
    }
    """
    cfg = ConfigSpec(s)
    self.assertTrue( cfg.has_key('hadoop') )
    self.assertEqual( cfg['hadoop']['instance-templates'], "1 hadoop-namenode+hadoop-jobtracker,2 hadoop-datanode+hadoop-tasktracker" )

    # test hadoop without any hadoop attributes, to simulate
    # the usage for an EM type install
    s = """{
        "name" : "e-cluster",
        "idbversion" : "3.5.1-4",
        "boxtype" : "cal-precise64-hadoop",
        "upgrade" : "3.5.2-2",
        "rolespec" : {
            "pm" : {
                "count" : 2,
                "memory" : 1024,
                "dbroots_per" : 2
            }
        },
        "hadoop" : { }
    }
    """
    cfg = ConfigSpec(s)
    self.assertTrue( cfg.has_key('hadoop') )

    # test invalid boxtype
    s = """{

def testEnterprise(self):
    # test the default value when not specified
    s = """{
        "name" : "e-cluster",
        "idbversion" : "3.5.1-4",
        "boxtype" : "cal-precise64",
        "rolespec" : {
            "pm" : {
                "count" : 2,
                "memory" : 1024,
                "dbroots_per" : 2
            }
        }
    }
    """
    cfg = ConfigSpec(s)
    self.assertTrue( cfg.has_key('enterprise') )
    self.assertEqual( cfg['enterprise'], True )

    # test with enterprise specified
    s = """{
        "name" : "e-cluster",
        "idbversion" : "3.5.1-4",
        "boxtype" : "cal-precise64",
        "enterprise" : false,
        "rolespec" : {
            "pm" : {
                "count" : 2,
                "memory" : 1024,
                "dbroots_per" : 2
            }
        }
    }
    """
    cfg = ConfigSpec(s)
    self.assertTrue( cfg.has_key('enterprise') )
    self.assertEqual( cfg['enterprise'], False )

def testUpgrade(self):
    # test the default value when not specified
    s = """{
        "name" : "e-cluster",
        "idbversion" : "3.5.1-4",
        "boxtype" : "cal-precise64",
        "rolespec" : {
            "pm" : {
                "count" : 2,
                "memory" : 1024,
                "dbroots_per" : 2
            }
        }
    }
    """
    cfg = ConfigSpec(s)
    self.assertTrue( cfg.has_key('upgrade') )
    self.assertEqual( cfg['upgrade'], '' )

    # test with an upgrade version specified
    s = """{
        "name" : "e-cluster",
        "idbversion" : "3.5.1-4",
        "boxtype" : "cal-precise64",
        "upgrade" : "3.5.2-2",
        "rolespec" : {
            "pm" : {
                "count" : 2,
                "memory" : 1024,
                "dbroots_per" : 2
            }
        }
    }
    """
    cfg = ConfigSpec(s)
    self.assertTrue( cfg.has_key('upgrade') )
    self.assertEqual( cfg['upgrade'], '3.5.2-2' )

def run_cmd(self):
    '''Prepare and run the ansible playbook command for the operation
    type specified in the constructor'''
    self._rundir = self._cluster.get_rundir()
    self._pkgdir = self._cluster.get_pkgdir()
    self._pkgfile = self._cluster.get_pkgfile()
    self._idbuser = self._cluster.config()['idbuser']
    eflag = self._cluster.config()['enterprise']
    if eflag:
        self._entflag = "true"
    else:
        self._entflag = "false"
    self._version = self._pkgfilenameparser.get_pkg_version(self._pkgfile)
    self._hadoop = self._cluster.config()['hadoop']
    self._hdfsflag = "false"
    if self._hadoop:
        self._hdfsflag = "true"
    self._upgfile = self._cluster.get_upgfile()
    self._upgversion = None
    if self._upgfile:
        self._upgversion = self._pkgfilenameparser.get_pkg_version(self._upgfile)
    m = self._cluster.machine('pm1')
    self._pm1_ip = m['ip']
    self._postconfig_opts = self._cluster.get_postconfig_opts()

    # Add -em to postconfig flags for version 4.6 and up
    if self._optype == 'pkginstall':
        if ConfigSpec._version_greaterthan(self._version, '4.6.0-0'):
            self._postconfig_opts += " -em"
        (ansible_yml, cmdargs) = self._prepare_playbook_pkginstall()
    elif self._optype == 'pkgupgrade':
        if ConfigSpec._version_greaterthan(self._upgversion, '4.6.0-0'):
            self._postconfig_opts += " -em"
        (ansible_yml, cmdargs) = self._prepare_playbook_pkgupgrade()
    elif self._optype == 'bininstall':
        if ConfigSpec._version_greaterthan(self._version, '4.6.0-0'):
            self._postconfig_opts += " -em"
        (ansible_yml, cmdargs) = self._prepare_playbook_bininstall()
    elif self._optype == 'binupgrade':
        if ConfigSpec._version_greaterthan(self._upgversion, '4.6.0-0'):
            self._postconfig_opts += " -em"
        (ansible_yml, cmdargs) = self._prepare_playbook_binupgrade()
    else:
        raise Exception("Unsupported ansible playbook type to run: %s" % self._optype)

    extra_playdir = self._cluster.get_extra_playbook_dir()
    p = PlaybookMgr(os.path.basename(self._rundir), extra_playdir)

    # create ansible inventory file with list of hosts;
    # should already exist for an EM-triggered install.
    full_inv_file = '%s/%s' % (p.get_rootdir(), self._inventory_filename)
    if not os.path.exists(full_inv_file):
        machines = self._cluster.machines()
        iplist = []
        infnodelist = []
        for key in machines:
            m = machines[key]
            iplist.append(m['ip'])
            # if we are using the EM in invm mode we don't want that
            # node to participate in the normal InfiniDB install
            if key != 'em1':
                #f.write("key: %s.calpont.com; ip: %s\n" % (key,m['ip']))
                infnodelist.append(m['ip'])
        ipdict = {'all': iplist, 'infinidb_nodes': infnodelist}
        p.write_inventory(self._inventory_filename, ipdict)

    # create ansible.cfg file;
    # should already exist for an EM-triggered install.
    full_ans_file = '%s/%s' % (p.get_rootdir(), 'ansible.cfg')
    if not os.path.exists(full_ans_file):
        keytext = self._cluster.get_sshkey_text()
        p.config_ssh(self._idbuser, keytext)

    # execute playbook thru PlaybookMgr
    self._ansible_file = ansible_yml
    self._extra_vars = cmdargs
    rc, results, out, err = p.run_playbook(ansible_yml, self._inventory_filename, playbook_args=cmdargs)
    return rc, results, out, err

def testEm(self):
    # test the default value when not specified
    s = """{
        "name" : "e-cluster",
        "idbversion" : "3.5.1-4",
        "boxtype" : "cal-precise64",
        "rolespec" : {
            "pm" : {
                "count" : 2,
                "memory" : 1024,
                "dbroots_per" : 2
            }
        }
    }
    """
    cfg = ConfigSpec(s)
    self.assertTrue( cfg.has_key('em') )
    self.assertEqual( cfg['em'], None )

    # test with em option
    s = """{
        "name" : "e-cluster",
        "idbversion" : "4.5.0-1",
        "boxtype" : "cal-precise64",
        "em" : {
            "present" : true
        },
        "rolespec" : {
            "pm" : {
                "count" : 2,
                "memory" : 1024,
                "dbroots_per" : 2
            }
        }
    }
    """
    cfg = ConfigSpec(s)
    self.assertTrue( cfg.has_key('em') )
    self.assertEqual( cfg['em']['present'], True )
    self.assertEqual( cfg['em']['emhost'], 'localhost' )
    self.assertEqual( cfg['em']['emport'], 9090 )
    self.assertEqual( cfg['em']['oamserver_role'], 'um1' )
    self.assertEqual( cfg['em']['invm'], False )
    self.assertFalse( cfg['em'].has_key('boxtype') )
    self.assertFalse( cfg['em'].has_key('version') )

    # test with fully specified em option
    s = """{
        "name" : "e-cluster",
        "idbversion" : "4.5.0-1",
        "boxtype" : "cal-precise64",
        "em" : {
            "present" : true,
            "emhost" : "testhost",
            "emport" : 7120,
            "oamserver_role" : "pm1"
        },
        "rolespec" : {
            "pm" : {
                "count" : 2,
                "memory" : 1024,
                "dbroots_per" : 2
            }
        }
    }
    """
    cfg = ConfigSpec(s)
    self.assertTrue( cfg.has_key('em') )
    self.assertEqual( cfg['em']['present'], True )
    self.assertEqual( cfg['em']['emhost'], 'testhost' )
    self.assertEqual( cfg['em']['emport'], 7120 )
    self.assertEqual( cfg['em']['oamserver_role'], 'pm1' )
    self.assertEqual( cfg['em']['invm'], False )

    # now test an invalid config - em map without the present flag
    s = """{
        "name" : "e-cluster",
        "idbversion" : "3.5.1-4",
        "boxtype" : "cal-precise64",
        "em" : {
            "foo" : "bar"
        },
        "rolespec" : {
            "pm" : {
                "count" : 2,
                "memory" : 1024,
                "dbroots_per" : 2
            }
        }
    }
    """
    with self.assertRaisesRegexp(Exception, "Must specify present flag.*"):
        cfg = ConfigSpec(s)

    # now test an invalid config - em on a version that does not support it
    s = """{
        "name" : "e-cluster",
        "idbversion" : "3.5.1-4",
        "boxtype" : "cal-precise64",
        "em" : {
            "present" : true
        },
        "rolespec" : {
            "pm" : {
                "count" : 2,
                "memory" : 1024,
                "dbroots_per" : 2
            }
        }
    }
    """
    cfg = ConfigSpec(s)
    with self.assertRaisesRegexp(Exception, "Enterprise Manager.*only supported.*"):
        cfg.validate()

    # test whether the global empresent property works
    s = """{
        "name" : "e-cluster",
        "idbversion" : "4.5.0-1",
        "boxtype" : "cal-centos6",
        "rolespec" : {
            "pm" : {
                "count" : 2,
                "memory" : 1024,
                "dbroots_per" : 2
            }
        }
    }
    """
    common.props['cluster.cluster.empresent'] = True
    try:
        cfg = ConfigSpec(s)
    except Exception, exc:
        common.props['cluster.cluster.empresent'] = False
        raise exc

def run_random_basic002(vers, vmitype, num = None, enterprise = True):
    '''Runs basic002 on a randomly generated config.

    Note that the enterprise parameter is intentionally ignored.'''
    cfgs = [
        ( 'multi_2umpm_combo', 0.5 ),
        ( 'multi_1um_2pm', 0.5 ),
    ]
    if ConfigSpec._version_greaterthan(vers, '3.5.3'):
        cfgs.append( ('singlenode', 0.5) )
    idbusers = [ ( 'root', 0.6 ), ( 'calpont', 0.4 ) ]
    datdups = [ ( True, 0.2 ), ( False, 0.8 ) ]
    binaries = [ ( True, 0.2 ), ( False, 0.8 ) ]
    storages = [ ( 'internal', 0.5 ), ( 'external', 0.5 ) ]
    enterprises = [ ( True, 0.5 ), ( False, 0.5 ) ]
    pmquerys = [ ( True, 0.5 ), ( False, 0.5 ) ]
    emroles = [ ( 'um1', 0.7 ), ( 'em1', 0.2 ), ( 'pm1', 0.1 ) ]

    runlist = []
    if num <= 0:
        # for this runlist we always want to generate at least one
        num = 1
    for i in range(0, num):
        boxtype = _choose_rand_boxtype()
        cfgname = _choose_weighted(cfgs)
        cfg = configs.call_by_name(cfgname, vers, boxtype)
        idbuser = _choose_weighted(idbusers)
        cfg['idbuser'] = idbuser
        enterprise = _choose_weighted(enterprises)
        cfg['enterprise'] = enterprise
        # technically we supported datdup since 3.5.1 but are rereleasing with a different
        # strategy for installation/integration in 4.0
        if ConfigSpec._version_greaterthan(vers, '4.0.0-0') and \
                vagboxes.datdup_support(boxtype) and \
                cfg['rolespec']['pm']['count'] > 1 and \
                enterprise == True:
            datdup = _choose_weighted(datdups)
        else:
            datdup = False
        # TODO: due to various InfiniDB bugs, datdup (i.e. glusterfs) support is
        # currently broken so hardcoding this to false.
        #cfg['datdup'] = datdup
        cfg['datdup'] = False

        # EM related checks
        if common.props['cluster.cluster.eminvm'] and not vagboxes.em_support(boxtype):
            # this can happen if the emboxonly flag is not set and a "legacy" box gets chosen.
            # in that case we just reset the 'em' field to None to bypass any EM in this test
            cfg['em'] = None
        elif common.props['cluster.cluster.eminvm']:
            # randomly vary which node the EM is assigned to
            cfg['em']['role'] = _choose_weighted(emroles)

        if idbuser == 'root':
            cfg['binary'] = _choose_weighted(binaries)
        if ConfigSpec._version_greaterthan(vers, '4.5.0-0'):
            cfg['pm_query'] = _choose_weighted(pmquerys)

        # for unknown reasons there is an issue with the external storage
        # configuration on the smaller of the 3 initial autooam machines
        # (srvautooam, srvoam1, srvoam2). For now, we will avoid running
        # external storage on srvautooam by checking system memory. NOTE:
        # the amount of memory may or may not have anything at all to do
        # with the manifestation of the issue.
        # datdup only works with internal storage, so don't attempt an override here
        if not datdup and psutil.virtual_memory().total >= 16825044992L:
            cfg['storage'] = _choose_weighted(storages)

        runlist.append( (cfg, 'vagrant', tests.basic002()) )
    return runlist

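# _choose_weighted() is used throughout the runlist builders but its
# definition is not shown in this section. A minimal sketch consistent
# with its call sites (a list of (value, weight) pairs whose weights sum
# to 1.0) follows; this is an assumed implementation, not the framework's
# actual helper.
import random

def _choose_weighted(choices):
    # walk the cumulative distribution and return the first value whose
    # bucket contains the random draw
    r = random.random()
    cumulative = 0.0
    for value, weight in choices:
        cumulative += weight
        if r < cumulative:
            return value
    return choices[-1][0]    # guard against floating-point round-off
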
def run_upgrade_suite(vers, vmitype, num = None, enterprise = True):
    '''Runs the standard set of upgrade tests for the version under test.'''
    if common.props['cluster.cluster.use_em_for_dbinstall']:
        raise Exception('run_upgrade_suite does not support cluster.cluster.use_em_for_dbinstall!')

    runlist = []
    vmgr = VersionManager()
    baselist = common.props['testlib.runlists.upgradefrom']
    streams = baselist.split(',')
    for s in streams:
        # if the stream is the same as the version under test
        # then we need to grab the last release on this stream
        minusone = (vers.find(s) == 0)
        basever = s
        try:
            basever = vmgr.latest_release_vers(s, minusone)
        except:
            # if we get here, we assume that s is a specific version
            # that the user wants to upgrade from
            pass

        if basever and ConfigSpec._version_greaterthan(vers, basever):
            if ConfigSpec._version_greaterthan(basever, '3.5.1-6') or \
                    ConfigSpec._version_greaterthan('3.0.0-0', basever):
                # anything between 3.0 and 3.5.1-5 does not support single server
                # installs because of the postconfigure race issue.
                cfg = configs.singlenode(basever)
                cfg['boxtype'] = 'cal-centos58'
                cfg['binary'] = True
                cfg['upgrade'] = vers
                cfg['enterprise'] = enterprise
                cfg['em'] = None    # guarantee no EM since this is a "legacy" boxtype
                runlist.append( (cfg, 'vagrant', tests.upgrade001()) )

            cfg = configs.singlenode(basever)
            cfg['boxtype'] = 'cal-debian6'
            cfg['upgrade'] = vers
            cfg['enterprise'] = enterprise
            if not ConfigSpec._version_greaterthan(basever, '4.5.1-3'):
                # will not repeat this comment each time, but we can use the EM
                # here as long as it is in attach mode and we are on a supported
                # version
                cfg['em'] = None
            runlist.append( (cfg, 'vagrant', tests.upgrade001()) )

            cfg = configs.singlenode(basever)
            cfg['boxtype'] = 'cal-centos6'
            cfg['upgrade'] = vers
            cfg['enterprise'] = enterprise
            if not ConfigSpec._version_greaterthan(basever, '4.5.1-3'):
                cfg['em'] = None
            runlist.append( (cfg, 'vagrant', tests.upgrade001()) )

            cfg = configs.singlenode(basever)
            cfg['boxtype'] = 'cal-precise64'
            cfg['upgrade'] = vers
            cfg['enterprise'] = enterprise
            if not ConfigSpec._version_greaterthan(basever, '4.5.1-3'):
                cfg['em'] = None
            runlist.append( (cfg, 'vagrant', tests.upgrade001()) )

            if ConfigSpec._version_greaterthan(basever, '3.5.1-5'):
                # binary install supported after 3.5.1-5
                cfg = configs.singlenode(basever)
                cfg['boxtype'] = 'cal-centos6'
                cfg['idbuser'] = '******'
                cfg['upgrade'] = vers
                cfg['enterprise'] = enterprise
                if not ConfigSpec._version_greaterthan(basever, '4.5.1-3'):
                    cfg['em'] = None
                runlist.append( (cfg, 'vagrant', tests.upgrade001()) )

            if not s == '2.2':
                # not going to support multi-node upgrades from 2.2 because of
                # differences in the Calpont.xml. Could support, but would need
                # to switch to using a postconfigure.in on the upgrade run of
                # postconfigure
                cfg = configs.multi_1um_2pm(basever)
                cfg['boxtype'] = 'cal-centos6'
                cfg['binary'] = True
                cfg['upgrade'] = vers
                cfg['enterprise'] = enterprise
                if not ConfigSpec._version_greaterthan(basever, '4.5.1-3'):
                    cfg['em'] = None
                runlist.append( (cfg, 'vagrant', tests.upgrade001()) )

                cfg = configs.multi_2umpm_combo(basever)
                cfg['boxtype'] = 'cal-centos6'
                cfg['upgrade'] = vers
                cfg['enterprise'] = enterprise
                if not ConfigSpec._version_greaterthan(basever, '4.5.1-3'):
                    cfg['em'] = None
                runlist.append( (cfg, 'vagrant', tests.upgrade001()) )

                cfg = configs.multi_1um_2pm(basever)
                cfg['boxtype'] = 'cal-debian6'
                cfg['upgrade'] = vers
                cfg['enterprise'] = enterprise
                if not ConfigSpec._version_greaterthan(basever, '4.5.1-3'):
                    cfg['em'] = None
                runlist.append( (cfg, 'vagrant', tests.upgrade001()) )

                if ConfigSpec._version_greaterthan(basever, '3.5.1-5'):
                    cfg = configs.multi_2umpm_combo(basever)
                    cfg['boxtype'] = 'cal-precise64'
                    cfg['idbuser'] = '******'
                    cfg['upgrade'] = vers
                    cfg['enterprise'] = enterprise
                    if not ConfigSpec._version_greaterthan(basever, '4.5.1-3'):
                        cfg['em'] = None
                    runlist.append( (cfg, 'vagrant', tests.upgrade001()) )
    return runlist

def run(self):
    """Convert EM db installreq json string to a configspec"""
    # direct mapping of installreq properties to configspec properties
    #
    # em db install properties       --> configspec properties
    # ------------------------           ---------------------
    # cluster_name                       name
    # cluster_info.infinidb_version      idbversion
    # cluster_info.dbroots_per_pm        rolespec.pm.dbroots_per
    # cluster_info.dbroot_list           rolespec.pm.dbroots_list (for future use)
    # cluster_info.infinidb_user         idbuser
    # cluster_info.storage_type
    #     "local"
    #     "gluster"                      datdup set to True
    #     "hdfs"                         empty hadoop entry
    # cluster_info.pm_query              pm_query
    # cluster_info.um_replication        Not Supported
    # role_info                          rolespec.pm.count
    #
    # configspec properties that are constant or not applicable
    # ----------------------------------------------------------
    # storage              "internal"
    # binary               True
    # upgrade              False
    # enterprise           True
    # hadoop.version       not used (uses default)
    # boxtype              N/A
    # em                   N/A
    # rolespec.pm.memory   N/A
    # rolespec.pm.cpus     N/A
    cfgspec_dict = {}
    cfgspec_dict['name'] = self.__req['cluster_name']
    cfgspec_dict['idbversion'] = self.__req['cluster_info']['infinidb_version']
    cfgspec_dict['idbuser'] = self.__req['cluster_info']['infinidb_user']
    if self.__req['cluster_info']['storage_type'] == 'gluster':
        cfgspec_dict['datdup'] = True
    else:
        cfgspec_dict['datdup'] = False
    cfgspec_dict['binary'] = True
    cfgspec_dict['storage'] = 'internal'
    cfgspec_dict['upgrade'] = False
    cfgspec_dict['enterprise'] = True
    cfgspec_dict['pm_query'] = self.__req['cluster_info']['pm_query']
    if self.__req['cluster_info']['storage_type'] == 'hdfs':
        cfgspec_dict['hadoop'] = {}    # use hadoop defaults
        cfgspec_dict['storage'] = 'hdfs'

    pmCount = 0
    umCount = 0
    cfgspec_dict['rolespec'] = {}
    machines = {}
    dbroots_per_pm = self.__req['cluster_info']['dbroots_per_pm']
    for key in self.__req['role_info']:
        if key.startswith('pm'):
            pmCount += 1
            m = {}
            m['ip'] = self.__req['role_info'][key]
            m['hostname'] = self.__req['role_info'][key]
            dbroots = []
            pmnum = int(key[2:])    # e.g. 'pm3' -> 3
            for j in range(1, dbroots_per_pm + 1):
                dbroots.append(j + ((pmnum - 1) * dbroots_per_pm))
            m['dbroots'] = dbroots
            machines[key] = m
        if key.startswith('um'):
            umCount += 1
            m = {}
            m['ip'] = self.__req['role_info'][key]
            m['hostname'] = self.__req['role_info'][key]
            machines[key] = m

    if pmCount > 0:
        cfgspec_dict['rolespec']['pm'] = {
            'count': pmCount,
            'dbroots_per': self.__req['cluster_info']['dbroots_per_pm']
        }
    else:
        # no PM's present; the ConfigSpec construction below rejects the
        # rolespec ("rolespec does not specify a pm role")
        pass
    if umCount > 0:
        cfgspec_dict['rolespec']['um'] = {'count': umCount}

    cfgspec_json = json.dumps(cfgspec_dict)
    machines_json = json.dumps(machines)
    Log = logutils.getLogger('installdatabase')
    Log.info('configspec: %s' % cfgspec_json)
    Log.info('machines: %s' % machines_json)
    cfg = ConfigSpec(cfgspec_json)
    cfg.validate()
    return cfg, machines

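# A concrete illustration of the mapping above; every value below is
# invented for the example, not taken from a real install request.
req = {
    "cluster_name": "example-cluster",
    "cluster_info": {
        "infinidb_version": "4.5.0-1",
        "infinidb_user": "root",
        "storage_type": "local",
        "pm_query": False,
        "dbroots_per_pm": 2,
    },
    "role_info": {"pm1": "10.0.0.1", "pm2": "10.0.0.2", "um1": "10.0.0.3"},
}
# Fed through run(), this yields a configspec with name "example-cluster",
# idbversion "4.5.0-1", idbuser "root", datdup False, storage "internal",
# rolespec.pm = {count: 2, dbroots_per: 2} and rolespec.um = {count: 1};
# in the machines map pm1 is assigned dbroots [1, 2] and pm2 gets [3, 4].
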
def testVersionGT(self):
    self.assertTrue( ConfigSpec._version_greaterthan('3.5', '3.5.1-5') )
    self.assertFalse( ConfigSpec._version_greaterthan('3.5.1', '3.5.1-5') )
    self.assertTrue( ConfigSpec._version_greaterthan('3.5.1.1-2', '3.5.1-5') )
    self.assertTrue( ConfigSpec._version_greaterthan('3.5.2-1', '3.5.1-5') )
    self.assertTrue( ConfigSpec._version_greaterthan('3.5', '2.2') )
    self.assertFalse( ConfigSpec._version_greaterthan('3.5', '4.0') )
    self.assertTrue( ConfigSpec._version_greaterthan('3.5', '3.0') )
    self.assertFalse( ConfigSpec._version_greaterthan('3.0', '3.5') )
    self.assertTrue( ConfigSpec._version_greaterthan('3.5', '3.5') )
    self.assertFalse( ConfigSpec._version_greaterthan('3.5.1-5', '3.5') )
    self.assertFalse( ConfigSpec._version_greaterthan('3.5.1', '3.5.2') )
    self.assertTrue( ConfigSpec._version_greaterthan('3.5.2', '3.5.1') )
    self.assertTrue( ConfigSpec._version_greaterthan('3.5.2', '3.5.2') )
    self.assertTrue( ConfigSpec._version_greaterthan('3.5.2-1', '3.5.2-1') )
    self.assertTrue( ConfigSpec._version_greaterthan('3.5.2-2', '3.5.2-1') )
    self.assertFalse( ConfigSpec._version_greaterthan('3.5.2-2', '3.5.2-3') )
    self.assertTrue( ConfigSpec._version_greaterthan('3.5.2.1-1', '3.5.2.1-1') )
    self.assertTrue( ConfigSpec._version_greaterthan('3.5.2.1-2', '3.5.2.1-1') )
    self.assertFalse( ConfigSpec._version_greaterthan('3.5.2.1-2', '3.5.2.1-3') )
    self.assertTrue( ConfigSpec._version_greaterthan('3.5.2.1', '3.5.2.0') )
    self.assertFalse( ConfigSpec._version_greaterthan('3.5.2.1', '3.5.2.2') )
    self.assertTrue( ConfigSpec._version_greaterthan('3.5.2.1-1', '3.5.2.1') )
    self.assertFalse( ConfigSpec._version_greaterthan('3.5.2.1', '3.5.2.1-1') )
    self.assertTrue( ConfigSpec._version_greaterthan('Latest', '3.5.2.1') )
    self.assertTrue( ConfigSpec._version_greaterthan('Latest', '3.5') )
    self.assertTrue( ConfigSpec._version_greaterthan('Latest', '4.0') )
    self.assertFalse( ConfigSpec._version_greaterthan('3.5', 'Latest') )
    self.assertFalse( ConfigSpec._version_greaterthan('2.2.7-2', '2.2.10-1') )
    self.assertTrue( ConfigSpec._version_greaterthan('2.2.10-1', '2.2.7-2') )
    self.assertTrue( ConfigSpec._version_greaterthan('4.0.0-1', '4.0.0-0') )
    self.assertFalse( ConfigSpec._version_greaterthan('4.0.0-0', '4.0.0-1') )
    self.assertTrue( ConfigSpec._version_greaterthan('4.0.0-1', '4.0.0-1_old') )
    self.assertFalse( ConfigSpec._version_greaterthan('4.0.0-1_old', '4.0.0-1') )

def testExtstore(self):
    # test the default value when not specified
    s = """{
        "name" : "e-cluster",
        "idbversion" : "3.5.1-4",
        "boxtype" : "cal-precise64",
        "rolespec" : {
            "pm" : {
                "count" : 2,
                "memory" : 1024,
                "dbroots_per" : 2
            },
            "um" : {
                "count" : 1,
                "memory" : 1024
            }
        }
    }
    """
    cfg = ConfigSpec(s)
    self.assertTrue( cfg.has_key('storage') )
    self.assertEqual( cfg['storage'], 'internal' )

    # test with external storage enabled
    s = """{
        "name" : "e-cluster",
        "idbversion" : "3.5.1-4",
        "boxtype" : "cal-precise64",
        "storage" : "external",
        "rolespec" : {
            "pm" : {
                "count" : 2,
                "memory" : 1024,
                "dbroots_per" : 2
            },
            "um" : {
                "count" : 1,
                "memory" : 1024
            }
        }
    }
    """
    cfg = ConfigSpec(s)
    self.assertTrue( cfg.has_key('storage') )
    self.assertEqual( cfg['storage'], 'external' )

    # test incorrect setting for external storage flag
    s = """{
        "name" : "e-cluster",
        "idbversion" : "3.5.1-4",
        "boxtype" : "cal-precise64",
        "storage" : true,
        "rolespec" : {
            "pm" : {
                "count" : 2,
                "memory" : 1024,
                "dbroots_per" : 2
            },
            "um" : {
                "count" : 1,
                "memory" : 1024
            }
        }
    }
    """
    with self.assertRaisesRegexp(Exception, "ConfigSpec has wrong type for attribute storage.*"):
        cfg = ConfigSpec(s)

def testBinary(self):
    # test the default value when not specified
    s = """{
        "name" : "a-cluster",
        "idbversion" : "3.5.1-4",
        "boxtype" : "cal-precise64",
        "rolespec" : {
            "pm" : {
                "count" : 4,
                "memory" : 1024,
                "dbroots_per" : 2
            },
            "um" : {
                "count" : 1,
                "memory" : 1024
            }
        }
    }
    """
    cfg = ConfigSpec(s)
    self.assertTrue( cfg.has_key('binary') )
    self.assertFalse( cfg['binary'] )

    s = """{
        "name" : "a-cluster",
        "idbversion" : "3.5.1-4",
        "boxtype" : "cal-precise64",
        "binary" : true,
        "rolespec" : {
            "pm" : {
                "count" : 4,
                "memory" : 1024,
                "dbroots_per" : 2
            },
            "um" : {
                "count" : 1,
                "memory" : 1024
            }
        }
    }
    """
    cfg = ConfigSpec(s)
    self.assertTrue( cfg.has_key('binary') )
    self.assertTrue( cfg['binary'] )

    s = """{
        "name" : "a-cluster",
        "idbversion" : "3.5.1-4",
        "boxtype" : "cal-precise64",
        "binary" : false,
        "rolespec" : {
            "pm" : {
                "count" : 4,
                "memory" : 1024,
                "dbroots_per" : 2
            },
            "um" : {
                "count" : 1,
                "memory" : 1024
            }
        }
    }
    """
    cfg = ConfigSpec(s)
    self.assertTrue( cfg.has_key('binary') )
    self.assertFalse( cfg['binary'] )

    s = """{
        "name" : "a-cluster",
        "idbversion" : "3.5.1-4",
        "boxtype" : "cal-precise64",
        "idbuser" : "calpont",
        "binary" : "true",
        "rolespec" : {
            "pm" : {
                "count" : 4,
                "memory" : 1024,
                "dbroots_per" : 2
            },
            "um" : {
                "count" : 1,
                "memory" : 1024
            }
        }
    }
    """
    with self.assertRaisesRegexp(Exception, "ConfigSpec has wrong type for attribute binary.*"):
        cfg = ConfigSpec(s)

def _alloc_construct(self, cluster):
    '''create a new vmi instance.'''
    if not cluster.config().has_key('boxtype'):
        raise Exception("Vagrant cluster creation requires a boxtype in the ConfigSpec")

    # this hadoop validation check was formerly in configspec, but
    # moved to here to remove autooam/vagboxes dependency from
    # emtools/configspec
    if cluster.config().has_key('hadoop') and cluster.config()['hadoop']:
        if not vagboxes.hadoop_support(cluster.config()['boxtype']):
            # reference cluster.config() here, not the configspec's internal
            # jsonmap this check was copied from
            raise Exception("Hadoop not supported on boxtype %s" % cluster.config()['boxtype'])

    self._subnet = self._salloc.alloc(cluster)

    # first we want to look for our root directory, make sure it
    # does not already exist and then create it
    root = common.props['vmi.vagrantvmi.vagrantdir']
    utils.mkdir_p(root)
    self._rundir = '%s/%s_%s' % (root, cluster.name(), str(cluster.id()))
    os.makedirs(self._rundir)

    # this is where we will write stdout and stderr for any calls
    # executed against this VMI
    self._outfile = "%s/%s.out" % (self._rundir, cluster.name())

    self._defmemsize = common.props['vmi.vagrantvmi.defmemsize']
    self._defcpus = common.props['vmi.vagrantvmi.defcpus']

    # do a sanity check to make sure we don't ask for a non-existent package.
    # we only support enterprise=False for versions 4.0 and later
    entpkg = cluster.config()['enterprise']
    if ConfigSpec._version_greaterthan('4.0.0-0', cluster.config()['idbversion']):
        Log.info('resetting enterprise to True for version %s' % cluster.config()['idbversion'])
        entpkg = True

    # make sure that our package exists
    vm = VersionManager()
    if cluster.config()['idbuser'] != 'root' or cluster.config()['binary']:
        ptype = 'binary'
        # set this to true in case not already set so that vagrant file writer
        # can depend on it being accurate
        cluster.config()['binary'] = True
    else:
        ptype = vagboxes.get_default_pkgtype(cluster.config()['boxtype'])
    self._pkgfile = vm.retrieve(cluster.config()['idbversion'], ptype, enterprise=entpkg)

    # handle the upgrade version if the user specified it
    upgfile = None
    upgvers = None
    if cluster.config()['upgrade']:
        upgfile = vm.retrieve(cluster.config()['upgrade'], ptype, enterprise=entpkg)
        upgvers = vm.get_pkg_version(upgfile)
    self._upgfile = upgfile

    # handle datdup package if the user requested it - note that the datdup
    # package is only relevant prior to version 4.0
    datduppkgfile = None
    if cluster.config()['datdup'] and not ConfigSpec._version_greaterthan(cluster.config()['idbversion'], '4.0.0-0'):
        datduppkgfile = vm.retrieve(cluster.config()['idbversion'], ptype, datdup=True)

    self._alloc_machines()

    h = PostConfigureHelper()
    self._pfile = '%s/postconfigure.in' % self._rundir
    h.write_input(self._pfile, cluster, ptype)

    # @bug 5990: don't need to copy public key. vagrant
    # public access should already be setup when cluster
    # was instantiated.
    # copy public key to shared directory so that vagrant can access
    #utils.mkdir_p("%s/.ssh" % self._rundir)
    #shutil.copy( '%s.pub' % common.props['emtools.test.sshkeyfile'],
    #             '%s/.ssh/public_key' % self._rundir)

    self._vfile = self._rundir + '/Vagrantfile'
    vfile = VagrantFileWriter(
        cluster,
        self._pkgfile,
        vm.get_pkg_version(self._pkgfile),
        datduppkgfile,
        self._upgfile,
        upgvers,
        self._subnet,
        self._rundir)
    vfile.writeVagrantFile(self._vfile)
    cluster.vmi(self)

    # For external DBRoot storage: delete/recreate dataN directories
    # locally, to be NFS mounted for use on each PM
    if cluster.config()['storage'] == 'external':
        rootCount = cluster.config().total_dbroot_count()
        for i in range(rootCount):
            dbRootDir = '%s/data%d' % (self._rundir, i + 1)
            if os.path.exists(dbRootDir):
                shutil.rmtree(dbRootDir)
            os.mkdir(dbRootDir)

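# total_dbroot_count() is called above but not defined in this section.
# Given the rolespec semantics the tests exercise (a pm role carries
# either dbroots_per with a count, or an explicit dbroots_list, never
# both), a plausible sketch of the ConfigSpec method follows -- an
# assumption, not the actual implementation:
def total_dbroot_count(self):
    pm = self['rolespec']['pm']
    if pm.has_key('dbroots_list'):
        # explicit per-PM dbroot lists, e.g. [[1,2],[3,4]] -> 4 dbroots
        return sum(len(dbroots) for dbroots in pm['dbroots_list'])
    # otherwise every PM gets the same number of dbroots
    return pm['count'] * pm['dbroots_per']
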
s = """{ "name" : "e-cluster", "idbversion" : "4.5.0-1", "boxtype" : "cal-centos6", "rolespec" : { "pm" : { "count" : 2, "memory" : 1024, "dbroots_per" : 2 } } } """ common.props['cluster.cluster.eminvm'] = True try: cfg = ConfigSpec(s) except Exception, exc: common.props['cluster.cluster.eminvm'] = False raise exc common.props['cluster.cluster.eminvm'] = False self.assertTrue( cfg.has_key('em') ) self.assertEqual( cfg['em']['present'], True ) self.assertEqual( cfg['em']['emhost'], 'localhost' ) self.assertEqual( cfg['em']['emport'], 9090 ) self.assertEqual( cfg['em']['oamserver_role'], 'um1' ) self.assertEqual( cfg['em']['invm'], True ) self.assertEqual( cfg['em']['boxtype'], 'cluster' ) self.assertEqual( cfg['em']['version'], 'Latest' ) if __name__ == "__main__": #import sys;sys.argv = ['', 'Test.testName']