def check_status_shutdown_error_test(self):
    """Shut down a service out-of-band, then verify 'status' reports ERROR.

    Boots the terminate plan, runs the service's shutdown program
    directly, forces ssh/fab to fail in fake-test mode, and asserts
    that the status CLI output contains "ERROR".  Finally terminates
    the run and expects a clean (0) exit code.
    """
    (osf, outfile) = tempfile.mkstemp()
    os.close(osf)
    dir = os.path.expanduser("~/.cloudinitd/")
    conf_file = self.plan_basedir + "/terminate/top.conf"
    cb = CloudInitD(dir, conf_file, terminate=False, boot=True, ready=True)
    cb.start()
    cb.block_until_complete(poll_period=1.0)
    runname = cb.run_name
    svc = cb.get_service("sampleservice")
    p = svc.shutdown()
    # BUG FIX: poll() returns None while the process is still running and
    # the exit code once it finishes.  The old loop used "while not rc",
    # which spins forever when the shutdown program exits with code 0
    # (0 is falsy).  Test explicitly for None instead.
    rc = p.poll()
    while rc is None:
        time.sleep(0.1)
        rc = p.poll()
    if 'CLOUDINITD_TESTENV' in os.environ:
        # In fake-test mode, force the remote commands to fail so that
        # "status" sees an error condition.  Back up the originals first.
        bkfab = os.environ['CLOUDINITD_FAB']
        bkssh = os.environ['CLOUDINITD_SSH']
        os.environ['CLOUDINITD_FAB'] = "/bin/false"
        os.environ['CLOUDINITD_SSH'] = "/bin/false"
    rc = cloudinitd.cli.boot.main(["-O", outfile, "-v", "-v", "-v", "-v", "status", runname])
    if 'CLOUDINITD_TESTENV' in os.environ:
        os.environ['CLOUDINITD_FAB'] = bkfab
        os.environ['CLOUDINITD_SSH'] = bkssh
    self._dump_output(outfile)
    n = "ERROR"
    line = self._find_str(outfile, n)
    self.assertNotEqual(line, None)
    # Cleanup must still succeed even though the service was killed.
    rc = cloudinitd.cli.boot.main(["-O", outfile, "terminate", "%s" % (runname)])
    self.assertEqual(rc, 0)
def check_repair_error_test(self): if 'CLOUDINITD_TESTENV' in os.environ: # we cannot run this one in fake mode yet return (osf, outfile) = tempfile.mkstemp() os.close(osf) dir = os.path.expanduser("~/.cloudinitd/") conf_file = "%s/outputdep/top.conf" % (cloudinitd.nosetests.g_plans_dir) cb = CloudInitD(dir, conf_file, terminate=False, boot=True, ready=True) cb.start() cb.block_until_complete(poll_period=1.0) runname = cb.run_name svc = cb.get_service("onelvl1") secret = svc.get_attr_from_bag('iaas_secret') key = svc.get_attr_from_bag('iaas_key') iaas_url= svc.get_attr_from_bag('iaas_url') instance_id = svc.get_attr_from_bag('instance_id') con = iaas_get_con(svc._svc, key=key, secret=secret, iaasurl=iaas_url) instance = con.find_instance(instance_id) instance.terminate() print "start repair" rc = cloudinitd.cli.boot.main(["-O", outfile, "-v","-v","-v","repair", runname]) self._dump_output(outfile) n = "ERROR" line = self._find_str(outfile, n) self.assertNotEqual(line, None) print "start terminate" rc = cloudinitd.cli.boot.main(["terminate", "%s" % (runname)]) self.assertEqual(rc, 0)
def get_status_test(self):
    """Boot the one-service plan, check the IaaS status is 'running', tear down."""
    self.plan_basedir = cloudinitd.nosetests.g_plans_dir
    workdir = tempfile.mkdtemp()
    conf_file = self.plan_basedir + "/oneservice/top.conf"
    booter = CloudInitD(workdir, conf_file, terminate=False, boot=True, ready=True)
    booter.start()
    booter.block_until_complete(poll_period=1.0)
    status = booter.get_service("sampleservice").get_iaas_status()
    self.assertEqual("running", status, "status is %s" % (status))
    # Terminate the run via a second CloudInitD handle keyed by run name,
    # then clean up the run database file.
    terminator = CloudInitD(workdir, db_name=booter.run_name, terminate=True, boot=False, ready=False)
    terminator.shutdown()
    terminator.block_until_complete(poll_period=1.0)
    os.remove(terminator.get_db_file())
def dep_keys_test(self):
    """Boot the one-service plan and verify expected keys exist in the service bag."""
    self.plan_basedir = cloudinitd.nosetests.g_plans_dir
    workdir = tempfile.mkdtemp()
    conf_file = self.plan_basedir + "/oneservice/top.conf"
    booter = CloudInitD(workdir, conf_file, terminate=False, boot=True, ready=True)
    booter.start()
    booter.block_until_complete(poll_period=1.0)
    attr_keys = booter.get_service("sampleservice").get_keys_from_bag()
    # Until we figure out how to get __dict__ values from the sqlalchemy
    # objects this check is incomplete: many more keys (name, image, iaas,
    # keyname, username, bootconf, iaas_url, securitygroups, ...) exist in
    # the bag but only this subset is asserted.
    for expected in ("hostname", "instance_id", "webmessage"):
        self.assertTrue(expected in attr_keys,
                        "The key %s should exist in %s" % (expected, str(attr_keys)))
    terminator = CloudInitD(workdir, db_name=booter.run_name, terminate=True, boot=False, ready=False)
    terminator.shutdown()
    terminator.block_until_complete(poll_period=1.0)
    os.remove(terminator.get_db_file())
def test_env_set(self): if cloudinitd.nosetests.is_a_test(): raise SkipTest() dir = tempfile.mkdtemp() conf_file = self.plan_basedir + "/oneservice/top.conf" cb = CloudInitD(dir, conf_file, terminate=False, boot=True, ready=True) cb.start() cb.block_until_complete(poll_period=1.0) svc = cb.get_service("sampleservice") ssh_cmd = svc.get_ssh_command() + " ls -l %s" % (self.remote_dir) # form test directory command print ssh_cmd rc = os.system(ssh_cmd) self.assertEquals(rc, 0) cb = CloudInitD(dir, db_name=cb.run_name, terminate=True, boot=False, ready=False) cb.shutdown() cb.block_until_complete(poll_period=1.0)
def check_status_error_test(self):
    """Kill a VM behind cloudinitd's back, then verify 'status' reports ERROR.

    In fake-test mode the ssh/fab commands are swapped for /bin/false so
    that status still sees a failure; terminate's expected exit code also
    differs between fake and real modes.
    """
    (fd, outfile) = tempfile.mkstemp()
    os.close(fd)
    home_dir = os.path.expanduser("~/.cloudinitd/")
    conf_file = self.plan_basedir + "/terminate/top.conf"
    booter = CloudInitD(home_dir, conf_file, terminate=False, boot=True, ready=True)
    booter.start()
    booter.block_until_complete(poll_period=1.0)
    runname = booter.run_name
    svc = booter.get_service("sampleservice")
    # Pull IaaS credentials from the service bag and terminate the
    # instance directly, bypassing cloudinitd.
    con = iaas_get_con(None,
                       key=svc.get_attr_from_bag('iaas_key'),
                       secret=svc.get_attr_from_bag('iaas_secret'),
                       iaasurl=svc.get_attr_from_bag('iaas_url'))
    con.find_instance(svc.get_attr_from_bag('instance_id')).terminate()
    fake_mode = 'CLOUDINITD_TESTENV' in os.environ
    if fake_mode:
        bkfab = os.environ['CLOUDINITD_FAB']
        bkssh = os.environ['CLOUDINITD_SSH']
        os.environ['CLOUDINITD_FAB'] = "/bin/false"
        os.environ['CLOUDINITD_SSH'] = "/bin/false"
    rc = cloudinitd.cli.boot.main(["-O", outfile, "-v", "-v", "-v", "-v", "status", runname])
    if fake_mode:
        os.environ['CLOUDINITD_FAB'] = bkfab
        os.environ['CLOUDINITD_SSH'] = bkssh
    self._dump_output(outfile)
    self.assertNotEqual(self._find_str(outfile, "ERROR"), None)
    rc = cloudinitd.cli.boot.main(["-O", outfile, "terminate", "%s" % (runname)])
    if fake_mode:
        # in fake mode we cannot detect that an instance was killed
        self.assertEqual(rc, 0)
    else:
        self.assertNotEqual(rc, 0)
class TestLogfetch:
    def setup(self):
        """
        Build a fake test environment, with the sleepers cloudinit.d plan.

        We can grab all logged messages from c.log.transcript.
        """
        self.test_run_name = "TESTRUN"
        # Minimal "events" config pointing the run/vm log dirs at
        # throwaway temp directories.
        self.config = ConfigParser.RawConfigParser()
        self.config.add_section("events")
        self.runlogdir = tempfile.mkdtemp()
        self.config.set("events", "runlogdir", self.runlogdir)
        self.vmlogdir = tempfile.mkdtemp()
        self.config.set("events", "vmlogdir", self.vmlogdir)
        self.optdict = {}
        self.optdict[em_args.NAME.name] = self.test_run_name
        self.params = DefaultParameters(self.config, None)
        self.params.optdict = self.optdict
        remote_svc_adapter = FakeRemoteSvcAdapter()
        self.common = FakeCommon()
        self.modules = FakeModules(remote_svc_adapter=remote_svc_adapter)
        # Note that we monkey-patch the get_scp_command_str function
        # to prepend "echo" to it. That way we can still allow the
        # command to be run, but we can still see how it actually gets
        # constructed
        runlogs = DefaultRunlogs(self.params, self.common)
        runlogs.validate()
        self.modules.runlogs = runlogs
        new_get_scp = make_fake_scp_command_str(runlogs, runlogs.get_scp_command_str)
        # Bind the fake scp builder as a bound method on the runlogs object.
        self.modules.runlogs.get_scp_command_str = types.MethodType(new_get_scp, self.modules.runlogs)
        self.test_dir = os.path.dirname(__file__)
        self.test_db_dir = tempfile.mkdtemp()
        self.test_cd_config = os.path.join(self.test_dir, "configs/main.conf")
        self.cloudinitd = CloudInitD(self.test_db_dir, self.test_cd_config, self.test_run_name)

    def teardown(self):
        # Remove every temp directory created by setup().
        shutil.rmtree(self.test_db_dir)
        shutil.rmtree(self.vmlogdir)
        shutil.rmtree(self.runlogdir)

    def test_fetch_one_vm(self):
        """Smoke-test _fetch_one_vm with a bare RunVM (no hostname or dirs set)."""
        from epumgmt.main.em_core_logfetch import _fetch_one_vm
        test_vm = epumgmt.api.RunVM()
        _fetch_one_vm(self.params, self.common, self.modules, self.test_run_name, test_vm, cloudinitd=self.cloudinitd)

    def test_fetch_by_service_name(self):
        """
        This test constructs a RunVM instance, and then asks logfetch to
        grab its logs.

        We confirm that the correct scp call was made indirectly by
        examining the transcript of the log files. We also neuter the scp
        call by prefixing it with echo, since we're not trying to scp
        from a real host.
        """
        from epumgmt.main.em_core_logfetch import fetch_by_service_name
        test_service_name = "provisioner"
        test_provisioner = epumgmt.api.RunVM()
        test_provisioner.service_type = test_service_name
        test_provisioner_hostname = "test.hostname.example.com"
        test_provisioner.hostname = test_provisioner_hostname
        test_provisioner_vmlogdir = "/some/fake/logdir"
        test_provisioner.vmlogdir = test_provisioner_vmlogdir
        test_provisioner_runlogdir = "/some/fake/local/runlogdir"
        test_provisioner.runlogdir = test_provisioner_runlogdir
        test_provisioner_instanceid = "i-TEST"
        test_provisioner.instanceid = test_provisioner_instanceid
        test_run_vms = []
        test_run_vms.append(test_provisioner)
        self.modules.persistence.store_run_vms(self.test_run_name, test_run_vms)
        # Be tricky and patch in our hostname
        self.cloudinitd.get_service("provisioner")._svc._s.hostname = test_provisioner_hostname
        fetch_by_service_name(self.params, self.common, self.modules, self.test_run_name, test_service_name, self.cloudinitd)
        # Every executed command is logged at DEBUG with "command =" in it.
        run_commands = [message for (level, message) in self.common.log.transcript if level == "DEBUG" and "command =" in message]
        # confirm that scp command gets called for our service
        expected_scp_pattern = ".*@%s:%s %s" % (test_provisioner_hostname, test_provisioner_vmlogdir, test_provisioner_runlogdir)
        # only expect one command to be run
        assert len(run_commands) == 1
        assert re.search(expected_scp_pattern, run_commands[0])

    def test_fetch_all(self):
        """Fetch logs for all VMs in a run, skipping TERMINATED instances.

        Builds one provisioner plus two workers that share a hostname
        (simulating a terminated worker whose replacement was booted with
        the same hostname) and asserts logs are never scp'd from the
        TERMINATED one.
        """
        from epumgmt.main.em_core_logfetch import fetch_all
        test_service_name = "provisioner"
        test_provisioner = epumgmt.api.RunVM()
        test_provisioner.service_type = test_service_name
        test_provisioner_hostname = "test.hostname.example.com"
        test_provisioner.hostname = test_provisioner_hostname
        test_provisioner_vmlogdir = "/some/fake/logdir"
        test_provisioner.vmlogdir = test_provisioner_vmlogdir
        test_provisioner_runlogdir = "/some/fake/local/runlogdir"
        test_provisioner.runlogdir = test_provisioner_runlogdir
        test_provisioner_instanceid = "i-TEST"
        test_provisioner.instanceid = test_provisioner_instanceid
        # Be tricky and patch in our hostname
        self.cloudinitd.get_service("provisioner")._svc._s.hostname = test_provisioner_hostname
        # Two workers. Note that they have the same hostname
        # to simulate the issue where we have a terminated worker
        # and the second one was booted with the same hostname as
        # the first
        test_worker_0 = epumgmt.api.RunVM()
        test_worker_0_service_type = "iamaworker"
        test_worker_0.service_type = test_worker_0_service_type
        test_worker_0_hostname = "worker0.example.com"
        test_worker_0.hostname = test_worker_0_hostname
        test_worker_0_instanceid = "i-TESTWORKER0"
        test_worker_0.instanceid = test_worker_0_instanceid
        test_worker_0_vmlogdir = "/some/fake/logdir"
        test_worker_0.vmlogdir = test_worker_0_vmlogdir
        test_worker_0_runlogdir = "/some/fake/%s/runlogdir" % test_worker_0_instanceid
        test_worker_0.runlogdir = test_worker_0_runlogdir
        test_worker_0_iaas_state = epustates.TERMINATED
        test_worker_0_events = [Event(name="iaas_state", timestamp=1000, state=test_worker_0_iaas_state)]
        test_worker_0.events = test_worker_0_events
        test_worker_1 = epumgmt.api.RunVM()
        test_worker_1_service_type = "iamaworker"
        # NOTE(review): worker_1 is assigned worker_0's service_type value;
        # test_worker_1_service_type above is unused. The values are equal,
        # so behavior is unaffected, but this looks like a copy-paste slip.
        test_worker_1.service_type = test_worker_0_service_type
        test_worker_1.hostname = test_worker_0_hostname
        test_worker_1_instanceid = "i-TESTWORKER1"
        test_worker_1.instanceid = test_worker_1_instanceid
        test_worker_1_vmlogdir = "/some/fake/logdir"
        test_worker_1.vmlogdir = test_worker_1_vmlogdir
        test_worker_1_runlogdir = "/some/fake/%s/runlogdir/" % test_worker_1_instanceid
        test_worker_1.runlogdir = test_worker_1_runlogdir
        test_worker_1_iaas_state = epustates.RUNNING
        test_worker_1_events = [Event(name="iaas_state", timestamp=1000, state=test_worker_1_iaas_state)]
        test_worker_1.events = test_worker_1_events
        test_run_vms = []
        test_run_vms.append(test_provisioner)
        test_run_vms.append(test_worker_0)
        test_run_vms.append(test_worker_1)
        self.modules.persistence.store_run_vms(self.test_run_name, test_run_vms)
        fetch_all(self.params, self.common, self.modules, self.test_run_name, self.cloudinitd)
        run_commands = [message for (level, message) in self.common.log.transcript if level == "DEBUG" and "command =" in message]
        # We have two VMs we should fetch from
        assert len(run_commands) == 2
        # Confirm that we never scp log files from the TERMINATED VM
        for scp_command in run_commands:
            assert scp_command.find(test_worker_0_instanceid) == -1