Example #1
def reboot(options, args):
    """
    Reboot an already booted plan.  You must supply the run name of the booted plan.
    """
    if len(args) < 2:
        print "The reboot command requires a run name.  See --help"
        return 1
    dbname = args[1]
    cb = CloudInitD(options.database, db_name=dbname, log_level=options.loglevel, level_callback=level_callback, service_callback=service_callback, logdir=options.logdir, terminate=True, boot=False, ready=False, continue_on_error=True)
    print_chars(1, "Rebooting %s\n" % (cb.run_name))
    cb.shutdown()
    try:
        print_chars(1, "Terminating all services %s\n" % (cb.run_name))
        options.logger.info("Terminating all services")
        cb.block_until_complete(poll_period=0.1)
        options.logger.info("Starting services back up")
        cb = CloudInitD(options.database, db_name=dbname, log_level=options.loglevel, level_callback=level_callback, service_callback=service_callback, logdir=options.logdir, terminate=False, boot=True, ready=True, continue_on_error=False)
        print_chars(1, "Booting all services %s\n" % (cb.run_name))
        cb.start()
        cb.block_until_complete(poll_period=0.1)
        return 0
    except CloudServiceException, svcex:
        print svcex
    except MultilevelException, mex:
        print mex
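
Note: every example on this page follows the same lifecycle: construct a CloudInitD handle (the terminate/boot/ready flags select the operation), kick it off with start() or shutdown(), then poll with block_until_complete(). A minimal sketch of that pattern, with an assumed import path and placeholder database directory and run name:

# Minimal lifecycle sketch. The import path is assumed; the DB directory and
# run name are placeholders for values from your own launch.
from cloudinitd.user_api import CloudInitD

db_dir = "/home/user/.cloudinitd"   # hypothetical database directory
run_name = "myrun"                  # hypothetical run name of a booted plan

# Attach to an existing run in status mode: no boot, no terminate, just readiness checks.
cb = CloudInitD(db_dir, db_name=run_name, terminate=False, boot=False, ready=True)
cb.start()
cb.block_until_complete(poll_period=1.0)

# Attach again in terminate mode and shut the run down.
cb = CloudInitD(db_dir, db_name=run_name, terminate=True, boot=False, ready=False)
cb.shutdown()
cb.block_until_complete(poll_period=1.0)
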
Example #2
def iceage(options, args):
    if len(args) < 2:
        print "The iceage command requires a run name.  See --help"
        return 1
    dbname = args[1]

    cb = CloudInitD(options.database, db_name=dbname, log_level=options.loglevel, logdir=options.logdir, terminate=False, boot=False, ready=True)
    ha = cb.get_iaas_history()

    print_chars(0, "ID      \t:\tstate:\tassociated service\n")
    for h in ha:
        print_chars(1, "%s\t:\t%s\t:\t" % (h.get_id(), h.get_service_name()))
        state = h.get_state()
        clean = False
        color = None
        if state == "running":
            color = "green"
            clean = True
        elif state == "terminated" or state == "shutting-down":
            color="red"
        elif state == "pending":
            color="yellow"
            clean = True

        print_chars(1, ": %s\n" % (state), color=color)
        if options.kill and clean:
            print_chars(1, "Terminating %s\n" % (h.get_id()), bold=True)
            h.terminate()

    return 0
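
The command functions on this page all take an (options, args) pair, where args[0] is the command name and args[1] is the run name. A hedged sketch of driving iceage directly with a stub options object (the attribute names are the ones read above; all values are placeholders):

# Hedged sketch: call the iceage command function directly with a stub options object.
# Only the attributes read above are provided (database, loglevel, logdir, kill);
# the values are placeholders.
from argparse import Namespace

options = Namespace(
    database="/home/user/.cloudinitd",  # hypothetical DB directory
    loglevel="warn",                    # placeholder log level
    logdir="/tmp/cloudinitd-logs/",     # placeholder log directory
    kill=False,                         # set True to terminate the VMs marked cleanable
)
rc = iceage(options, ["iceage", "myrun"])
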
    def _start_one(self, conf_file):

        dir = tempfile.mkdtemp()
        conf_file = self.plan_basedir + "/" + conf_file + "/top.conf"
        cb = CloudInitD(dir, conf_file, terminate=False, boot=True, ready=True)
        cb.start()
        cb.block_until_complete(poll_period=1.0)
        return (dir, cb)
Example #4
def launch_new(options, args):
    """
    Boot a new launch plan.  You must supply the path to a top level configuration file.  A run name will be displayed in the output.  See --help for more information.
    """

    if len(args) < 2:
        print "The boot command requires a top level file.  See --help"
        return 1

    config_file = args[1]
    print_chars(1, "Starting up run ")
    print_chars(1, "%s\n" % (options.name), inverse=True, color="green", bold=True)

    cb = CloudInitD(options.database, log_level=options.loglevel, db_name=options.name, config_file=config_file, level_callback=level_callback, service_callback=service_callback, logdir=options.logdir, terminate=False, boot=True, ready=True, fail_if_db_present=True)
    print_chars(3, "Logging to: %s%s.log\n"  % (options.logdir, options.name))

    if options.validate:
        print_chars(1, "Validating the launch plan.\n")
        errors = cb.boot_validate()
        if len(errors) > 0:
            print_chars(0, "The boot plan is not valid.\n", color = "red")
            for (svc, ex) in errors:
                print_chars(1, "Service %s had the error:\n" % (svc.name))
                print_chars(1, "\t%s" %(str(ex)))
            return 1

    if options.dryrun:
        test_env = _getenv_or_none('CLOUDINITD_TESTENV')
        host_time_env = _getenv_or_none('CLOUDINITD_CBIAAS_TEST_HOSTNAME_TIME')
        fab_env = _getenv_or_none('CLOUDINITD_FAB')
        ssh_env = _getenv_or_none('CLOUDINITD_SSH')

        print_chars(1, "Performing a dry run...\n", bold=True)
        os.environ['CLOUDINITD_TESTENV'] = "2"
        os.environ['CLOUDINITD_CBIAAS_TEST_HOSTNAME_TIME'] = "0.0"
        os.environ['CLOUDINITD_FAB'] = cloudinitd.find_true()
        os.environ['CLOUDINITD_SSH'] = cloudinitd.find_true()

        try:
            (rc, cb) = _launch_new(options, args, cb)
            print_chars(1, "Dry run successful\n", bold=True, color="green")
        finally:
            _setenv_or_none('CLOUDINITD_TESTENV', test_env)
            _setenv_or_none('CLOUDINITD_CBIAAS_TEST_HOSTNAME_TIME', host_time_env)
            _setenv_or_none('CLOUDINITD_FAB', fab_env)
            _setenv_or_none('CLOUDINITD_SSH', ssh_env)
            if not options.noclean:
                path = "%s/cloudinitd-%s.db" % (options.database, cb.run_name)
                if not os.path.exists(path):
                    raise Exception("That DB does not seem to exist: %s" % (path))
                os.remove(path)

        return rc

    (rc, cb) = _launch_new(options, args, cb)
    return rc
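
The dry-run branch above saves and restores several environment variables through _getenv_or_none and _setenv_or_none, which are not shown on this page. A minimal sketch of what such helpers could look like (assumed behavior, not necessarily the project's actual implementation):

import os

def _getenv_or_none(key):
    # Return the current value of an environment variable, or None if it is unset.
    return os.environ.get(key)

def _setenv_or_none(key, value):
    # Restore an environment variable to a saved value; None means it was originally unset.
    if value is None:
        if key in os.environ:
            del os.environ[key]
    else:
        os.environ[key] = value
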
 def test_poll_to_soon_error(self):
     self.plan_basedir = cloudinitd.nosetests.g_plans_dir
     dir = tempfile.mkdtemp()
     conf_file = self.plan_basedir + "/simplelevels/top.conf"
     cb = CloudInitD(dir, conf_file, terminate=False, boot=True, ready=True)
     try:
         cb.block_until_complete(poll_period=1.0)
         self.fail("exception should have been thrown")
     except APIUsageException, ex:
         pass
 def test_manyservices_one_vm_simple(self):
     self.plan_basedir = cloudinitd.nosetests.g_plans_dir
     dir = tempfile.mkdtemp()
     conf_file = self.plan_basedir + "/singlevmmanyservice/top.conf"
     cb = CloudInitD(dir, conf_file, terminate=False, boot=True, ready=True)
     cb.start()
     cb.block_until_complete(poll_period=1.0)
     cb = CloudInitD(dir, db_name=cb.run_name, terminate=True, boot=False, ready=False)
     cb.shutdown()
     cb.block_until_complete(poll_period=1.0)
def load(p, c, m, run_name, cloudinitd_dbdir, silent=False, terminate=False, wholerun=True):
    """Load any EPU related instances from a local cloudinit.d launch with the same run name.
    """
    
    try:
        cb = CloudInitD(cloudinitd_dbdir, db_name=run_name, terminate=terminate, boot=False, ready=False)
        cb.start()
        cb.block_until_complete()
    except APIUsageException, e:
        raise IncompatibleEnvironment("Problem loading records from cloudinit.d: %s" % str(e))
Example #8
 def dep_not_found_test(self):
     self.plan_basedir = cloudinitd.nosetests.g_plans_dir
     dir = tempfile.mkdtemp()
     conf_file = self.plan_basedir + "/oneservice/top.conf"
     cb = CloudInitD(dir, conf_file, terminate=False, boot=True, ready=True)
     cb.start()
     ok = False
     try:
         cb.find_dep("notaservice", "whatever")
     except APIUsageException, ex:
         ok = True
 def test_badlevel_bootpgm(self):
     ilist_1 = self._get_running_vms()
     count1 = len(ilist_1)
     self.plan_basedir = cloudinitd.nosetests.g_plans_dir
     dir = tempfile.mkdtemp()
     conf_file = self.plan_basedir + "/badlevel2/top.conf"
     pass_ex = False
     cb = CloudInitD(dir, conf_file, terminate=False, boot=True, ready=True)
     try:
         cb.start()
         cb.block_until_complete(poll_period=1.0)
     except Exception, ex:
         pass_ex = True
Example #10
 def connect_cloudinitd(self, must_exist=False):
     config = os.path.join(os.path.abspath(self.plan), 'launch.conf')
     home = os.environ['HOME']
     db_file = os.path.join(home, '.cloudinitd', 'cloudinitd-'+self.name+'.db')
     print '*** ' + db_file + ' must exist? ' + repr(must_exist)
     if must_exist and not os.path.exists(db_file):
         raise ApeException('cannot reconnect to cloudinitd -- launch does not exist')
     elif os.path.exists(db_file):
         self.util = CloudInitD(home + '/.cloudinitd', config_file=config, db_name=self.name, boot=False, ready=False, fail_if_db_present=False)
Example #11
def _status(options, args):
    global g_repair

    dbname = args[1]
    c_on_e = not g_repair
    options.name = dbname

    cb = CloudInitD(options.database, db_name=dbname, log_level=options.loglevel, level_callback=level_callback, service_callback=service_callback, logdir=options.logdir, terminate=False, boot=False, ready=True, continue_on_error=c_on_e)
    print_chars(1, "Checking status on %s\n" % (cb.run_name))
    cb.start()
    try:
        cb.block_until_complete(poll_period=0.1)
    except CloudServiceException, svcex:
        print svcex
        return 1
    except MultilevelException, mex:
        print mex
        return 1
Example #12
    def test_only_one_launched(self):
        if 'CLOUDINITD_CLEAN_ACCOUNT' not in os.environ:
            raise SkipTest()
        ilist_1 = self._get_running_vms()
        count1 = len(ilist_1)
        self.plan_basedir = cloudinitd.nosetests.g_plans_dir
        dir = tempfile.mkdtemp()
        conf_file = self.plan_basedir + "/singlevmmanyservice/top.conf"
        cb = CloudInitD(dir, conf_file, terminate=False, boot=True, ready=True)
        cb.start()
        cb.block_until_complete(poll_period=1.0)

        ilist_2 = self._get_running_vms()
        count2 = len(ilist_2)

        self.assertEqual(count1, count2 - 1, "there should be exactly 1 more VM in count 2: %d %d" % (count1, count2))

        cb = CloudInitD(dir, db_name=cb.run_name, terminate=True, boot=False, ready=False)
        cb.shutdown()
        cb.block_until_complete(poll_period=1.0)
    def check_status_shutdown_error_test(self):
        (osf, outfile) = tempfile.mkstemp()
        os.close(osf)
        dir = os.path.expanduser("~/.cloudinitd/")
        conf_file = self.plan_basedir + "/terminate/top.conf"
        cb = CloudInitD(dir, conf_file, terminate=False, boot=True, ready=True)
        cb.start()
        cb.block_until_complete(poll_period=1.0)
        runname = cb.run_name
        svc = cb.get_service("sampleservice")
        p = svc.shutdown()
        rc = p.poll()
        while not rc:
            rc = p.poll()
            time.sleep(0.1)

        if 'CLOUDINITD_TESTENV' in os.environ:
            bkfab = os.environ['CLOUDINITD_FAB']
            bkssh = os.environ['CLOUDINITD_SSH']
            os.environ['CLOUDINITD_FAB'] = "/bin/false"
            os.environ['CLOUDINITD_SSH'] = "/bin/false"

        rc = cloudinitd.cli.boot.main(["-O", outfile, "-v","-v","-v","-v", "status", runname])
        if 'CLOUDINITD_TESTENV' in os.environ:
            os.environ['CLOUDINITD_FAB'] = bkfab
            os.environ['CLOUDINITD_SSH'] = bkssh
        self._dump_output(outfile)
        n = "ERROR"
        line = self._find_str(outfile, n)
        self.assertNotEqual(line, None)

        rc = cloudinitd.cli.boot.main(["-O", outfile, "terminate",  "%s" % (runname)])
        self.assertEqual(rc, 0)
    def check_repair_error_test(self):
        if 'CLOUDINITD_TESTENV' in os.environ:
            # we cannot run this one in fake mode yet
            return
        (osf, outfile) = tempfile.mkstemp()
        os.close(osf)
        dir = os.path.expanduser("~/.cloudinitd/")
        conf_file = "%s/outputdep/top.conf" % (cloudinitd.nosetests.g_plans_dir)
        cb = CloudInitD(dir, conf_file, terminate=False, boot=True, ready=True)
        cb.start()
        cb.block_until_complete(poll_period=1.0)
        runname = cb.run_name
        svc = cb.get_service("onelvl1")

        secret = svc.get_attr_from_bag('iaas_secret')
        key = svc.get_attr_from_bag('iaas_key')
        iaas_url= svc.get_attr_from_bag('iaas_url')
        instance_id = svc.get_attr_from_bag('instance_id')
        con = iaas_get_con(svc._svc, key=key, secret=secret, iaasurl=iaas_url)
        instance = con.find_instance(instance_id)
        instance.terminate()

        print "start repair"
        rc = cloudinitd.cli.boot.main(["-O", outfile, "-v","-v","-v","repair", runname])
        self._dump_output(outfile)
        n = "ERROR"
        line = self._find_str(outfile, n)
        self.assertNotEqual(line, None)

        print "start terminate"
        rc = cloudinitd.cli.boot.main(["terminate",  "%s" % (runname)])
        self.assertEqual(rc, 0)
Example #15
def reload_conf(options, args):
    """
    Reload an updated launch plan configuration into the database of the run name supplied with --name.  This is typically followed by a repair of the same run name.
    """
    if len(args) < 2:
        print "The reload command requires a top level file.  See --help"
        return 1

    config_file = args[1]
    print_chars(1, "Loading the launch plan for run ")
    print_chars(1, "%s\n" % (options.name), inverse=True, color="green", bold=True)
    cb = CloudInitD(options.database, log_level=options.loglevel, db_name=options.name, config_file=config_file, level_callback=level_callback, service_callback=service_callback, logdir=options.logdir, fail_if_db_present=False, terminate=False, boot=False, ready=False)
    if options.validate:
        print_chars(1, "Validating the launch plan.\n")
        errors = cb.boot_validate()
        if len(errors) > 0:
            print_chars(0, "The boot plan is not valid.\n", color = "red")
            for (svc, ex) in errors:
                print_chars(1, "Service %s had the error:\n" % (svc.name))
                print_chars(1, "\t%s" %(str(ex)))
            return 1
    return 0
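
The docstring notes that a reload is typically followed by a repair of the same run name. A hedged sketch of that sequence through the cloudinitd.cli.boot.main entry point used elsewhere on this page (the run name and plan path are placeholders; the subcommand names are taken from the messages above):

# Hedged sketch: reload an edited launch plan into an existing run, then repair it.
import cloudinitd.cli.boot

run_name = "myrun"                    # placeholder run name
top_conf = "/path/to/plan/top.conf"   # placeholder top-level config file

rc = cloudinitd.cli.boot.main(["-n", run_name, "reload", top_conf])
if rc == 0:
    rc = cloudinitd.cli.boot.main(["repair", run_name])
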
Example #16
def clean_ice(options, args):
    """
    Clean all orphaned VMs
    """
    if len(args) < 2:
        print "The iceage command requires a run name.  See --help"
        return 1
    dbname = args[1]

    cb = CloudInitD(options.database, db_name=dbname, log_level=options.loglevel, logdir=options.logdir, terminate=False, boot=False, ready=True)
    ha = cb.get_iaas_history()

    for h in ha:
        state = h.get_state()
        handle = h.get_service_iaas_handle()
        if state == "running":
            if handle != h.get_id():
                print_chars(2, "Terminating an orphaned VM %s\n" % (h.get_id()), bold=True)
                h.terminate()
            elif h.get_context_state() == cloudinitd.service_state_initial:
                print_chars(2, "Terminating pre-staged VM %s\n" % (h.get_id()), bold=True)
                h.terminate()

    return 0
Example #17
def terminate(options, args):
    """
    Terminate an already booted plan.  You must supply the run name of the booted plan.
    """
    if len(args) < 2:
        print "The terminate command requires a run name.  See --help"
        return 1

    for dbname in args[1:]:
        options.name = dbname
        rc = 0
        try:
            cb = CloudInitD(options.database, log_level=options.loglevel, db_name=dbname, level_callback=level_callback, service_callback=service_callback, logdir=options.logdir, terminate=True, boot=False, ready=False, continue_on_error=True)
            print_chars(1, "Terminating %s\n" % (cb.run_name))
            cb.shutdown()

            cb.block_until_complete(poll_period=0.1)
            if not options.noclean:
                path = "%s/cloudinitd-%s.db" % (options.database, dbname)
                if not os.path.exists(path):
                    raise Exception("That DB does not seem to exist: %s" % (path))
                if not options.safeclean or (cb.get_exception() is None and not cb.get_all_exceptions()):
                    print_chars(1, "Deleting the db file %s\n" % (path))
                    os.remove(path)
                else:
                    print_chars(4, "There were errors when terminating %s, keeping db\n" % (cb.run_name))

            ex = cb.get_exception()
            if ex is None:
                ex_list = cb.get_all_exceptions()
                if ex_list:
                    ex = ex_list[-1]
            if ex is not None:
                print_chars(4, "An error occured %s" % (str(ex)))
                raise ex
        except CloudServiceException, svcex:
            print svcex
            rc = 1
        except Exception, mex:
            rc = 1
    def setup(self):
        """
        Build a fake test environment, with the sleepers cloudinit.d plan.

        We can grab all logged messages from c.log.transcript.
        """

        self.test_run_name = "TESTRUN"

        self.config = ConfigParser.RawConfigParser()
        self.config.add_section("events")
        self.runlogdir = tempfile.mkdtemp()
        self.config.set("events", "runlogdir", self.runlogdir)
        self.vmlogdir = tempfile.mkdtemp()
        self.config.set("events", "vmlogdir", self.vmlogdir)
        self.optdict = {}
        self.optdict[em_args.NAME.name] = self.test_run_name

        self.params = DefaultParameters(self.config, None)
        self.params.optdict = self.optdict
        remote_svc_adapter = FakeRemoteSvcAdapter()
        self.common = FakeCommon()
        self.modules = FakeModules(remote_svc_adapter=remote_svc_adapter)

        # Note that we monkey-patch the get_scp_command_str function
        # to prepend "echo" to it. That way the command can still be
        # run (harmlessly), and we can see how it actually gets
        # constructed.
        runlogs = DefaultRunlogs(self.params, self.common)
        runlogs.validate()
        self.modules.runlogs = runlogs
        new_get_scp = make_fake_scp_command_str(runlogs, runlogs.get_scp_command_str)
        self.modules.runlogs.get_scp_command_str = types.MethodType(new_get_scp, self.modules.runlogs)

        self.test_dir = os.path.dirname(__file__)
        self.test_db_dir = tempfile.mkdtemp()
        self.test_cd_config = os.path.join(self.test_dir, "configs/main.conf")
        self.cloudinitd = CloudInitD(self.test_db_dir, self.test_cd_config, self.test_run_name)
    def check_status_error_test(self):
        (osf, outfile) = tempfile.mkstemp()
        os.close(osf)
        dir = os.path.expanduser("~/.cloudinitd/")
        conf_file = self.plan_basedir + "/terminate/top.conf"
        cb = CloudInitD(dir, conf_file, terminate=False, boot=True, ready=True)
        cb.start()
        cb.block_until_complete(poll_period=1.0)
        runname = cb.run_name
        svc = cb.get_service("sampleservice")

        secret = svc.get_attr_from_bag('iaas_secret')
        key = svc.get_attr_from_bag('iaas_key')
        iaas_url = svc.get_attr_from_bag('iaas_url')
        instance_id = svc.get_attr_from_bag('instance_id')
        con = iaas_get_con(None, key=key, secret=secret, iaasurl=iaas_url)
        instance = con.find_instance(instance_id)
        instance.terminate()

        if 'CLOUDINITD_TESTENV' in os.environ:
            bkfab = os.environ['CLOUDINITD_FAB']
            bkssh = os.environ['CLOUDINITD_SSH']
            os.environ['CLOUDINITD_FAB'] = "/bin/false"
            os.environ['CLOUDINITD_SSH'] = "/bin/false"

        rc = cloudinitd.cli.boot.main(["-O", outfile, "-v","-v","-v","-v", "status", runname])
        if 'CLOUDINITD_TESTENV' in os.environ:
            os.environ['CLOUDINITD_FAB'] = bkfab
            os.environ['CLOUDINITD_SSH'] = bkssh
        self._dump_output(outfile)
        n = "ERROR"
        line = self._find_str(outfile, n)
        self.assertNotEqual(line, None)

        rc = cloudinitd.cli.boot.main(["-O", outfile, "terminate",  "%s" % (runname)])
        if 'CLOUDINITD_TESTENV' in os.environ:
            # in fake mode we cannot detect that an instance was killed
            self.assertEqual(rc, 0)
        else:
            self.assertNotEqual(rc, 0)
class TestLogfetch:

    def setup(self):
        """
        Build a fake test environment, with the sleepers cloudinit.d plan.

        We can grab all logged messages from c.log.transcript.
        """

        self.test_run_name = "TESTRUN"

        self.config = ConfigParser.RawConfigParser()
        self.config.add_section("events")
        self.runlogdir = tempfile.mkdtemp()
        self.config.set("events", "runlogdir", self.runlogdir)
        self.vmlogdir = tempfile.mkdtemp()
        self.config.set("events", "vmlogdir", self.vmlogdir)
        self.optdict = {}
        self.optdict[em_args.NAME.name] = self.test_run_name

        self.params = DefaultParameters(self.config, None)
        self.params.optdict = self.optdict
        remote_svc_adapter = FakeRemoteSvcAdapter()
        self.common = FakeCommon()
        self.modules = FakeModules(remote_svc_adapter=remote_svc_adapter)

        # Note that we monkey-patch the get_scp_command_str function
        # to prepend "echo" to it. That way the command can still be
        # run (harmlessly), and we can see how it actually gets
        # constructed.
        runlogs = DefaultRunlogs(self.params, self.common)
        runlogs.validate()
        self.modules.runlogs = runlogs
        new_get_scp = make_fake_scp_command_str(runlogs, runlogs.get_scp_command_str)
        self.modules.runlogs.get_scp_command_str = types.MethodType(new_get_scp, self.modules.runlogs)

        self.test_dir = os.path.dirname(__file__)
        self.test_db_dir = tempfile.mkdtemp()
        self.test_cd_config = os.path.join(self.test_dir, "configs/main.conf")
        self.cloudinitd = CloudInitD(self.test_db_dir, self.test_cd_config, self.test_run_name)

    def teardown(self):
        shutil.rmtree(self.test_db_dir)
        shutil.rmtree(self.vmlogdir)
        shutil.rmtree(self.runlogdir)

    def test_fetch_one_vm(self):
        from epumgmt.main.em_core_logfetch import _fetch_one_vm

        test_vm = epumgmt.api.RunVM()

        _fetch_one_vm(self.params, self.common, self.modules,
                      self.test_run_name, test_vm, cloudinitd=self.cloudinitd)

    def test_fetch_by_service_name(self):
        """
        This test constructs a RunVM instance, and then asks
        logfetch to grab its logs. We confirm that the correct
        scp call was made indirectly by examining the transcript
        of the log files.

        We also neuter the scp call by prefixing it with echo, since
        we're not trying to scp from a real host.
        """

        from epumgmt.main.em_core_logfetch import fetch_by_service_name

        test_service_name = "provisioner"

        test_provisioner = epumgmt.api.RunVM()
        test_provisioner.service_type = test_service_name
        test_provisioner_hostname = "test.hostname.example.com"
        test_provisioner.hostname = test_provisioner_hostname
        test_provisioner_vmlogdir = "/some/fake/logdir"
        test_provisioner.vmlogdir = test_provisioner_vmlogdir
        test_provisioner_runlogdir = "/some/fake/local/runlogdir"
        test_provisioner.runlogdir = test_provisioner_runlogdir
        test_provisioner_instanceid = "i-TEST"
        test_provisioner.instanceid = test_provisioner_instanceid

        test_run_vms = []
        test_run_vms.append(test_provisioner)
        self.modules.persistence.store_run_vms(self.test_run_name, test_run_vms)

        # Be tricky and patch in our hostname
        self.cloudinitd.get_service("provisioner")._svc._s.hostname = test_provisioner_hostname

        fetch_by_service_name(self.params, self.common, self.modules,
                              self.test_run_name, test_service_name, self.cloudinitd)

        run_commands = [message for (level, message)
                        in self.common.log.transcript 
                        if level == "DEBUG"
                           and "command =" in message]

        # confirm that scp command gets called for our service 
        expected_scp_pattern = ".*@%s:%s %s" % (test_provisioner_hostname,
                                                test_provisioner_vmlogdir,
                                                test_provisioner_runlogdir)
        # only expect one command to be run
        assert len(run_commands) == 1
        assert re.search(expected_scp_pattern, run_commands[0])


    def test_fetch_all(self):

        from epumgmt.main.em_core_logfetch import fetch_all

        test_service_name = "provisioner"

        test_provisioner = epumgmt.api.RunVM()
        test_provisioner.service_type = test_service_name
        test_provisioner_hostname = "test.hostname.example.com"
        test_provisioner.hostname = test_provisioner_hostname
        test_provisioner_vmlogdir = "/some/fake/logdir"
        test_provisioner.vmlogdir = test_provisioner_vmlogdir
        test_provisioner_runlogdir = "/some/fake/local/runlogdir"
        test_provisioner.runlogdir = test_provisioner_runlogdir
        test_provisioner_instanceid = "i-TEST"
        test_provisioner.instanceid = test_provisioner_instanceid

        # Be tricky and patch in our hostname
        self.cloudinitd.get_service("provisioner")._svc._s.hostname = test_provisioner_hostname

        # Two workers. Note that they have the same hostname
        # to simulate the issue where we have a terminated worker
        # and the second one was booted with the same hostname as
        # the first
        test_worker_0 = epumgmt.api.RunVM()
        test_worker_0_service_type = "iamaworker"
        test_worker_0.service_type = test_worker_0_service_type
        test_worker_0_hostname = "worker0.example.com"
        test_worker_0.hostname = test_worker_0_hostname
        test_worker_0_instanceid = "i-TESTWORKER0"
        test_worker_0.instanceid = test_worker_0_instanceid
        test_worker_0_vmlogdir = "/some/fake/logdir"
        test_worker_0.vmlogdir = test_worker_0_vmlogdir
        test_worker_0_runlogdir = "/some/fake/%s/runlogdir" % test_worker_0_instanceid
        test_worker_0.runlogdir = test_worker_0_runlogdir
        test_worker_0_iaas_state = epustates.TERMINATED
        test_worker_0_events = [Event(name="iaas_state", timestamp=1000, state=test_worker_0_iaas_state)]
        test_worker_0.events = test_worker_0_events


        test_worker_1 = epumgmt.api.RunVM()
        test_worker_1_service_type = "iamaworker"
        test_worker_1.service_type = test_worker_0_service_type
        test_worker_1.hostname = test_worker_0_hostname
        test_worker_1_instanceid = "i-TESTWORKER1"
        test_worker_1.instanceid = test_worker_1_instanceid
        test_worker_1_vmlogdir = "/some/fake/logdir"
        test_worker_1.vmlogdir = test_worker_1_vmlogdir
        test_worker_1_runlogdir = "/some/fake/%s/runlogdir/" % test_worker_1_instanceid
        test_worker_1.runlogdir = test_worker_1_runlogdir
        test_worker_1_iaas_state = epustates.RUNNING
        test_worker_1_events = [Event(name="iaas_state", timestamp=1000, state=test_worker_1_iaas_state)]
        test_worker_1.events = test_worker_1_events

        test_run_vms = []
        test_run_vms.append(test_provisioner)
        test_run_vms.append(test_worker_0)
        test_run_vms.append(test_worker_1)
        self.modules.persistence.store_run_vms(self.test_run_name, test_run_vms)

        fetch_all(self.params, self.common, self.modules, self.test_run_name, self.cloudinitd)
        run_commands = [message for (level, message)
                        in self.common.log.transcript 
                        if level == "DEBUG"
                           and "command =" in message]

        # We have two VMs we should fetch from
        assert len(run_commands) == 2



        # Confirm that we never scp log files from the TERMINATED VM
        for scp_command in run_commands:
            assert scp_command.find(test_worker_0_instanceid) == -1
    def test_prelaunch(self):

        key = None
        secret = None
        url = None
        try:
            key = os.environ['CLOUDINITD_IAAS_ACCESS_KEY']
            secret = os.environ['CLOUDINITD_IAAS_SECRET_KEY']
            url = os.environ['CLOUDINITD_IAAS_URL']
        except:
            pass

        # XXX this test may fail for nimbus
        con = cloudinitd.cb_iaas.iaas_get_con(None, key=key, secret=secret, iaasurl=url)
        i_list = con.get_all_instances()
        conf_file = "multilevelsimple"
        self.plan_basedir = cloudinitd.nosetests.g_plans_dir
        dir = tempfile.mkdtemp()
        conf_file = self.plan_basedir + "/" + conf_file + "/top.conf"
        cb = CloudInitD(dir, conf_file, terminate=False, boot=True, ready=True)
        cb.pre_start_iaas()

        post_boot_list = con.get_all_instances()

        self.assertNotEqual(len(i_list), len(post_boot_list), "The list of instances should have grown")
        self.assertTrue(len(i_list)+3 < len(post_boot_list), "The list of instances should have grown by more than the number of services in the first level")

        cb.start()
        post_start_list = con.get_all_instances()
        self.assertEqual(len(post_boot_list), len(post_start_list), "The list should not have grown")
        cb.block_until_complete(poll_period=1.0)

        cb = CloudInitD(dir, db_name=cb.run_name, terminate=True, boot=False, ready=False)
        cb.shutdown()
        cb.block_until_complete(poll_period=1.0)
        fname = cb.get_db_file()
        os.remove(fname)
Example #22
    def get_status_test(self):
        self.plan_basedir = cloudinitd.nosetests.g_plans_dir
        dir = tempfile.mkdtemp()
        conf_file = self.plan_basedir + "/oneservice/top.conf"
        cb = CloudInitD(dir, conf_file, terminate=False, boot=True, ready=True)
        cb.start()
        cb.block_until_complete(poll_period=1.0)

        svc = cb.get_service("sampleservice")
        status = svc.get_iaas_status()
        self.assertEqual("running", status, "status is %s" % (status))

        cb = CloudInitD(dir, db_name=cb.run_name, terminate=True, boot=False, ready=False)
        cb.shutdown()
        cb.block_until_complete(poll_period=1.0)
        fname = cb.get_db_file()
        os.remove(fname)
Example #23
    def dep_keys_test(self):
        self.plan_basedir = cloudinitd.nosetests.g_plans_dir
        dir = tempfile.mkdtemp()
        conf_file = self.plan_basedir + "/oneservice/top.conf"
        cb = CloudInitD(dir, conf_file, terminate=False, boot=True, ready=True)
        cb.start()
        cb.block_until_complete(poll_period=1.0)

        svc = cb.get_service("sampleservice")
        attr_keys = svc.get_keys_from_bag()
        # until we figure out how to get __dict__ values from the sqlalchemy objects this will be incomplete
        expectations = [
            "hostname",
            "instance_id",
#            "name",
#            "level_id",
#            "image",
#            "iaas",
#            "allocation",
#            "keyname",
#            "localkey",
#            "username",
#            "scp_username",
#            "readypgm",
#            "hostname",
#            "bootconf",
#            "bootpgm",
#            "instance_id",
#            "iaas_url",
#            "iaas_key",
#            "iaas_secret",
#            "contextualized",
#            "securitygroups",
            "webmessage"
            ]
        for e in expectations:
            self.assertTrue(e in attr_keys, "The key %s should exist in %s" % (e, str(attr_keys)))

        cb = CloudInitD(dir, db_name=cb.run_name, terminate=True, boot=False, ready=False)
        cb.shutdown()
        cb.block_until_complete(poll_period=1.0)
        fname = cb.get_db_file()
        os.remove(fname)
Example #24
class Containers(object):
    """ a set of ExecutionEngines

    this object wraps the launch-plans and cloudinitd portions of a container launch

    tmp/launch-plan/<pid>/plan/         result of generate-plan
                          cloud.yml     modified copy of profiles/<template>.yml.example
                          launch.yml    modified copy of res/launch/<template>.yml
    """
    def __init__(self, config, couch=None, rabbit=None, graylog=None, elasticsearch=None):
        self.config = config
        self.target_dir = os.path.join('tmp', 'launch-plan', str(os.getpid()))
        self.plan = os.path.join(self.target_dir, 'plan')
        self.name = config.get('containers.name')
        self.source_directory = config.get('containers.launch-plan')

        self.couch = couch
        self.rabbit = rabbit
        self.graylog = graylog
        self.elasticsearch = elasticsearch
        # zookeeper
        self.proc = None

    def create_launch_plan(self):
        os.makedirs(self.target_dir, mode=0755)

        # make private copy of coi-services tarball
        self._prepare_install_software()

        # copy and modify configuration of cloud resources
        self._modify_cloud_config()

        # copy and modify configuration of services and execution engines
        self._modify_launch()
        # copy deploy.yml and modify
        self._modify_deploy()
        self._generate_plan()

    def _prepare_install_software(self):
        cmd = self.config.get('containers.software.copy-command')
        if cmd:
            log.debug('executing: %s', cmd)
            subprocess.check_call(cmd, shell=True)

    def _modify_cloud_config(self):
        """ copy template cloud config file and modify """
        file = os.path.join(self.source_directory, self.config.get('containers.resource-config'))
        log.debug('configuring: %s', file)
        with open(file, 'r') as f:
            data = yaml.load(f)

        # configure appropriately
        data['iaas']['url'] = os.environ['EC2_URL']
        data['iaas']['key'] = os.environ['EC2_ACCESS_KEY']
        data['iaas']['secret'] = os.environ['EC2_SECRET_KEY']
        data['iaas']['base-image'] = self.config.get('containers.image')
        data['iaas']['base-allocation'] = self.config.get('containers.allocation')

        data['rabbitmq']['host'] = self.config.get('rabbit.hostname')
        data['rabbitmq']['username'] = self.config.get('rabbit.username')
        data['rabbitmq']['password'] = self.config.get('rabbit.password')

        data['couchdb']['host'] = self.config.get('couch.hostname')
        data['couchdb']['username'] = self.config.get('couch.username')
        data['couchdb']['password'] = self.config.get('couch.password')

        data['graylog']['host'] = self.config.get('graylog.hostname')

        # TODO: get from config file
        data['zookeeper'] = {
            'enabled': True,
            'hosts': [ 'zk01.s.oceanobservatories.org', 'zk02.s.oceanobservatories.org', 'zk03.s.oceanobservatories.org']
        }
        url = self.config.get('containers.software.url')
        if url:
            if 'packages' not in data or not data['packages']:
                data['packages'] = {}
            data['packages']['coi_services'] = url

        if self.config.get('containers.recipes'):
            if 'packages' not in data or not data['packages']:
                data['packages'] = {}
            data['packages']['dt_data'] = self.config.get('containers.recipes')

        self.cloud_config = os.path.join(self.target_dir, 'resources.yml')
        with open(self.cloud_config, 'w') as f:
            yaml.dump(data, f, default_flow_style=False)

    def _modify_launch(self):
        file = self.config.get('containers.cloud-config')
        log.debug('configuring: %s', file)
        with open(file, 'r') as f:
            data = yaml.load(f)

        all_engines_config = self.config.get('containers.execution-engines')
        for name,engine_config in all_engines_config.iteritems():
            if name not in data['execution_engines']:
                # add new execution engine
                data['execution_engines'][name] = {}

            cfg = data['execution_engines'][name]
            for key in 'slots', 'base_need', 'replicas':
                value = engine_config.get(key)
                if value:
                    cfg[key] = value

        self.launch_config = os.path.join(self.target_dir, 'launch.yml')
        with open(self.launch_config, 'w') as f:
            yaml.dump(data, f, default_flow_style=False)

    def _modify_deploy(self):
        # read template
        src = self.config.get('services.deploy-file')
        log.debug('configuring: %s', src)
        with open(src, 'r') as f:
            data = yaml.load(f)
        # keep subset of existing apps from deploy file
        config_list = self.config.get('services.deploy-list')
        config_apps = self.config.get('services.deploy-apps')
        if config_list == '*' or (not config_list and not config_apps):
            pass # use all apps from config file (default)
        elif not config_list:
            data['apps'] = []
        elif isinstance(config_list, list):
            orig_apps = data['apps']
            data['apps'] = []
            for name in config_list:
                for app in orig_apps:
                    if app['name']==name:
                        data['apps'].append(app)
                        break
        # add explicitly configured apps
        if config_apps:
            for app in config_apps:
                data['apps'].append(app.as_dict())
        # save
        self.deploy_config = os.path.join(self.target_dir, 'deploy.yml')
        with open(self.deploy_config, 'w') as f:
            yaml.dump(data, f, default_flow_style=False)

        # OBSOLETE: no longer run rel2levels, now run generate-plan
#        # run rel2levels
#        # HACK: have to execute with PYTHONPATH that includes YAML, so pass whatever version I'm using
#        yaml_dir = os.path.dirname(os.path.dirname(yaml.__file__))
##        cmd = 'export PYTHONPATH=$PYTHONPATH:' + yaml_dir + ' ; ./rel2levels.py -c ' + self.cloud_config + ' deploy.yml -f --ignore-bootlevels > /dev/null 2>&1'
#        cmd = 'export PYTHONPATH=$PYTHONPATH:' + yaml_dir + ' ; ./rel2levels.py -c ' + self.cloud_config + ' deploy.yml -f > /dev/null 2>&1'
#        code = subprocess.call(cmd, shell=True, cwd=self.launch_plan)
#        if code<0:
#            raise Exception('failed to execute ' + cmd)

    def _generate_plan(self):
        logconfig = self.config.get('containers.logging-config')
        if logconfig:
            cmd = 'bin/generate-plan --profile %s --rel %s --launch %s --logconfig %s --ignore-bootlevels %s' % (
                        os.path.abspath(self.cloud_config), os.path.abspath(self.deploy_config),
                        os.path.abspath(self.launch_config), os.path.abspath(logconfig), os.path.abspath(self.plan))
        else:
            cmd = 'bin/generate-plan --profile %s --rel %s --launch %s --ignore-bootlevels %s' % (
                os.path.abspath(self.cloud_config), os.path.abspath(self.deploy_config),
                os.path.abspath(self.launch_config), os.path.abspath(self.plan))
        log.debug('executing: %s', cmd)
        code = subprocess.call(cmd, shell=True, cwd=self.source_directory)
        if code!=0:
            raise ApeException('failed to execute ' + cmd)

    def launch_containers(self):
#        home = os.environ['HOME']
#        file = os.path.join(self.launch_plan, self.cloud_config)
        cmd='cloudinitd boot -vvv -n %s launch.conf' % self.name
#        print '====>>> about to execute: ' + cmd

        # HACK: we cannot launch into the background and then connect -- CloudInitD will throw an exception, so we must always wait until the launch completes.
        #self.proc = subprocess.Popen(cmd, shell=True, cwd=self.plan)
        #status = self.proc.wait()
        log.debug('executing: %s', cmd)
        subprocess.check_call(cmd, shell=True, cwd=self.plan)

        file = os.path.join(os.path.abspath(self.plan), 'launch.conf')
        self.connect_cloudinitd()

    def connect_cloudinitd(self, must_exist=False):
        config = os.path.join(os.path.abspath(self.plan), 'launch.conf')
        home = os.environ['HOME']
        db_file = os.path.join(home, '.cloudinitd', 'cloudinitd-'+self.name+'.db')
        print '*** ' + db_file + ' must exist? ' + repr(must_exist)
        if must_exist and not os.path.exists(db_file):
            raise ApeException('cannot reconnect to cloudinitd -- launch does not exist')
        elif os.path.exists(db_file):
            self.util = CloudInitD(home + '/.cloudinitd', config_file=config, db_name=self.name, boot=False, ready=False, fail_if_db_present=False)

    def wait_for_containers(self):
        print 'waiting...'
        if self.proc:
            self.proc.wait()
            self.proc = None
#        self.util.block_until_complete(poll_period=2)


    def get_manager(self):
        return SimpleManager(**self.get_nodes_broker())

    def get_nodes_broker(self):
        """ interrogate cloudinitd for rabbitmq parameters """
        vars = {}
        svc_list = self.util.get_all_services()
        for svc in svc_list:
            if svc.name == 'basenode':
                vars['broker_hostname'] = svc.get_attr_from_bag("hostname")
                vars['broker_username'] = svc.get_attr_from_bag("rabbitmq_username")
                vars['broker_password'] = svc.get_attr_from_bag("rabbitmq_password")
        return vars

    def get_process_list(self):
        cmd='ceictl -n %s -t 60 process list' % self.name
#        print '====>>> about to execute: ' + cmd

        # HACK: we cannot launch into the background and then connect -- CloudInitD will throw an exception, so we must always wait until the launch completes.
        #self.proc = subprocess.Popen(cmd, shell=True, cwd=self.plan)
        #status = self.proc.wait()
        processes = [ ]
        current = { }
        log.debug('executing: %s', cmd)
        lines = iter(subprocess.check_output(cmd, shell=True).split('\n'))
        try:
            while True:
                line = lines.next()
                if line:
                    fields = line.split('=')
                    name = fields[0].strip()
                    value = fields[1].strip()
                    if name=='Process ID':
                        if current:
                            processes.append(current)
                            current = { }
                    elif name=='Process Name':
                        idlen = len(value.split('-')[-1])
                        type = value[0:-idlen-1]
                        current['type'] = type
                    current[name] = value
        except StopIteration:
            pass
        return processes
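
A hedged sketch of how the Containers helper above might be driven end to end. The config object must answer the containers.*, services.*, rabbit.*, couch.* and graylog.* keys read in the methods above; building such a config is outside the scope of this sketch.

# Hedged usage sketch for the Containers class defined above.
# `config` is assumed to be an already-constructed configuration object that
# provides the keys the methods above read; it is not defined here.
containers = Containers(config)
containers.create_launch_plan()        # writes tmp/launch-plan/<pid>/ and runs generate-plan
containers.launch_containers()         # runs "cloudinitd boot ... launch.conf", then reconnects
manager = containers.get_manager()     # SimpleManager wired to the basenode rabbitmq parameters
processes = containers.get_process_list()
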
 def _status(self, dir, run_name):
     cb = CloudInitD(dir, db_name=run_name, terminate=False, boot=False, ready=True, continue_on_error=True)
     cb.start()
     cb.block_until_complete(poll_period=1.0)
Example #26
    def test_validateiaas(self):
        self.plan_basedir = cloudinitd.nosetests.g_plans_dir
        dir = tempfile.mkdtemp()
        conf_file = self.plan_basedir + "/iaastypevalidate/top.conf"
        cb = CloudInitD(dir, conf_file, terminate=False, boot=True, ready=True)
        cb.start()

        cb.block_until_complete(poll_period=1.0)

        # check the log for a warning
        fname = os.path.expanduser("~/.cloudinitd/%s/badsvc.log" % (cb.run_name))
        print fname
        self.assertTrue(os.path.exists(fname), "The path %s should exist" % (fname))
        f = open(fname, "r")
        found = False
        for l in f.readlines():
            print l
            ndx = l.find("WARN")
            if ndx >= 0:
                ndx = l.find("2.7")
                if ndx >= 0:
                    found = True
        self.assertTrue(found, "a warning with the key 2.7 should be in the logfile %s" %(fname))
        f.close()

        cb = CloudInitD(dir, db_name=cb.run_name, terminate=True, boot=False, ready=False)
        cb.shutdown()
        cb.block_until_complete(poll_period=1.0)
        fname = cb.get_db_file()
        os.remove(fname)
 def _terminate(self, dir, run_name):
     cb = CloudInitD(dir, db_name=run_name, terminate=True, boot=False, ready=False, continue_on_error=True)
     cb.shutdown()
     cb.block_until_complete(poll_period=1.0)
     fname = cb.get_db_file()
     os.remove(fname)
    def test_badlevel_creds(self):
        ilist_1 = self._get_running_vms()
        count1 = len(ilist_1)
        self.plan_basedir = cloudinitd.nosetests.g_plans_dir
        dir = tempfile.mkdtemp()
        conf_file = self.plan_basedir + "/badlevel2.2/top.conf"

        pass_ex = False
        cb = CloudInitD(dir, conf_file, terminate=False, boot=True, ready=True)
        try:
            cb.start()
            cb.block_until_complete(poll_period=1.0)
        except Exception, ex:
            pass_ex = True
        self.assertTrue(pass_ex, "An exception should have happened and didn't")

        cb = CloudInitD(dir, db_name=cb.run_name, terminate=True, boot=False, ready=False)
        cb.shutdown()
        cb.block_until_complete(poll_period=1.0)

        time.sleep(5)
        ilist_2 = self._get_running_vms()
        count2 = len(ilist_2)

        self.assertEqual(count1, count2, "the vm count before and after should be the same: %d %d" % (count1, count2))


if __name__ == '__main__':
    unittest.main()
Example #29
    def _start_one(self, conf_file):
        self.plan_basedir = cloudinitd.nosetests.g_plans_dir
        dir = tempfile.mkdtemp()
        conf_file = self.plan_basedir + "/" + conf_file + "/top.conf"
        cb = CloudInitD(dir, conf_file, terminate=False, boot=True, ready=True)
        cb.start()
        cb.block_until_complete(poll_period=1.0)

        cb = CloudInitD(dir, db_name=cb.run_name, terminate=True, boot=False, ready=False)
        cb.shutdown()
        cb.block_until_complete(poll_period=1.0)
        fname = cb.get_db_file()
        os.remove(fname)
    def test_env_set(self):
        if cloudinitd.nosetests.is_a_test():
            raise SkipTest()
        dir = tempfile.mkdtemp()
        conf_file = self.plan_basedir + "/oneservice/top.conf"
        cb = CloudInitD(dir, conf_file, terminate=False, boot=True, ready=True)
        cb.start()
        cb.block_until_complete(poll_period=1.0)
        svc = cb.get_service("sampleservice")
        ssh_cmd = svc.get_ssh_command() + " ls -l %s" % (self.remote_dir)

        # form test directory command
        print ssh_cmd

        rc = os.system(ssh_cmd)
        self.assertEquals(rc, 0)

        cb = CloudInitD(dir, db_name=cb.run_name, terminate=True, boot=False, ready=False)
        cb.shutdown()
        cb.block_until_complete(poll_period=1.0)