Example #1
    def handle(self, *args, **kwargs):
        """
        Generate the config file for the realtime module, and send the node realtime command line to stdout.

        The reason for sending the command to stdout instead of just running
        it is so that supervisord can directly manage the resulting node
        process (otherwise we would have to handle passing signals through).
        """
        from chroma_core.lib.util import site_dir

        SITE_ROOT = site_dir()
        REALTIME_DIR = os.path.join(SITE_ROOT, 'ui-modules', 'node_modules',
                                    '@iml/realtime')
        CONF = os.path.join(REALTIME_DIR, "conf.json")

        conf = {
            # Fall back to SITE_ROOT when settings.LOG_PATH is empty
            "LOG_PATH": settings.LOG_PATH or SITE_ROOT,
            "REALTIME_PORT": settings.REALTIME_PORT,
            "SERVER_HTTP_URL": settings.SERVER_HTTP_URL
        }

        source_map_glob = os.path.join(SITE_ROOT, "ui-modules", "node_modules",
                                       "@iml", "gui", "dist", "built*.map")
        source_map_paths = glob.glob(source_map_glob)

        if source_map_paths:
            conf["SOURCE_MAP_PATH"] = source_map_paths[0]

        with open(CONF, 'w') as conf_file:
            json.dump(conf, conf_file, indent=2)

        cmdline = ["node", REALTIME_DIR + '/server.js']
        print " ".join(cmdline)
    def handle(self, *args, **kwargs):
        """
        Generate a config file for the view-server, and send the view-server command line to stdout.

        The reason for sending the command to stdout instead of just running
        it is so that supervisord can directly manage the resulting
        process (otherwise we would have to handle passing signals through).
        """
        from chroma_core.lib.util import site_dir

        SITE_ROOT = site_dir()
        VIEW_SERVER_DIR = os.path.join(SITE_ROOT, 'ui-modules', 'node_modules',
                                       '@iml', 'view-server', 'dist', 'source')
        CONF = os.path.join(VIEW_SERVER_DIR, "conf.json")

        conf = {
            "ALLOW_ANONYMOUS_READ": settings.ALLOW_ANONYMOUS_READ,
            "BUILD": settings.BUILD,
            "IS_RELEASE": settings.IS_RELEASE,
            "LOG_PATH": settings.LOG_PATH,
            "SERVER_HTTP_URL": settings.SERVER_HTTP_URL,
            "SITE_ROOT": settings.SITE_ROOT,
            "VIEW_SERVER_PORT": settings.VIEW_SERVER_PORT,
            "VERSION": settings.VERSION
        }

        with open(CONF, 'w') as conf_file:
            json.dump(conf, conf_file, indent=2)

        cmdline = ["node", VIEW_SERVER_DIR + '/server.js']
        print " ".join(cmdline)
    def handle(self, *args, **kwargs):
        """
        Generate config files for running nginx, and send the nginx command
        line to stdout.

        The reason for sending the command line to stdout instead of just running
        it is so that supervisord can directly manage the resulting nginx
        process (otherwise we would have to handle passing signals through).
        """
        from chroma_core.lib.util import site_dir

        # This command is only for development
        assert settings.DEBUG

        SITE_ROOT = site_dir()
        join_site_root = partial(os.path.join, SITE_ROOT)

        DEV_NGINX_DIR = join_site_root("dev_nginx")
        join_nginx_dir = partial(join_site_root, DEV_NGINX_DIR)

        NGINX_CONF_TEMPLATE = join_site_root("nginx.conf.template")
        NGINX_CONF = join_nginx_dir("nginx.conf")

        CHROMA_MANAGER_CONF_TEMPLATE = join_site_root(
            "chroma-manager.conf.template")
        CHROMA_MANAGER_CONF = join_nginx_dir("chroma-manager.conf")

        if not os.path.exists(DEV_NGINX_DIR):
            os.makedirs(DEV_NGINX_DIR)

        def write_conf(template_path, conf_path):
            conf_text = Template(open(template_path).read()).render(
                Context({
                    'var': DEV_NGINX_DIR,
                    'log': SITE_ROOT,
                    'SSL_PATH': settings.SSL_PATH,
                    'APP_PATH': settings.APP_PATH,
                    'REPO_PATH': settings.DEV_REPO_PATH,
                    'HTTP_FRONTEND_PORT': settings.HTTP_FRONTEND_PORT,
                    'HTTPS_FRONTEND_PORT': settings.HTTPS_FRONTEND_PORT,
                    'HTTP_AGENT_PORT': settings.HTTP_AGENT_PORT,
                    'HTTP_API_PORT': settings.HTTP_API_PORT,
                    'REALTIME_PORT': settings.REALTIME_PORT,
                    'VIEW_SERVER_PORT': settings.VIEW_SERVER_PORT
                }))
            with open(conf_path, 'w') as conf_file:
                conf_file.write(conf_text)

        write_conf(NGINX_CONF_TEMPLATE, NGINX_CONF)
        write_conf(CHROMA_MANAGER_CONF_TEMPLATE, CHROMA_MANAGER_CONF)

        print " ".join([self._nginx_path, "-c", NGINX_CONF])
    def handle(self, *args, **options):
        from chroma_core.lib import service_config

        sc = service_config.ServiceConfig()
        sc._setup_rabbitmq_credentials()
        sc._setup_crypto()
        sc._syncdb()
        sc.scan_repos()

        profile_path = os.path.join(
            site_dir(), "../chroma-bundles/base_managed_RH7.profile")

        for profile_path in glob.glob(
                os.path.join(os.path.dirname(profile_path), "*.profile")):
            with open(profile_path) as profile_file:
                service_config.register_profile(profile_file)

        print("""Great success:
 * run `systemctl start iml-manager.target`
 * open %s""" % settings.SERVER_HTTP_URL)
Example #5
    def handle(self, *args, **options):
        from chroma_core.lib import service_config
        from chroma_core.models import Bundle

        sc = service_config.ServiceConfig()
        sc._setup_rabbitmq_credentials()
        sc._setup_crypto()
        sc._syncdb()

        # default, works for --no-bundles
        profile_path = os.path.join(site_dir(), "../chroma-bundles/base_managed_RH7.profile")

        if options['no_bundles']:
            for bundle in ['lustre', 'lustre-client', 'iml-agent', 'e2fsprogs', 'robinhood', 'zfs']:
                Bundle.objects.get_or_create(bundle_name=bundle, location="/tmp/", description="Dummy bundle")
        else:
            # override the default path if we have unpacked a real archive
            repo_profile_path = os.path.join(settings.DEV_REPO_PATH, 'base_managed_RH7.profile')
            if os.path.isfile(repo_profile_path):
                profile_path = repo_profile_path

            with open(profile_path) as f:
                bundle_names = json.load(f)['bundles']
            missing_bundles = bundle_names

            bundle_files = glob.glob(os.path.join(settings.DEV_REPO_PATH, "*-bundle.tar.gz"))
            for bundle_file in bundle_files:
                archive = tarfile.open(bundle_file, "r:gz")
                meta = json.load(archive.extractfile("./meta"))
                repo = os.path.join(settings.DEV_REPO_PATH, meta['name'])

                if not os.path.exists(os.path.join(repo, 'meta')):
                    print "Extracting %s" % meta['name']
                    if not os.path.exists(repo):
                        os.makedirs(repo)

                    #archive.list()
                    archive.extractall(repo)
                    archive.close()

                if not Bundle.objects.filter(location=repo).exists():
                    service_config.bundle('register', repo)

                try:
                    missing_bundles.remove(meta['name'])
                except ValueError:
                    # Bundles not associated with a profile are OK
                    pass

            if len(missing_bundles):
                print """
Missing bundles: %(bundles)s

Package bundles are required for installation. In order to proceed, you
have 3 options:
    1. Download an installer from %(bundle_url)s and unpack it in %(repo_path)s
    2. Build an installer locally and unpack it in %(repo_path)s
    3. Run ./manage.py dev_setup --no-bundles to generate a set of fake
       bundles for simulated servers

Please note that the fake bundles can't be used to install real storage
servers -- you'll need to use one of the first two methods in order to make
that work.
    """ % {'bundle_url': "http://build.whamcloudlabs.com/job/chroma/arch=x86_64,distro=el6.4/lastSuccessfulBuild/artifact/chroma-bundles/", 'repo_path': settings.DEV_REPO_PATH, 'bundles': ", ".join(missing_bundles)}
                sys.exit(1)

        for profile_path in glob.glob(os.path.join(os.path.dirname(profile_path), '*.profile')):
            with open(profile_path) as profile_file:
                service_config.register_profile(profile_file)

        print """Great success:
 * run `./manage.py supervisor`
 * open %s""" % settings.SERVER_HTTP_URL
class SupervisorTestCase(TestCase):
    """A test case which starts and stops supervisor services"""

    SERVICES = []
    PORTS = {  # service ports to wait on binding
        'http_agent': [settings.HTTP_AGENT_PORT],
        'nginx': [settings.HTTPS_FRONTEND_PORT, settings.HTTP_FRONTEND_PORT],
        'view_server': [settings.VIEW_SERVER_PORT]
    }
    TIMEOUT = 5  # default timeout to wait for services to start
    CONF = os.path.join(site_dir(), "supervisord.conf")
    TEST_USERNAME = '******'
    TEST_PASSWORD = '******'
    TEST_PORT = 9876

    def __init__(self, *args, **kwargs):
        super(SupervisorTestCase, self).__init__(*args, **kwargs)
        self._supervisor = None
        self._tmp_conf = None

    def _wait_for_supervisord(self):
        try:
            self._wait_for_port(self.TEST_PORT)
        except AssertionError:
            rc = self._supervisor.poll()
            if rc is not None:
                stdout, stderr = self._supervisor.communicate()
                log.error("supervisord stdout: %s" % stdout)
                log.error("supervisord stderr: %s" % stderr)
                log.error("supervisord rc = %s" % rc)
                raise AssertionError("supervisord terminated prematurely with status %s" % rc)
            else:
                raise

    def _wait_for_port(self, port):
        log.info("Waiting for port %s..." % port)
        for _ in util.wait(self.TIMEOUT):
            try:
                return socket.socket().connect(('localhost', port))
            except socket.error:
                pass
        raise AssertionError("Timed out after %s seconds waiting for port %s" % (self.TIMEOUT, port))

    def setUp(self):
        cfg_stringio = StringIO(open(self.CONF).read())
        cp = ConfigParser()
        cp.readfp(cfg_stringio)
        self.programs = []
        for section in cp.sections():
            if section.startswith("program:"):
                progname = section.split("program:")[1]
                self.programs.append(progname)
                cp.set(section, 'autostart', 'false')

        cp.set("inet_http_server", "port", "127.0.0.1:%s" % self.TEST_PORT)
        cp.set("inet_http_server", "username", self.TEST_USERNAME)
        cp.set("inet_http_server", "password", self.TEST_PASSWORD)

        cp.set("supervisorctl", "username", self.TEST_USERNAME)
        cp.set("supervisorctl", "password", self.TEST_PASSWORD)
        cp.set("supervisorctl", "serverurl", "http://*****:*****@localhost:%s/RPC2" % (self.TEST_USERNAME, self.TEST_PASSWORD, self.TEST_PORT))

            for service in self.SERVICES:
                log.info("Starting service '%s'" % service)
                self.start(service)
            for service in set(self.SERVICES) - set(self.PORTS):
                self.assertRunning(service, uptime=self.TIMEOUT)
        except:
            # Ensure we don't leave a supervisor process behind
            self.tearDown()
            raise

    def tearDown(self):
        # You can't import this globally because DJANGO_SETTINGS_MODULE is not initialized yet at
        # import time, but by the time the code meanders its way to here it should be.
        from chroma_core.services.rpc import RpcClientFactory

        test_failed = (sys.exc_info() != (None, None, None))

        if test_failed:
            log.info(self._xmlrpc.system.listMethods())
            log_chunk = self._xmlrpc.supervisor.readLog(0, 4096)
            log.error("Supervisor log: %s" % log_chunk)

        # Shutdown any RPC Threads if they were started. Bit of horrible insider knowledge here.
        if RpcClientFactory._lightweight is False:
            RpcClientFactory.shutdown_threads()
            RpcClientFactory._lightweight = True
            RpcClientFactory._available = True
            RpcClientFactory._instances = {}

        if self._supervisor is not None:
            try:
                self._xmlrpc.supervisor.shutdown()
                stdout, stderr = self._supervisor.communicate()
                # Echo these at the end: by outputting using sys.std* rather than
                # letting the subprocess write directly, this verbose output can be
                # captured by nose and only output on failure.
                sys.stdout.write(stdout)
                sys.stdout.write(stderr)
            except:
                self._supervisor.kill()
            finally:
                self._supervisor = None

        if self._tmp_conf and os.path.exists(self._tmp_conf.name):
            os.unlink(self._tmp_conf.name)

    def start(self, program):
        self._xmlrpc.supervisor.startProcess(program)
        for port in self.PORTS.get(program, []):
            self._wait_for_port(port)
        self.assertRunning(program)

    def stop(self, program):
        self._xmlrpc.supervisor.stopProcess(program)
        self.assertStopped(program)

    def restart(self, program):
        self.stop(program)
        self.start(program)

    def assertRunning(self, program, uptime=0):
        info = self._xmlrpc.supervisor.getProcessInfo(program)
        self.assertEqual(info['statename'], "RUNNING")
        time.sleep(max(0, uptime + info['start'] - info['now']))

    def assertStopped(self, program):
        info = self._xmlrpc.supervisor.getProcessInfo(program)
        self.assertEqual(info['statename'], "STOPPED")

    def assertResponseOk(self, response):
        self.assertTrue(response.ok, "%s: %s" % (response.status_code, response.content))

    def assertExitedCleanly(self, program_name):
        info = self._xmlrpc.supervisor.getProcessInfo(program_name)
        try:
            self.assertEqual(info['exitstatus'], 0, "%s exitstatus=%s (detail: %s)" % (program_name, info['exitstatus'], info))
        except AssertionError:
            log.error("%s stdout: %s" % (program_name, self._xmlrpc.supervisor.readProcessStdoutLog(program_name, 0, 4096)))
            log.error("%s stderr: %s" % (program_name, self._xmlrpc.supervisor.readProcessStderrLog(program_name, 0, 4096)))
            log.error(self.tail_log("%s.log" % program_name))
            log.error(self.tail_log("supervisord.log"))
            raise

    def tail_log(self, log_name):
        with open(log_name) as log_file:
            log_tail = ''.join(log_file.readlines()[-20:])
        return """
Tail for %s:
------------------------------
%s
""" % (log_name, log_tail)
    def handle(self, *args, **options):
        from chroma_core.lib import service_config
        from chroma_core.models import Bundle

        sc = service_config.ServiceConfig()
        sc._setup_rabbitmq_credentials()
        sc._setup_crypto()
        sc._syncdb()

        # default, works for --no-bundles
        profile_path = os.path.join(
            site_dir(), "../chroma-bundles/base_managed_RH7.profile")

        if options["no_bundles"]:
            for bundle in ["iml-agent", "external"]:
                Bundle.objects.get_or_create(bundle_name=bundle,
                                             location="/tmp/",
                                             description="Dummy bundle")
        else:
            # override the default path if we have unpacked a real archive
            repo_profile_path = os.path.join(settings.DEV_REPO_PATH,
                                             "base_managed_RH7.profile")
            if os.path.isfile(repo_profile_path):
                profile_path = repo_profile_path

            with open(profile_path) as f:
                bundle_names = json.load(f)["bundles"]
            missing_bundles = bundle_names

            bundle_files = glob.glob(
                os.path.join(settings.DEV_REPO_PATH, "*-bundle.tar.gz"))
            for bundle_file in bundle_files:
                archive = tarfile.open(bundle_file, "r:gz")
                meta = json.load(archive.extractfile("./meta"))
                repo = os.path.join(settings.DEV_REPO_PATH, meta["name"])

                if not os.path.exists(os.path.join(repo, "meta")):
                    print("Extracting %s" % meta["name"])
                    if not os.path.exists(repo):
                        os.makedirs(repo)

                    archive.extractall(repo)
                    archive.close()

                if not Bundle.objects.filter(location=repo).exists():
                    service_config.bundle("register", repo)

                try:
                    missing_bundles.remove(meta["name"])
                except ValueError:
                    # Bundles not associated with a profile are OK
                    pass

            if len(missing_bundles):
                print(
                    """
Missing bundles: %(bundles)s

Package bundles are required for installation. In order to proceed, you
have 2 options:
    1. Download an installer from %(bundle_url)s and unpack it in %(repo_path)s
    2. Build an installer locally and unpack it in %(repo_path)s

Please note that the fake bundles can't be used to install real storage
servers -- you'll need to use one of the first two methods in order to make
that work.
    """ % {
                        "bundle_url":
                        "http://jenkins.lotus.hpdd.lab.intel.com/job/manager-for-lustre/arch=x86_64,distro=el7/lastSuccessfulBuild/artifact/chroma-bundles/",
                        "repo_path": settings.DEV_REPO_PATH,
                        "bundles": ", ".join(missing_bundles),
                    })
                sys.exit(1)

        for profile_path in glob.glob(
                os.path.join(os.path.dirname(profile_path), "*.profile")):
            with open(profile_path) as profile_file:
                service_config.register_profile(profile_file)

        print("""Great success:
 * run `systemctl start iml-manager.target`
 * open %s""" % settings.SERVER_HTTP_URL)