Example #1
def undeploy_from_appserver(zoomdb, app_id, bundle_id,
                            appserver_instance_id, appserver_port,
                            zero_undeploys_ok=False):

    my_hostname = utils.node_meta("name")
    my_instance_id = utils.node_meta("instance_id")

    if appserver_instance_id not in (my_hostname, my_instance_id, "localhost"):
        raise utils.InfrastructureException(
            "Incorrect appserver received undeploy_from_appserver task; " +
            "I am %s but the undeploy is for %s." % (my_hostname,
                                                     appserver_instance_id))

    bundle = zoomdb.get_bundle(bundle_id)

    num_stopped = stop_serving_bundle(app_id, bundle.bundle_name)

    if num_stopped == 0 and zero_undeploys_ok:
        zoomdb.log("Note: no matching bundles were running on %s." %
                   my_hostname)

    elif num_stopped != 1:
        raise utils.InfrastructureException(
            ("Attempting to undeploy one bundle (app_id %s, bundle_id %s, "
             "bundle_name %s) from appserver %s:%d, but %d bundles were "
             "stopped.")
            % (app_id, bundle_id, bundle.bundle_name,
               appserver_instance_id, appserver_port, num_stopped))

    app_dir, bundle_dir = utils.app_and_bundle_dirs(app_id,
                                                    bundle.bundle_name)
    if os.path.isdir(bundle_dir):
        zoomdb.log("Removing old bundle from %s." % bundle_dir)
        utils.chown_to_me(bundle_dir)
        shutil.rmtree(bundle_dir)
Example #2
def start_serving_bundle(app_id, bundle_name):
    """
    Serve the given bundle under supervisor, and return the appserver info
    for where the service is running.

    If you are running locally as dev, you need to make sure the user
    running Celery has permissions to write to the /etc/supervisor/conf.d dir.

    $ sudo chgrp nateaune /etc/supervisor/conf.d/
    $ sudo chmod g+w /etc/supervisor/conf.d/

    :returns: (instance_id, node_name, host_ip, host_port)
    """

    # check that this bundle isn't already being served here - otherwise
    # supervisor will silently ignore the redundant config files!
    for bun in get_active_bundles():
        if bun["app_id"] == app_id and bun["bundle_name"] == bundle_name:
            raise utils.InfrastructureException((
                    "Redundant bundle service request: server %s (hostname=%s)"
                    " is already serving app_id %s, bundle_name %s.") % (
                    utils.node_meta("name"),
                    socket.gethostname(),
                    app_id,
                    bundle_name))

    port_to_use = _get_a_free_port()

    config_filename = os.path.join(taskconfig.SUPERVISOR_APP_CONF_DIR,
                                   "%s.%s.port.%d.conf" % (app_id,
                                                           bundle_name,
                                                           port_to_use))

    app_dir, bundle_dir = utils.app_and_bundle_dirs(app_id, bundle_name)

    utils.render_tpl_to_file(
        'deploy/supervisor_entry.conf',
        config_filename,
        run_in_userenv=os.path.join(taskconfig.PRIVILEGED_PROGRAMS_PATH,
                                    "run_in_userenv"),
        custdir=taskconfig.NR_CUSTOMER_DIR,
        bundle_name=bundle_name,
        bundle_runner=os.path.join(bundle_dir, "thisbundle.py"),
        bundle_dir=bundle_dir,
        app_user=app_id,
        port=port_to_use)

    _kick_supervisor()

    instance_id = utils.node_meta("instance_id")
    node_name = utils.node_meta("name")
    host_ip = utils.get_internal_ip()

    return (instance_id, node_name, host_ip, port_to_use)
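A minimal caller sketch for start_serving_bundle, assuming the function is importable from a deploy module; the module path, app_id, and bundle_name below are hypothetical, not taken from the examples:

from dz.tasklib.deploy import start_serving_bundle  # assumed module path

# Unpack the appserver info tuple described in the docstring above.
instance_id, node_name, host_ip, host_port = start_serving_bundle(
    "app_12345",               # hypothetical app_id
    "bundle_app_12345_2021")   # hypothetical bundle_name
print("Bundle is being served at %s:%d (node %s, instance %s)"
      % (host_ip, host_port, node_name, instance_id))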
Example #3
def get_or_create_database(app_id):
    """
    Get or create a database and username for a customer application.

    :param app_id: app_id of the application. Currently this is used as the
        dbname and username, but those labels may change in the future.

    :returns: A tuple (created, db_host, db_name, db_username, db_password).
              If created is False, db_password will be None.
    """
    db_host = utils.node_meta("elastic_hostname")
    # utils.get_internal_ip()  # TODO: should be elastic hostname
    db_name = app_id
    db_username = app_id
    db_password = None
    created = False

    (cur, conn) = _get_cur_conn_as_superuser()

    try:
        cur.execute("CREATE DATABASE %s" % db_name)

    except psycopg2.OperationalError:
        # The database cannot be created; this is a real problem, so re-raise.
        raise
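A hedged usage sketch based on the docstring's return contract; the import path and app_id are assumptions:

from dz.tasklib.database import get_or_create_database  # assumed module path

created, db_host, db_name, db_username, db_password = \
    get_or_create_database("app_12345")  # hypothetical app_id
if created:
    print("Provisioned %s on %s for user %s" % (db_name, db_host, db_username))
else:
    print("Database %s already existed; no new password issued." % db_name)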
Example #4
def deploy_app_bundle(app_id, bundle_name, appserver_name, dbinfo,
                      bundle_storage_engine=None,
                      num_workers=1):

    bundle_storage_engine = bundle.get_bundle_storage_engine(
        bundle_storage_engine)

    my_hostname = utils.node_meta("name")
    my_instance_id = utils.node_meta("instance_id")

    if appserver_name not in (my_hostname, my_instance_id, "localhost"):
        raise utils.InfrastructureException(
            "Incorrect appserver received deploy_app_bundle task; " +
            "I am %s but the deploy is requesting %s." % (my_hostname,
                                                          appserver_name))

    install_app_bundle(app_id, bundle_name, appserver_name, dbinfo,
                       bundle_storage_engine=bundle_storage_engine,
                       num_workers=num_workers)

    # result is a (instance_id, node_name, host_ip, port_to_use)
    return start_serving_bundle(app_id, bundle_name)
Example #5
def gunicorn_signal(gunicorn_master_pid, signal_name, appserver_name):
    my_hostname = utils.node_meta("name")

    if appserver_name not in (my_hostname, "localhost"):
        raise utils.InfrastructureException(
            "Incorrect appserver received gunicorn_signal task; " +
            "I am %s but the task is requesting %s." % (my_hostname,
                                                        appserver_name))

    if signal_name not in ("TTIN", "TTOU"):
        raise utils.InfrastructureException(
            "Unexpected gunicorn_signal %s: only TTIN & TTOU allowed."
            % signal_name)

    utils.local_privileged(["gunicorn_signal",
                            signal_name,
                            gunicorn_master_pid])
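Gunicorn treats TTIN as "add one worker" and TTOU as "remove one worker", which is why only those two signals are allowed here. A hedged caller sketch (the master PID is hypothetical):

# Scale the app's gunicorn master up by one worker process.
gunicorn_signal(gunicorn_master_pid="4242",  # hypothetical PID, passed through
                signal_name="TTIN",          # to the privileged helper
                appserver_name="localhost")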
Example #6
def remove_local_proxy_config(app_id):
    site_conf_filename = _get_nginx_conffile(app_id)

    if not os.path.isfile(site_conf_filename):
        raise utils.InfrastructureException((
            "Requested remove_local_proxy_config for app %s, but that "
            "app is not currently proxied from this nginx instance (%s). "
            "No site configuration file in %s.")
                                            % (app_id,
                                               utils.node_meta("name"),
                                               site_conf_filename))

    app_dir, _ = utils.app_and_bundle_dirs(app_id)
    shutil.rmtree(app_dir, ignore_errors=True)

    os.remove(site_conf_filename)
    utils.local_privileged(["kick_nginx"])
Example #7
    def test_node_metadata(self):
        instance_id = utils.node_meta("instance_id")
        node_name = utils.node_meta("name")
        node_role = utils.node_meta("role")

        self.assertEqual(instance_id, "localhost")
        self.assertEqual(node_name, utils.local("hostname").strip())
        self.assertEqual(node_role, "localhost")

        here = path.abspath(path.split(__file__)[0])
        test_fixture_meta = path.join(here, '../fixtures', 'node_meta')
        self.patch(taskconfig, "NODE_META_DATA_DIR", test_fixture_meta)

        self.assertEqual(utils.node_meta("instance_id"), "i-12345")
        self.assertEqual(utils.node_meta("name"), "myname")
        self.assertEqual(utils.node_meta("role"), "myrole")
Example #8
def server_profile(panel):
    panel.logger.info("Remote control server_profile request.")
    return {"name": utils.node_meta("name"),
            "role": utils.node_meta("role"),
            "instance_id": utils.node_meta("instance_id"),
            }
Example #9
"""
This module contains 'jobs' for getting application log segments. These are
implemented as broadcast commands, rather than celery jobs, in order to
facilitate a rapid response. This approach probably does not scale well,
though.
"""

import datetime

from celery.task.control import broadcast
from celery.worker.control import Panel
from dz.tasklib import logs, utils, taskconfig

MY_HOSTNAME = utils.node_meta("name")

LOG_SERVICE_NOT_AVAILABLE = object()


@Panel.register
def distlog_get_available_logs(panel, app_id=None):
    panel.logger.info("Remote control distlog_get_available_logs request, "
                      "app_id=%s." % app_id)

    if not app_id:
        raise ValueError("distlog_get_available_logs: " "Missing parameter app_id.")

    result = []
    for loginfo in logs.get_available_logs(app_id):
        loginfo["hostname"] = MY_HOSTNAME
        result.append(loginfo)
    return result
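A hedged sketch of how the command above might be invoked from the calling tier, using the same Celery broadcast helper this module imports (the app_id and timeout are illustrative):

from celery.task.control import broadcast

# Ask every worker node for the log segments it holds for one app and
# collect the replies (one result per responding host).
replies = broadcast("distlog_get_available_logs",
                    arguments={"app_id": "app_12345"},  # hypothetical app_id
                    reply=True, timeout=5)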