Code Example #1
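# Installs Erlang and RabbitMQ from RPM URLs, deploys the kill-rabbit script
# and ulimit config, enables the management/tracing plugins, creates the
# broker user, applies the SSL settings, then stops the service.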
def _install_rabbitmq():
    erlang_rpm_source_url = ctx.node.properties['erlang_rpm_source_url']
    rabbitmq_rpm_source_url = ctx.node.properties['rabbitmq_rpm_source_url']
    # TODO: maybe we don't need this env var
    os.putenv('RABBITMQ_FD_LIMIT',
              str(ctx.node.properties['rabbitmq_fd_limit']))
    rabbitmq_log_path = '/var/log/cloudify/rabbitmq'
    rabbitmq_username = ctx.node.properties['rabbitmq_username']
    rabbitmq_password = ctx.node.properties['rabbitmq_password']
    rabbitmq_cert_public = ctx.node.properties['rabbitmq_cert_public']
    rabbitmq_ssl_enabled = ctx.node.properties['rabbitmq_ssl_enabled']
    rabbitmq_cert_private = ctx.node.properties['rabbitmq_cert_private']

    ctx.logger.info('Installing RabbitMQ...')
    utils.set_selinux_permissive()

    utils.copy_notice('rabbitmq')
    utils.mkdir(rabbitmq_log_path)

    utils.yum_install(erlang_rpm_source_url)
    utils.yum_install(rabbitmq_rpm_source_url)

    utils.logrotate('rabbitmq')

    utils.deploy_blueprint_resource(
        '{0}/kill-rabbit'.format(CONFIG_PATH),
        '/usr/local/bin/kill-rabbit')
    utils.chmod('500', '/usr/local/bin/kill-rabbit')

    utils.systemd.configure('rabbitmq')

    ctx.logger.info('Configuring File Descriptors Limit...')
    utils.deploy_blueprint_resource(
        '{0}/rabbitmq_ulimit.conf'.format(CONFIG_PATH),
        '/etc/security/limits.d/rabbitmq.conf')

    utils.systemd.systemctl('daemon-reload')

    utils.chown('rabbitmq', 'rabbitmq', rabbitmq_log_path)

    utils.systemd.start('cloudify-rabbitmq')

    time.sleep(10)
    utils.wait_for_port(5672)

    ctx.logger.info('Enabling RabbitMQ Plugins...')
    # Occasional timing issues with rabbitmq starting have resulted in
    # failures when first trying to enable plugins
    utils.sudo(['rabbitmq-plugins', 'enable', 'rabbitmq_management'],
               retries=5)
    utils.sudo(['rabbitmq-plugins', 'enable', 'rabbitmq_tracing'], retries=5)

    _clear_guest_permissions_if_guest_exists()
    _create_user_and_set_permissions(rabbitmq_username, rabbitmq_password)
    _set_security(
        rabbitmq_ssl_enabled,
        rabbitmq_cert_private,
        rabbitmq_cert_public)

    utils.systemd.stop('cloudify-rabbitmq', retries=5)
Code Example #2
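# Uses an external InfluxDB endpoint when one is configured; otherwise
# installs and configures InfluxDB locally, then records the endpoint IP
# as a runtime property.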
def main():

    influxdb_endpoint_ip = ctx_properties['influxdb_endpoint_ip']
    # currently, cannot be changed due to webui not allowing to configure it.
    influxdb_endpoint_port = 8086

    if influxdb_endpoint_ip:
        ctx.logger.info('External InfluxDB Endpoint IP provided: {0}'.format(
            influxdb_endpoint_ip))
        time.sleep(5)
        utils.wait_for_port(influxdb_endpoint_port, influxdb_endpoint_ip)
        _configure_influxdb(influxdb_endpoint_ip, influxdb_endpoint_port)
    else:
        influxdb_endpoint_ip = ctx.instance.host_ip
        _install_influxdb()

        utils.systemd.restart(SERVICE_NAME)

        utils.wait_for_port(influxdb_endpoint_port, influxdb_endpoint_ip)
        _configure_influxdb(influxdb_endpoint_ip, influxdb_endpoint_port)

        utils.systemd.stop(SERVICE_NAME)

    ctx.instance.runtime_properties['influxdb_endpoint_ip'] = \
        influxdb_endpoint_ip
Code Example #3
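# Installs and configures Elasticsearch locally unless an external endpoint
# is given, in which case it aborts the bootstrap if the 'cloudify_storage'
# index already exists on that endpoint.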
def main():

    es_endpoint_ip = ctx.node.properties['es_endpoint_ip']
    es_endpoint_port = ctx.node.properties['es_endpoint_port']

    if not es_endpoint_ip:
        es_endpoint_ip = ctx.instance.host_ip
        _install_elasticsearch()

        utils.systemd.start('elasticsearch')
        utils.wait_for_port(es_endpoint_port, es_endpoint_ip)
        _configure_elasticsearch(host=es_endpoint_ip, port=es_endpoint_port)

        utils.systemd.stop('elasticsearch')
        utils.clean_var_log_dir('elasticsearch')
    else:
        ctx.logger.info('External Elasticsearch Endpoint provided: '
                        '{0}:{1}...'.format(es_endpoint_ip, es_endpoint_port))
        time.sleep(5)
        utils.wait_for_port(es_endpoint_port, es_endpoint_ip)
        ctx.logger.info('Checking if \'cloudify_storage\' '
                        'index already exists...')

        if http_request('http://{0}:{1}/cloudify_storage'.format(
                es_endpoint_ip, es_endpoint_port), method='HEAD'):
            utils.error_exit('\'cloudify_storage\' index already exists on '
                             '{0}, terminating bootstrap...'.format(
                                 es_endpoint_ip))
        _configure_elasticsearch(host=es_endpoint_ip, port=es_endpoint_port)

    ctx.instance.runtime_properties['es_endpoint_ip'] = es_endpoint_ip
Code Example #4
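# Fabric-style bootstrap: waits for SSH and cloud-init, installs Docker from
# the official apt repository if missing, and sets up a 4 GB swapfile plus
# the matching fstab entry.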
def bootstrap():
    wait_for_port(22)
    wait_for_cloud_init()
    this_instance = env.do_hosts[env.host_string]

    env.run("apt-get install -y curl software-properties-common")

    if "command not found" in env.run("docker --version"):
        print("Setting up docker")
        env.run(
            "curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -",
            show=True)
        env.run("apt-key fingerprint 0EBFCD88", show=True)
        env.run(
            "add-apt-repository \"deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable\"",
            show=True)
        env.run("apt-get update -y", show=True)
        env.run("apt-get install -y docker-ce", show=True)
        env.run("systemctl enable docker")
        env.run("systemctl restart docker")

    if not env.run("ls / | grep swapfile").stdout.strip():
        print("Setting up swap")
        env.run("dd if=/dev/zero of=/swapfile bs=1M count=4096", show=True)
        env.run("chmod 0600 /swapfile", show=True)
        env.run("mkswap /swapfile", show=True)
        env.run("swapon /swapfile", show=True)

    if not env.run("cat /etc/fstab | grep swapfile").stdout.strip():
        print("Adding swap to fstab")
        env.run("echo '/swapfile   none    swap    sw    0   0' >> /etc/fstab",
                show=True)
Code Example #5
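# Same InfluxDB flow as example #2; the systemd unit name comes from the
# INFLUX_SERVICE_NAME constant instead of SERVICE_NAME.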
def main():

    influxdb_endpoint_ip = ctx_properties['influxdb_endpoint_ip']
    # currently, cannot be changed due to webui not allowing to configure it.
    influxdb_endpoint_port = 8086

    if influxdb_endpoint_ip:
        ctx.logger.info('External InfluxDB Endpoint IP provided: {0}'.format(
            influxdb_endpoint_ip))
        time.sleep(5)
        utils.wait_for_port(influxdb_endpoint_port, influxdb_endpoint_ip)
        _configure_influxdb(influxdb_endpoint_ip, influxdb_endpoint_port)
    else:
        influxdb_endpoint_ip = ctx.instance.host_ip
        _install_influxdb()

        utils.systemd.restart(INFLUX_SERVICE_NAME)

        utils.wait_for_port(influxdb_endpoint_port, influxdb_endpoint_ip)
        _configure_influxdb(influxdb_endpoint_ip, influxdb_endpoint_port)

        utils.systemd.stop(INFLUX_SERVICE_NAME)

    ctx.instance.runtime_properties['influxdb_endpoint_ip'] = \
        influxdb_endpoint_ip
Code Example #6
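# Same Elasticsearch flow as example #3; only the call formatting differs.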
def main():

    es_endpoint_ip = ctx.node.properties['es_endpoint_ip']
    es_endpoint_port = ctx.node.properties['es_endpoint_port']

    if not es_endpoint_ip:
        es_endpoint_ip = ctx.instance.host_ip
        _install_elasticsearch()

        utils.systemd.start('elasticsearch')
        utils.wait_for_port(es_endpoint_port, es_endpoint_ip)
        _configure_elasticsearch(host=es_endpoint_ip, port=es_endpoint_port)

        utils.systemd.stop('elasticsearch')
        utils.clean_var_log_dir('elasticsearch')
    else:
        ctx.logger.info('External Elasticsearch Endpoint provided: '
                        '{0}:{1}...'.format(es_endpoint_ip, es_endpoint_port))
        time.sleep(5)
        utils.wait_for_port(es_endpoint_port, es_endpoint_ip)
        ctx.logger.info('Checking if \'cloudify_storage\' '
                        'index already exists...')

        if http_request('http://{0}:{1}/cloudify_storage'.format(
                es_endpoint_ip, es_endpoint_port),
                        method='HEAD'):
            utils.error_exit(
                '\'cloudify_storage\' index already exists on '
                '{0}, terminating bootstrap...'.format(es_endpoint_ip))
        _configure_elasticsearch(host=es_endpoint_ip, port=es_endpoint_port)

    ctx.instance.runtime_properties['es_endpoint_ip'] = es_endpoint_ip
Code Example #7
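# Test fixture thread: binds an in-memory SMTP server to an unused local
# port and blocks until the port accepts connections.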
  def __init__(self):
    threading.Thread.__init__(self)

    self.host = 'localhost'
    self.port = get_unused_port()

    self.server = InMemorySMTP(self.host, self.port)
    wait_for_port(self.host, self.port)
Code Example #8
File: server.py Project: sauter-hq/rethinkdb
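# Polls the managed HTTP server subprocess; raises with its captured output
# if it has exited, otherwise waits for its port to be reachable.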
 def checkOnServer(self):
     '''Check that the server is still running, throwing an error if it is not'''
     
     if self.__serverProcess.poll() is not None:
         self.__serverOutput.seek(0)
         output = self.__serverOutput.read().decode('utf-8')
         returnCode = self.__serverProcess.returncode
         self.endServer()
         raise Exception('http server exited with return code %d. Output was:\n%s\n' % (returnCode, output))
     utils.wait_for_port(self.httpPort)
Code Example #9
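# Starts the HTTP test server out-of-process, parses the assigned port
# numbers from its output within a startup deadline, waits for the HTTP
# port, and registers an at-exit shutdown hook.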
    def __init__(self,
                 httpbinPort=0,
                 httpPort=0,
                 sslPort=0,
                 startupTimeout=20):
        '''Start a server, using subprocess to do it out-of-process'''

        # -- startup server

        runableFile = __file__.rstrip('c')

        self.__serverOutput = tempfile.NamedTemporaryFile(mode='w+')
        self.__serverProcess = subprocess.Popen([
            runableFile, '--httpbin-port',
            str(httpbinPort), '--http-port',
            str(httpPort), '--ssl-port',
            str(sslPort)
        ],
                                                stdout=self.__serverOutput,
                                                preexec_fn=os.setpgrp)

        # -- read port numbers

        portRegex = re.compile(
            r'^\s+(?P<name>\w+).+:\s+(?P<port>\d+)\shttp\S+$')

        deadline = startupTimeout + time.time()
        serverLines = utils.nonblocking_readline(self.__serverOutput)
        while deadline > time.time():
            line = next(serverLines)
            if line is None:
                time.sleep(.1)
                continue
            parsedLine = portRegex.match(line)
            if parsedLine is not None:
                if parsedLine.group('name') == 'httpbin':
                    self.httpbinPort = int(parsedLine.group('port'))
                elif parsedLine.group('name') == 'http':
                    self.httpPort = int(parsedLine.group('port'))
                elif parsedLine.group('name') == 'ssl':
                    self.sslPort = int(parsedLine.group('port'))
            if all([self.httpbinPort, self.httpPort, self.sslPort]):
                utils.wait_for_port(self.httpPort,
                                    timeout=(deadline - time.time()))
                break
        else:
            raise Exception(
                'Timed out waiting %.2f secs for the http server to start' %
                startupTimeout)

        # -- set an at-exit to make sure we shut ourselves down

        atexit.register(self.endServer)
Code Example #10
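# Elasticsearch bootstrap with upgrade/rollback support: dumps and restores
# upgrade data around the install-or-verify-external-endpoint flow.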
def main():

    es_endpoint_ip = ctx_properties['es_endpoint_ip']
    es_endpoint_port = ctx_properties['es_endpoint_port']

    if utils.is_upgrade:
        dump_upgrade_data()

    if not es_endpoint_ip:
        es_endpoint_ip = ctx.instance.host_ip
        _install_elasticsearch()
        utils.systemd.restart(ES_SERVICE_NAME, append_prefix=False)
        utils.wait_for_port(es_endpoint_port, es_endpoint_ip)
        _configure_elasticsearch(host=es_endpoint_ip, port=es_endpoint_port)
        _wait_for_shards(es_endpoint_port, es_endpoint_ip)

        utils.clean_var_log_dir('elasticsearch')
    else:
        ctx.logger.info('External Elasticsearch Endpoint provided: '
                        '{0}:{1}...'.format(es_endpoint_ip, es_endpoint_port))
        time.sleep(5)
        utils.wait_for_port(es_endpoint_port, es_endpoint_ip)
        ctx.logger.info('Checking if \'cloudify_storage\' '
                        'index already exists...')

        if http_request('http://{0}:{1}/cloudify_storage'.format(
                es_endpoint_ip, es_endpoint_port),
                        method='HEAD').code == 200:
            ctx.abort_operation(
                '\'cloudify_storage\' index already exists on '
                '{0}, terminating bootstrap...'.format(es_endpoint_ip))
        _configure_elasticsearch(host=es_endpoint_ip, port=es_endpoint_port)

    if utils.is_upgrade or utils.is_rollback:
        restore_upgrade_data(es_endpoint_ip, es_endpoint_port)

    if not es_endpoint_port:
        utils.systemd.stop(ES_SERVICE_NAME, append_prefix=False)

    ctx.instance.runtime_properties['es_endpoint_ip'] = es_endpoint_ip
Code Example #11
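# Same upgrade-aware Elasticsearch bootstrap as example #10 with minor
# formatting differences.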
def main():

    es_endpoint_ip = ctx_properties['es_endpoint_ip']
    es_endpoint_port = ctx_properties['es_endpoint_port']

    if utils.is_upgrade:
        dump_upgrade_data()

    if not es_endpoint_ip:
        es_endpoint_ip = ctx.instance.host_ip
        _install_elasticsearch()
        utils.systemd.restart(ES_SERVICE_NAME, append_prefix=False)
        utils.wait_for_port(es_endpoint_port, es_endpoint_ip)
        _configure_elasticsearch(host=es_endpoint_ip, port=es_endpoint_port)
        _wait_for_shards(es_endpoint_port, es_endpoint_ip)

        utils.clean_var_log_dir('elasticsearch')
    else:
        ctx.logger.info('External Elasticsearch Endpoint provided: '
                        '{0}:{1}...'.format(es_endpoint_ip, es_endpoint_port))
        time.sleep(5)
        utils.wait_for_port(es_endpoint_port, es_endpoint_ip)
        ctx.logger.info('Checking if \'cloudify_storage\' '
                        'index already exists...')

        if http_request('http://{0}:{1}/cloudify_storage'.format(
                es_endpoint_ip, es_endpoint_port), method='HEAD').code == 200:
            ctx.abort_operation('\'cloudify_storage\' index already exists on '
                                '{0}, terminating bootstrap...'.format(
                                    es_endpoint_ip))
        _configure_elasticsearch(host=es_endpoint_ip, port=es_endpoint_port)

    if utils.is_upgrade or utils.is_rollback:
        restore_upgrade_data(es_endpoint_ip, es_endpoint_port)

    if not es_endpoint_port:
        utils.systemd.stop(ES_SERVICE_NAME, append_prefix=False)

    ctx.instance.runtime_properties['es_endpoint_ip'] = es_endpoint_ip
Code Example #12
File: server.py Project: AtnNn/rethinkdb
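# Same out-of-process server startup as example #9, as it appears in the
# AtnNn/rethinkdb fork.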
 def __init__(self, httpbinPort=0, httpPort=0, sslPort=0, startupTimeout=20):
     '''Start a server, using subprocess to do it out-of-process'''
     
     # -- startup server
     
     runableFile = __file__.rstrip('c')
     
     self.__serverOutput = tempfile.NamedTemporaryFile(mode='w+')
     self.__serverProcess = subprocess.Popen(
         [runableFile,
          '--httpbin-port', str(httpbinPort),
          '--http-port', str(httpPort),
          '--ssl-port', str(sslPort)],
         stdout=self.__serverOutput, preexec_fn=os.setpgrp)
     
     # -- read port numbers
     
     portRegex = re.compile(r'^\s+(?P<name>\w+).+:\s+(?P<port>\d+)\shttp\S+$')
     
     deadline = startupTimeout + time.time()
     serverLines = utils.nonblocking_readline(self.__serverOutput)
     while deadline > time.time():
         line = next(serverLines)
         if line is None:
             time.sleep(.1)
             continue
         parsedLine = portRegex.match(line)
         if parsedLine is not None:
             if parsedLine.group('name') == 'httpbin':
                 self.httpbinPort = int(parsedLine.group('port'))
             elif parsedLine.group('name') == 'http':
                 self.httpPort = int(parsedLine.group('port'))
             elif parsedLine.group('name') == 'ssl':
                 self.sslPort = int(parsedLine.group('port'))
         if all([self.httpbinPort, self.httpPort, self.sslPort]):
             utils.wait_for_port(self.httpPort, timeout=(deadline - time.time()))
             break
     else:
         raise Exception('Timed out waiting %.2f secs for the http server to start' % startupTimeout)
     
     # -- set an at-exit to make sure we shut ourselves down
     
     atexit.register(self.endServer)
Code Example #13
File: create.py Project: ptanX/cloudify
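# RabbitMQ install that also deploys definitions.json and rabbitmq.config,
# deploys rabbitmq-env.conf so the broker survives hostname changes, and
# clears the old mnesia data before restarting.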
def _install_rabbitmq():
    erlang_rpm_source_url = ctx_properties['erlang_rpm_source_url']
    rabbitmq_rpm_source_url = ctx_properties['rabbitmq_rpm_source_url']
    # TODO: maybe we don't need this env var
    os.putenv('RABBITMQ_FD_LIMIT', str(ctx_properties['rabbitmq_fd_limit']))
    rabbitmq_username = ctx_properties['rabbitmq_username']
    rabbitmq_password = ctx_properties['rabbitmq_password']

    ctx.logger.info('Installing RabbitMQ...')
    utils.set_selinux_permissive()

    utils.copy_notice(SERVICE_NAME)
    utils.mkdir(LOG_DIR)

    utils.yum_install(erlang_rpm_source_url, service_name=SERVICE_NAME)
    utils.yum_install(rabbitmq_rpm_source_url, service_name=SERVICE_NAME)

    utils.logrotate(SERVICE_NAME)

    utils.systemd.configure(SERVICE_NAME)

    ctx.logger.info('Configuring File Descriptors Limit...')
    utils.deploy_blueprint_resource(
        '{0}/rabbitmq_ulimit.conf'.format(CONFIG_PATH), FD_LIMIT_PATH,
        SERVICE_NAME)

    utils.deploy_blueprint_resource(
        '{0}/rabbitmq-definitions.json'.format(CONFIG_PATH),
        join(HOME_DIR, 'definitions.json'), SERVICE_NAME)

    # This stops rabbit from failing if the host name changes, e.g. when
    # a manager is deployed from an image but given a new hostname.
    # This is likely to cause problems with clustering of rabbitmq if this is
    # done at any point, so at that point a change to the file and cleaning of
    # mnesia would likely be necessary.
    utils.deploy_blueprint_resource(
        '{0}/rabbitmq-env.conf'.format(CONFIG_PATH),
        '/etc/rabbitmq/rabbitmq-env.conf', SERVICE_NAME)
    # Delete old mnesia node
    utils.sudo(['rm', '-rf', '/var/lib/rabbitmq/mnesia'])

    utils.systemd.systemctl('daemon-reload')

    utils.chown('rabbitmq', 'rabbitmq', LOG_DIR)

    # rabbitmq restart exits with 143 status code that is valid in this case.
    utils.systemd.restart(SERVICE_NAME, ignore_failure=True)

    time.sleep(10)
    utils.wait_for_port(5672)

    ctx.logger.info('Enabling RabbitMQ Plugins...')
    # Occasional timing issues with rabbitmq starting have resulted in
    # failures when first trying to enable plugins
    utils.sudo(['rabbitmq-plugins', 'enable', 'rabbitmq_management'],
               retries=5)
    utils.sudo(['rabbitmq-plugins', 'enable', 'rabbitmq_tracing'], retries=5)

    _clear_guest_permissions_if_guest_exists()
    _create_user_and_set_permissions(rabbitmq_username, rabbitmq_password)

    utils.deploy_blueprint_resource('{0}/rabbitmq.config'.format(CONFIG_PATH),
                                    join(HOME_DIR, 'rabbitmq.config'),
                                    SERVICE_NAME,
                                    user_resource=True)

    utils.systemd.stop(SERVICE_NAME, retries=5)
Code Example #14
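# Deploys the configured Docker stack to this host: syncs the project files,
# ensures networks, volumes and containers exist (recreating a container
# when its nonce or image changed), and prunes resources no longer declared.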
def deploy_prod_services(container_to_deploy=None):
    wait_for_port(22)
    wait_for_cloud_init()
    this_instance = env.do_hosts[env.host_string]

    sudo("mkdir -p /home/ubuntu/serv_files")
    rsync_project(local_dir=".",
                  exclude=[".git"],
                  remote_dir="/home/ubuntu/serv_files",
                  delete=True,
                  extra_opts="--rsync-path=\"sudo rsync\"",
                  ssh_opts='-oStrictHostKeyChecking=no')

    with cd("/home/ubuntu/serv_files"):
        active_networks = []
        active_volumes = []
        active_containers = []

        for domain, instances in env.stack.items():
            if domain not in docker_manager.get_networks():
                docker_manager.add_network(domain,
                                           subnet="11.0.0.0/16",
                                           gateway="11.0.0.1")
            active_networks.append(domain)

            for instance, instance_conf in instances.items():
                if instance != this_instance:
                    continue

                for volume in instance_conf.get("volumes", []):
                    if volume not in docker_manager.get_volumes():
                        docker_manager.add_volume(volume)
                    active_volumes.append(volume)

                containers = instance_conf["containers"]
                for container, container_conf in containers.items():
                    if container_to_deploy and container != container_to_deploy:
                        continue
                    volumes = container_conf.get("volumes", {})
                    image_name = None
                    if "build" in container_conf:
                        image_name = container
                        docker_manager.build_image(
                            container_conf["build"],
                            container,
                            docker_file=container_conf.get("docker_file",
                                                           "Dockerfile"))
                    elif "run" in container_conf:
                        image_name = container_conf["run"]
                        docker_manager.pull_image(container_conf["run"])
                    image = docker_manager.get_images(get_all=True)[image_name]
                    add_container_parms = {
                        "image": image_name,
                        "name": container,
                        "privileged": container_conf.get("privileged", False),
                        "network": domain,
                        "volumes": volumes,
                        "expose": container_conf.get("expose", {}),
                        "envs": container_conf.get("env"),
                    }
                    }
                    nonce = get_nonce(add_container_parms)

                    if (container in docker_manager.get_containers() and
                        (docker_manager.get_containers()[container]["Config"]
                         ["Labels"].get("NONCE") != nonce
                         or not docker_manager.get_containers()[container]
                         ["Image"].startswith("sha256:%s" % image["Id"]))):
                        docker_manager.remove_container(container)

                    if container not in docker_manager.get_containers():
                        add_container_parms["nonce"] = nonce
                        docker_manager.add_container(**add_container_parms)
                    active_containers.append(container)

        if not container_to_deploy:
            for container in docker_manager.get_containers():
                if container not in active_containers:
                    print("Unneeded container found: %s " % container)
                    docker_manager.remove_container(container)
            for volume in docker_manager.get_volumes():
                if volume not in active_volumes:
                    print(
                        "Unneeded volume detected: %s, you need to remove it manually"
                        % volume)

            for network in docker_manager.get_networks():
                if network not in active_networks:
                    print("Unneeded network found: %s " % network)
                    docker_manager.remove_network(network)
Code Example #15
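# Fragment: reads the RabbitMQ queue TTL and length-limit properties and
# builds the per-queue policy dictionaries (the snippet is truncated at
# the end).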
    # For some reason, it fails. Need to check.

    events_queue_message_ttl = ctx.node.properties[
        'rabbitmq_events_queue_message_ttl']
    logs_queue_message_ttl = ctx.node.properties[
        'rabbitmq_logs_queue_message_ttl']
    metrics_queue_message_ttl = ctx.node.properties[
        'rabbitmq_metrics_queue_message_ttl']
    events_queue_length_limit = ctx.node.properties[
        'rabbitmq_events_queue_length_limit']
    logs_queue_length_limit = ctx.node.properties[
        'rabbitmq_logs_queue_length_limit']
    metrics_queue_length_limit = ctx.node.properties[
        'rabbitmq_metrics_queue_length_limit']

    utils.wait_for_port(5672)
    time.sleep(10)

    logs_queue_message_policy = {
        'message-ttl': logs_queue_message_ttl,
        'max-length': logs_queue_length_limit
    }
    events_queue_message_policy = {
        'message-ttl': events_queue_message_ttl,
        'max-length': events_queue_length_limit
    }
    metrics_queue_message_policy = {
        'message-ttl': metrics_queue_message_ttl,
        'max-length': metrics_queue_length_limit
    }
    riemann_deployment_queues_message_ttl = {
Code Example #16
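# Same fragment as example #15, reading the properties from ctx_properties
# instead of ctx.node.properties (also truncated).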
    # For some reason, it fails. Need to check.

    events_queue_message_ttl = ctx_properties[
        'rabbitmq_events_queue_message_ttl']
    logs_queue_message_ttl = ctx_properties[
        'rabbitmq_logs_queue_message_ttl']
    metrics_queue_message_ttl = ctx_properties[
        'rabbitmq_metrics_queue_message_ttl']
    events_queue_length_limit = ctx_properties[
        'rabbitmq_events_queue_length_limit']
    logs_queue_length_limit = ctx_properties[
        'rabbitmq_logs_queue_length_limit']
    metrics_queue_length_limit = ctx_properties[
        'rabbitmq_metrics_queue_length_limit']

    utils.wait_for_port(5672)
    time.sleep(10)

    logs_queue_message_policy = {
        'message-ttl': logs_queue_message_ttl,
        'max-length': logs_queue_length_limit
    }
    events_queue_message_policy = {
        'message-ttl': events_queue_message_ttl,
        'max-length': events_queue_length_limit
    }
    metrics_queue_message_policy = {
        'message-ttl': metrics_queue_message_ttl,
        'max-length': metrics_queue_length_limit
    }
    riemann_deployment_queues_message_ttl = {
Code Example #17
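# Module-level variant of the same queue-policy setup, run right after
# restarting RabbitMQ (also truncated).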
# rabbitmq restart exits with 143 status code that is valid in this case.
utils.systemd.restart(SERVICE_NAME, ignore_failure=True)
# This should be done in the create script.
# For some reason, it fails. Need to check.

events_queue_message_ttl = ctx_properties['rabbitmq_events_queue_message_ttl']
logs_queue_message_ttl = ctx_properties['rabbitmq_logs_queue_message_ttl']
metrics_queue_message_ttl = ctx_properties[
    'rabbitmq_metrics_queue_message_ttl']
events_queue_length_limit = ctx_properties[
    'rabbitmq_events_queue_length_limit']
logs_queue_length_limit = ctx_properties['rabbitmq_logs_queue_length_limit']
metrics_queue_length_limit = ctx_properties[
    'rabbitmq_metrics_queue_length_limit']

utils.wait_for_port(PORT)
time.sleep(10)

logs_queue_message_policy = {
    'message-ttl': logs_queue_message_ttl,
    'max-length': logs_queue_length_limit
}
events_queue_message_policy = {
    'message-ttl': events_queue_message_ttl,
    'max-length': events_queue_length_limit
}
metrics_queue_message_policy = {
    'message-ttl': metrics_queue_message_ttl,
    'max-length': metrics_queue_length_limit
}
riemann_deployment_queues_message_ttl = {
Code Example #18
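# Starts the Tornado test instance and blocks until its port accepts
# connections.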
 def start(self):
     super(TornadoTestInstance, self).start()
     wait_for_port(self.host, self.port)
Code Example #19
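# Same RabbitMQ install as example #13 with minor formatting differences.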
def _install_rabbitmq():
    erlang_rpm_source_url = ctx_properties['erlang_rpm_source_url']
    rabbitmq_rpm_source_url = ctx_properties['rabbitmq_rpm_source_url']
    # TODO: maybe we don't need this env var
    os.putenv('RABBITMQ_FD_LIMIT', str(ctx_properties['rabbitmq_fd_limit']))
    rabbitmq_username = ctx_properties['rabbitmq_username']
    rabbitmq_password = ctx_properties['rabbitmq_password']

    ctx.logger.info('Installing RabbitMQ...')
    utils.set_selinux_permissive()

    utils.copy_notice(SERVICE_NAME)
    utils.mkdir(LOG_DIR)

    utils.yum_install(erlang_rpm_source_url, service_name=SERVICE_NAME)
    utils.yum_install(rabbitmq_rpm_source_url, service_name=SERVICE_NAME)

    utils.logrotate(SERVICE_NAME)

    utils.systemd.configure(SERVICE_NAME)

    ctx.logger.info('Configuring File Descriptors Limit...')
    utils.deploy_blueprint_resource(
        '{0}/rabbitmq_ulimit.conf'.format(CONFIG_PATH),
        FD_LIMIT_PATH,
        SERVICE_NAME)

    utils.deploy_blueprint_resource(
        '{0}/rabbitmq-definitions.json'.format(CONFIG_PATH),
        join(HOME_DIR, 'definitions.json'),
        SERVICE_NAME)

    # This stops rabbit from failing if the host name changes, e.g. when
    # a manager is deployed from an image but given a new hostname.
    # This is likely to cause problems with clustering of rabbitmq if this is
    # done at any point, so at that point a change to the file and cleaning of
    # mnesia would likely be necessary.
    utils.deploy_blueprint_resource(
        '{0}/rabbitmq-env.conf'.format(CONFIG_PATH),
        '/etc/rabbitmq/rabbitmq-env.conf',
        SERVICE_NAME)
    # Delete old mnesia node
    utils.sudo(['rm', '-rf', '/var/lib/rabbitmq/mnesia'])

    utils.systemd.systemctl('daemon-reload')

    utils.chown('rabbitmq', 'rabbitmq', LOG_DIR)

    # rabbitmq restart exits with 143 status code that is valid in this case.
    utils.systemd.restart(SERVICE_NAME, ignore_failure=True)

    time.sleep(10)
    utils.wait_for_port(5672)

    ctx.logger.info('Enabling RabbitMQ Plugins...')
    # Occasional timing issues with rabbitmq starting have resulted in
    # failures when first trying to enable plugins
    utils.sudo(['rabbitmq-plugins', 'enable', 'rabbitmq_management'],
               retries=5)
    utils.sudo(['rabbitmq-plugins', 'enable', 'rabbitmq_tracing'], retries=5)

    _clear_guest_permissions_if_guest_exists()
    _create_user_and_set_permissions(rabbitmq_username, rabbitmq_password)

    utils.deploy_blueprint_resource(
        '{0}/rabbitmq.config'.format(CONFIG_PATH),
        join(HOME_DIR, 'rabbitmq.config'),
        SERVICE_NAME, user_resource=True)

    utils.systemd.stop(SERVICE_NAME, retries=5)
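
Every snippet above calls some flavor of wait_for_port without showing its definition, and the examples do not even agree on argument order (the Cloudify scripts pass the port first, the test fixtures pass the host first). As a reference point, here is a minimal sketch of what such a helper typically looks like; the (port, host, timeout, interval) signature and the retry loop are illustrative assumptions, not any of these projects' actual implementations.

import socket
import time


def wait_for_port(port, host='localhost', timeout=60.0, interval=1.0):
    """Block until a TCP connection to (host, port) succeeds.

    Illustrative sketch only; the utils.wait_for_port helpers used in
    the examples above differ per project in signature, logging and
    error handling.
    """
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            # create_connection raises OSError until a listener accepts.
            with socket.create_connection((host, port), timeout=interval):
                return
        except OSError:
            time.sleep(interval)
    raise RuntimeError('port {0} on {1} did not open within {2} seconds'
                       .format(port, host, timeout))

Usage would mirror the calls above, e.g. wait_for_port(5672) or wait_for_port(es_endpoint_port, es_endpoint_ip).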