コード例 #1
0
def verify_nginx(url):
    """Check that *url* answers like the proxied REST service.

    A 200, or a 401 when auth is enabled, are both fine. A 502 would
    mean nginx isn't correctly proxying to the REST service, so anything
    outside the accepted set aborts the operation.
    """
    # python's urllib is avoided on purpose: we would have to stay
    # compatible both with python versions that verify https certs
    # (2.7.9+, and 2.7.5 with centos patches starting from 2.7.5-58)
    # and with ones that don't. curl behaves consistently across
    # distro versions. See also CFY-7222
    curl_command = [
        'curl',
        url,
        '--cacert',
        utils.INTERNAL_CA_CERT_PATH,
        # only output the http code
        '-o',
        '/dev/null',
        '-w',
        '%{http_code}',
    ]
    result = utils.run(curl_command)
    status_code = result.aggr_stdout.strip()
    if status_code not in ('200', '401'):
        ctx.abort_operation('Nginx HTTP check error: {0}'.format(result))
コード例 #2
0
def _set_security(rabbitmq_ssl_enabled,
                  rabbitmq_cert_private,
                  rabbitmq_cert_public):
    """Deploy the rabbitmq config matching the requested SSL mode.

    With SSL enabled, both certificates must be supplied together;
    supplying only one of the pair aborts the operation. With SSL
    disabled, a stray certificate only triggers a warning.
    """
    if not rabbitmq_ssl_enabled:
        utils.deploy_blueprint_resource(
            '{0}/rabbitmq.config-nossl'.format(CONFIG_PATH),
            '/etc/rabbitmq/rabbitmq.config',
            RABBITMQ_SERVICE_NAME, user_resource=True)
        if rabbitmq_cert_private or rabbitmq_cert_public:
            ctx.logger.warn('Broker SSL cert supplied but SSL not enabled '
                            '(broker_ssl_enabled is False).')
        return

    if rabbitmq_cert_private and rabbitmq_cert_public:
        for cert_kind, cert_path, cert_value in (
                ('private', '/etc/rabbitmq/rabbit-priv.pem',
                 rabbitmq_cert_private),
                ('public', '/etc/rabbitmq/rabbit-pub.pem',
                 rabbitmq_cert_public)):
            utils.deploy_ssl_certificate(
                cert_kind, cert_path, 'rabbitmq', cert_value)
        # Configure for SSL
        utils.deploy_blueprint_resource(
            '{0}/rabbitmq.config-ssl'.format(CONFIG_PATH),
            '/etc/rabbitmq/rabbitmq.config',
            RABBITMQ_SERVICE_NAME, user_resource=True)
    else:
        ctx.abort_operation('When providing a certificate for rabbitmq, '
                            'both public and private certificates must be '
                            'supplied.')
コード例 #3
0
def _set_security(rabbitmq_ssl_enabled, rabbitmq_cert_private,
                  rabbitmq_cert_public):
    """Configure rabbitmq for SSL or plain operation.

    Certificates are deployed only when both halves of the pair are
    present; a lone certificate aborts the operation when SSL is on,
    and merely warns when SSL is off.
    """
    have_private = bool(rabbitmq_cert_private)
    have_public = bool(rabbitmq_cert_public)

    if rabbitmq_ssl_enabled:
        if have_private and have_public:
            utils.deploy_ssl_certificate('private',
                                         '/etc/rabbitmq/rabbit-priv.pem',
                                         'rabbitmq', rabbitmq_cert_private)
            utils.deploy_ssl_certificate('public',
                                         '/etc/rabbitmq/rabbit-pub.pem',
                                         'rabbitmq', rabbitmq_cert_public)
            # Configure for SSL
            utils.deploy_blueprint_resource(
                '{0}/rabbitmq.config-ssl'.format(CONFIG_PATH),
                '/etc/rabbitmq/rabbitmq.config',
                RABBITMQ_SERVICE_NAME,
                user_resource=True)
        else:
            ctx.abort_operation('When providing a certificate for rabbitmq, '
                                'both public and private certificates must be '
                                'supplied.')
    else:
        utils.deploy_blueprint_resource(
            '{0}/rabbitmq.config-nossl'.format(CONFIG_PATH),
            '/etc/rabbitmq/rabbitmq.config',
            RABBITMQ_SERVICE_NAME,
            user_resource=True)
        if have_private or have_public:
            ctx.logger.warn('Broker SSL cert supplied but SSL not enabled '
                            '(broker_ssl_enabled is False).')
コード例 #4
0
def dump_upgrade_data():
    """Dump provider_context and snapshot documents to DUMP_FILE_PATH.

    A marker file from a previous successful run makes this a no-op, so
    the dump happens at most once.
    """
    if os.path.exists(DUMP_SUCCESS_FLAG):
        return

    endpoint = _get_es_install_endpoint()
    port = _get_es_install_port()
    storage_endpoint = 'http://{0}:{1}/cloudify_storage'.format(endpoint, port)
    types = ['provider_context', 'snapshot']
    ctx.logger.info('Dumping upgrade data: {0}'.format(types))

    dumped_docs = []
    for doc_type in types:
        res = http_request('{0}/_search?q=_type:{1}&size=10000'.format(
            storage_endpoint, doc_type),
                           method='GET')
        if res.code != 200:
            ctx.abort_operation('Failed fetching type {0} from '
                                'cloudify_storage index'.format(doc_type))
        dumped_docs.extend(json.loads(res.read())['hits']['hits'])

    utils.mkdir(utils.ES_UPGRADE_DUMP_PATH, use_sudo=False)
    with open(DUMP_FILE_PATH, 'w') as dump_file:
        dump_file.writelines(
            json.dumps(doc) + os.linesep for doc in dumped_docs)

    # marker file to indicate dump has succeeded
    with open(DUMP_SUCCESS_FLAG, 'w') as flag_file:
        flag_file.write('success')
コード例 #5
0
def validate():
    """Run bootstrap validations and abort on any accumulated failure.

    Each ``_validate_*`` helper contributes an error string (or a falsy
    value on success). Collected errors abort the operation, unless the
    ``ignore_bootstrap_validations`` property is set, in which case they
    are only logged as warnings.
    """
    ignore_validations = ctx.node.properties['ignore_bootstrap_validations']
    resources_package_url = ctx.node.properties['manager_resources_package']
    physical_memory = \
        ctx.node.properties['minimum_required_total_physical_memory_in_mb']
    disk_space = \
        ctx.node.properties['minimum_required_available_disk_space_in_gb']

    error_summary = []

    error_summary.append(_validate_python_version(
        expected_major_version=2, expected_minor_version=7))
    # BUGFIX: was ('7'), which is just the string '7' — the trailing
    # comma makes it the intended one-element tuple.
    error_summary.append(_validate_supported_distros(
        supported_distros=('centos', 'redhat'),
        supported_versions=('7',)))
    error_summary.append(_validate_sufficient_memory(
        min_memory_required_in_mb=physical_memory))
    error_summary.append(_validate_sufficient_disk_space(
        min_disk_space_required_in_gb=disk_space))
    if resources_package_url:
        error_summary.append(_validate_resources_package_url(
            resources_package_url))

    # if no error occurred in a validation, we need to remove its reference.
    error_summary = [error for error in error_summary if error]
    if error_summary:
        printable_error_summary = '\n' + '\n\t'.join(error_summary)
        if ignore_validations:
            ctx.logger.warn('Ignoring validation errors. {0}'.format(
                printable_error_summary))
        else:
            ctx.abort_operation(printable_error_summary)
コード例 #6
0
def dump_upgrade_data():
    """Export provider_context and snapshot docs into DUMP_FILE_PATH.

    Skips all work when the success marker of an earlier run is present.
    """
    if os.path.exists(DUMP_SUCCESS_FLAG):
        return

    storage_endpoint = 'http://{0}:{1}/cloudify_storage'.format(
        _get_es_install_endpoint(), _get_es_install_port())
    types = ['provider_context', 'snapshot']
    ctx.logger.info('Dumping upgrade data: {0}'.format(types))

    collected = []
    for _type in types:
        search_url = '{0}/_search?q=_type:{1}&size=10000'.format(
            storage_endpoint, _type)
        res = http_request(search_url, method='GET')
        if res.code != 200:
            ctx.abort_operation('Failed fetching type {0} from '
                                'cloudify_storage index'.format(_type))
        payload = json.loads(res.read())
        collected += payload['hits']['hits']

    utils.mkdir(UPGRADE_DUMP_PATH, use_sudo=False)
    with open(DUMP_FILE_PATH, 'w') as out:
        for doc in collected:
            out.write(json.dumps(doc) + os.linesep)

    # marker file to indicate dump has succeeded
    with open(DUMP_SUCCESS_FLAG, 'w') as marker:
        marker.write('success')
コード例 #7
0
def validate():
    """Run bootstrap environment validations, aborting on failures.

    Validation helpers return an error message (or a falsy value when
    the check passes). When ``ignore_bootstrap_validations`` is set the
    failures are only logged; otherwise the operation is aborted with
    the combined summary.
    """
    ignore_validations = ctx.node.properties['ignore_bootstrap_validations']
    resources_package_url = ctx.node.properties['manager_resources_package']
    physical_memory = \
        ctx.node.properties['minimum_required_total_physical_memory_in_mb']
    disk_space = \
        ctx.node.properties['minimum_required_available_disk_space_in_gb']

    error_summary = []

    error_summary.append(
        _validate_python_version(expected_major_version=2,
                                 expected_minor_version=7))
    # BUGFIX: ('7') evaluates to the plain string '7'; the trailing
    # comma makes it the intended one-element tuple of versions.
    error_summary.append(
        _validate_supported_distros(supported_distros=('centos', 'redhat'),
                                    supported_versions=('7',)))
    error_summary.append(
        _validate_sufficient_memory(min_memory_required_in_mb=physical_memory))
    error_summary.append(
        _validate_sufficient_disk_space(
            min_disk_space_required_in_gb=disk_space))
    if resources_package_url:
        error_summary.append(
            _validate_resources_package_url(resources_package_url))

    # if no error occurred in a validation, we need to remove its reference.
    error_summary = [error for error in error_summary if error]
    if error_summary:
        printable_error_summary = '\n' + '\n\t'.join(error_summary)
        if ignore_validations:
            ctx.logger.warn('Ignoring validation errors. {0}'.format(
                printable_error_summary))
        else:
            ctx.abort_operation(printable_error_summary)
コード例 #8
0
def _configure_influxdb(host, port):
    """Create the 'cloudify' database on the InfluxDB at *host*:*port*.

    No-op when the database already exists. Aborts the operation if the
    creation request, or the post-creation verification, fails.
    """
    db_user = "******"
    db_pass = "******"
    db_name = "cloudify"

    ctx.logger.info('Creating InfluxDB Database...')

    # the below request is equivalent to running:
    # curl -S -s "http://localhost:8086/db?u=root&p=root" '-d "{\"name\": \"cloudify\"}"  # NOQA
    import urllib
    import urllib2
    import ast

    endpoint_for_list = 'http://{0}:{1}/db'.format(host, port)
    endpoint_for_creation = ('http://{0}:{1}/cluster/database_configs/'
                             '{2}'.format(host, port, db_name))
    params = urllib.urlencode(dict(u=db_user, p=db_pass))
    url_for_list = endpoint_for_list + '?' + params
    url_for_creation = endpoint_for_creation + '?' + params

    # check if db already exists. The response body is JSON, so parse it
    # with json.loads -- the previous eval() would execute arbitrary
    # code from the network response.
    db_list = json.loads(
        urllib2.urlopen(urllib2.Request(url_for_list)).read())
    if any(d.get('name') == db_name for d in db_list):
        ctx.logger.info('Database {0} already exists!'.format(db_name))
        return

    try:
        utils.deploy_blueprint_resource(
            '{0}/retention.json'.format(CONFIG_PATH),
            '/tmp/retention.json', INFLUX_SERVICE_NAME)
        with open('/tmp/retention.json') as policy_file:
            retention_policy = policy_file.read()
        ctx.logger.debug(
            'Using retention policy: \n{0}'.format(retention_policy))
        # normalize the policy to strict JSON before sending it
        data = json.dumps(ast.literal_eval(retention_policy))
        ctx.logger.debug('Using retention policy: \n{0}'.format(data))
        content_length = len(data)
        request = urllib2.Request(url_for_creation, data, {
            'Content-Type': 'application/json',
            'Content-Length': content_length})
        ctx.logger.debug('Request is: {0}'.format(request))
        request_reader = urllib2.urlopen(request)
        response = request_reader.read()
        ctx.logger.debug('Response: {0}'.format(response))
        request_reader.close()
        utils.remove('/tmp/retention.json')

    except Exception as ex:
        ctx.abort_operation('Failed to create: {0} ({1}).'.format(db_name, ex))

    # verify db created (assert-based control flow removed: asserts are
    # stripped under `python -O`)
    ctx.logger.info('Verifying database created successfully...')
    db_list = json.loads(
        urllib2.urlopen(urllib2.Request(url_for_list)).read())
    if not any(d.get('name') == db_name for d in db_list):
        ctx.abort_operation('Verification failed!')
    ctx.logger.info('Database {0} created successfully.'.format(db_name))
コード例 #9
0
def create(args, **_):
    """Create a linode

    if existing resource provided:
        if the resource actually exists:
            use it
        else:
            if create if missing:
                create the resource
            else:
                fail since it doesn't exist but should
    else:
        assert quota if possible
        create resource even if one already exists of the same name(property)
        verify created
        use the created resource
    set the resource's context (the resource will have a uuid, instance_id, node_id, deployment_id and blueprint_id)  # NOQA
    set the resource's properties
    """
    # TODO: look for an available linode of the same type if it exists
    # and use that instead if a property is provided where the user asks
    # to `find_existing_resource`

    # TODO: should this be abstracted?
    creds = _get_credentials(args)

    new_linode = _create_linode(args, creds)
    if not _linode_created(new_linode):
        ctx.abort_operation('Failed to create resource')
    _use_resource(new_linode.id)
    _set_linode_context()
    _set_linode_properties(new_linode)
コード例 #10
0
def install_riemann():
    """Install and configure the Riemann event-processing service.

    Reads source URLs and broker credentials from ``ctx_properties``,
    installs langohr/daemonize/riemann, records rabbitmq connection
    details in the instance runtime properties, and extracts the
    cloudify-manager repository (needed for Riemann's config) to /tmp.
    """
    langohr_source_url = ctx_properties['langohr_jar_source_url']
    daemonize_source_url = ctx_properties['daemonize_rpm_source_url']
    riemann_source_url = ctx_properties['riemann_rpm_source_url']
    # Needed for Riemann's config
    cloudify_resources_url = ctx_properties['cloudify_resources_url']
    rabbitmq_username = ctx_properties['rabbitmq_username']
    rabbitmq_password = ctx_properties['rabbitmq_password']

    riemann_config_path = '/etc/riemann'
    riemann_log_path = '/var/log/cloudify/riemann'
    langohr_home = '/opt/lib'
    extra_classpath = '{0}/langohr.jar'.format(langohr_home)

    # Confirm username and password have been supplied for broker before
    # continuing.
    # Components other than logstash and riemann have this handled in code.
    # Note that these are not directly used in this script, but are used by the
    # deployed resources, hence the check here.
    if not rabbitmq_username or not rabbitmq_password:
        ctx.abort_operation(
            'Both rabbitmq_username and rabbitmq_password must be supplied '
            'and at least 1 character long in the manager blueprint inputs.')

    # Propagate the broker endpoint/credentials from the rabbitmq node's
    # properties into this instance's runtime properties, where the
    # deployed Riemann config can read them.
    rabbit_props = utils.ctx_factory.get('rabbitmq')
    ctx.instance.runtime_properties['rabbitmq_endpoint_ip'] = \
        utils.get_rabbitmq_endpoint_ip(
                rabbit_props.get('rabbitmq_endpoint_ip'))
    ctx.instance.runtime_properties['rabbitmq_username'] = \
        rabbit_props.get('rabbitmq_username')
    ctx.instance.runtime_properties['rabbitmq_password'] = \
        rabbit_props.get('rabbitmq_password')

    ctx.logger.info('Installing Riemann...')
    utils.set_selinux_permissive()

    utils.copy_notice(RIEMANN_SERVICE_NAME)
    utils.mkdir(riemann_log_path)
    utils.mkdir(langohr_home)
    utils.mkdir(riemann_config_path)
    utils.mkdir('{0}/conf.d'.format(riemann_config_path))

    # langohr (RabbitMQ client for Riemann) ships as a plain jar placed
    # on Riemann's extra classpath.
    langohr = utils.download_cloudify_resource(langohr_source_url,
                                               RIEMANN_SERVICE_NAME)
    utils.sudo(['cp', langohr, extra_classpath])
    ctx.logger.info('Applying Langohr permissions...')
    utils.sudo(['chmod', '644', extra_classpath])
    utils.yum_install(daemonize_source_url, service_name=RIEMANN_SERVICE_NAME)
    utils.yum_install(riemann_source_url, service_name=RIEMANN_SERVICE_NAME)

    utils.logrotate(RIEMANN_SERVICE_NAME)

    ctx.logger.info('Downloading cloudify-manager Repository...')
    manager_repo = utils.download_cloudify_resource(cloudify_resources_url,
                                                    RIEMANN_SERVICE_NAME)
    ctx.logger.info('Extracting Manager Repository...')
    utils.untar(manager_repo, '/tmp')
コード例 #11
0
def install_riemann():
    """Install the Riemann service and its dependencies.

    Validates broker credentials, copies rabbitmq connection details
    into runtime properties for the deployed Riemann resources, installs
    langohr, daemonize and riemann, and unpacks the cloudify-manager
    repository (a source for Riemann's config) under /tmp.
    """
    langohr_source_url = ctx_properties['langohr_jar_source_url']
    daemonize_source_url = ctx_properties['daemonize_rpm_source_url']
    riemann_source_url = ctx_properties['riemann_rpm_source_url']
    # Needed for Riemann's config
    cloudify_resources_url = ctx_properties['cloudify_resources_url']
    rabbitmq_username = ctx_properties['rabbitmq_username']
    rabbitmq_password = ctx_properties['rabbitmq_password']

    riemann_config_path = '/etc/riemann'
    riemann_log_path = '/var/log/cloudify/riemann'
    langohr_home = '/opt/lib'
    extra_classpath = '{0}/langohr.jar'.format(langohr_home)

    # Confirm username and password have been supplied for broker before
    # continuing.
    # Components other than logstash and riemann have this handled in code.
    # Note that these are not directly used in this script, but are used by the
    # deployed resources, hence the check here.
    if not rabbitmq_username or not rabbitmq_password:
        ctx.abort_operation(
            'Both rabbitmq_username and rabbitmq_password must be supplied '
            'and at least 1 character long in the manager blueprint inputs.')

    # Mirror the rabbitmq node's connection settings into this
    # instance's runtime properties for the deployed resources to use.
    rabbit_props = utils.ctx_factory.get('rabbitmq')
    ctx.instance.runtime_properties['rabbitmq_endpoint_ip'] = \
        utils.get_rabbitmq_endpoint_ip(
                rabbit_props.get('rabbitmq_endpoint_ip'))
    ctx.instance.runtime_properties['rabbitmq_username'] = \
        rabbit_props.get('rabbitmq_username')
    ctx.instance.runtime_properties['rabbitmq_password'] = \
        rabbit_props.get('rabbitmq_password')

    ctx.logger.info('Installing Riemann...')
    utils.set_selinux_permissive()

    utils.copy_notice(RIEMANN_SERVICE_NAME)
    utils.mkdir(riemann_log_path)
    utils.mkdir(langohr_home)
    utils.mkdir(riemann_config_path)
    utils.mkdir('{0}/conf.d'.format(riemann_config_path))

    # Place the langohr jar on Riemann's extra classpath, world-readable.
    langohr = utils.download_cloudify_resource(langohr_source_url,
                                               RIEMANN_SERVICE_NAME)
    utils.sudo(['cp', langohr, extra_classpath])
    ctx.logger.info('Applying Langohr permissions...')
    utils.sudo(['chmod', '644', extra_classpath])
    utils.yum_install(daemonize_source_url, service_name=RIEMANN_SERVICE_NAME)
    utils.yum_install(riemann_source_url, service_name=RIEMANN_SERVICE_NAME)

    utils.logrotate(RIEMANN_SERVICE_NAME)

    ctx.logger.info('Downloading cloudify-manager Repository...')
    manager_repo = utils.download_cloudify_resource(cloudify_resources_url,
                                                    RIEMANN_SERVICE_NAME)
    ctx.logger.info('Extracting Manager Repository...')
    utils.untar(manager_repo, '/tmp')
コード例 #12
0
def _assert_webserver_running():
    """Abort unless the sanity webserver on localhost:8080 answers 200."""
    response = utils.http_request('http://localhost:8080',
                                  method='GET',
                                  timeout=10)
    if not response:
        ctx.abort_operation("Can't connect to webserver")
    if response.code != 200:
        ctx.abort_operation('Sanity app webserver failed to start')
コード例 #13
0
def install_riemann():
    """Install Riemann under a dedicated service user.

    Creates the riemann service user, validates broker credentials,
    installs langohr/daemonize/riemann, fixes file ownership, and
    records the paths to clean up on uninstall in runtime properties.
    """
    langohr_source_url = ctx_properties['langohr_jar_source_url']
    daemonize_source_url = ctx_properties['daemonize_rpm_source_url']
    riemann_source_url = ctx_properties['riemann_rpm_source_url']

    utils.create_service_user(user=RIEMANN_USER,
                              group=RIEMANN_GROUP,
                              home=utils.CLOUDIFY_HOME_DIR)

    riemann_config_path = '/etc/riemann'
    riemann_log_path = '/var/log/cloudify/riemann'
    langohr_home = '/opt/lib'
    extra_classpath = '{0}/langohr.jar'.format(langohr_home)
    riemann_dir = '/opt/riemann'

    # Confirm username and password have been supplied for broker before
    # continuing.
    # Components other than logstash and riemann have this handled in code.
    # Note that these are not directly used in this script, but are used by the
    # deployed resources, hence the check here.
    rabbitmq_username = ctx_properties['rabbitmq_username']
    rabbitmq_password = ctx_properties['rabbitmq_password']
    if not rabbitmq_username or not rabbitmq_password:
        ctx.abort_operation(
            'Both rabbitmq_username and rabbitmq_password must be supplied '
            'and at least 1 character long in the manager blueprint inputs.')

    runtime_props['rabbitmq_endpoint_ip'] = utils.get_rabbitmq_endpoint_ip()

    ctx.logger.info('Installing Riemann...')
    utils.set_selinux_permissive()

    utils.copy_notice(RIEMANN_SERVICE_NAME)
    utils.mkdir(riemann_log_path)
    utils.mkdir(langohr_home)
    utils.mkdir(riemann_config_path)
    utils.mkdir('{0}/conf.d'.format(riemann_config_path))

    # utils.chown cannot be used as it will change both user and group
    utils.sudo(['chown', RIEMANN_USER, riemann_dir])

    # Place the langohr jar on Riemann's extra classpath, world-readable.
    langohr = utils.download_cloudify_resource(langohr_source_url,
                                               RIEMANN_SERVICE_NAME)
    utils.sudo(['cp', langohr, extra_classpath])
    ctx.logger.info('Applying Langohr permissions...')
    utils.sudo(['chmod', '644', extra_classpath])
    utils.yum_install(daemonize_source_url, service_name=RIEMANN_SERVICE_NAME)
    utils.yum_install(riemann_source_url, service_name=RIEMANN_SERVICE_NAME)

    utils.chown(RIEMANN_USER, RIEMANN_GROUP, riemann_log_path)

    utils.logrotate(RIEMANN_SERVICE_NAME)

    # Recorded so the uninstall workflow knows what to remove.
    files_to_remove = [
        riemann_config_path, riemann_log_path, extra_classpath, riemann_dir
    ]
    runtime_props['files_to_remove'] = files_to_remove
コード例 #14
0
def _assert_completed(linode):
    """Track the linode shutdown: retry while in progress, abort on failure."""
    status = _get_linode_status(linode)
    if status == 'offline':
        ctx.logger.info('Linode shutdown successfully')
    elif status == 'shutting_down':
        ctx.operation.retry(
            message='Waiting for operation to complete. Retrying...',
            retry_after=30)
    else:
        ctx.abort_operation('Linode shutdown failed')
コード例 #15
0
def _assert_webserver_running():
    """Probe the sanity app webserver and abort on failure.

    Aborts when the server is unreachable or answers anything but 200.
    """
    sanity_url = 'http://localhost:8080'
    resp = utils.http_request(sanity_url, method='GET', timeout=10)
    if not resp:
        ctx.abort_operation("Can't connect to webserver")
    if resp.code != 200:
        ctx.abort_operation('Sanity app webserver failed to start')
コード例 #16
0
def _get_credentials(args):
    """Return the linode API token from *args* or the credentials files.

    Falls back to the shared credentials lookup when *args* carries no
    token; aborts the operation when no token can be found at all.
    """
    credentials = args.get('token')
    credentials = credentials or \
        common._get_credentials('linode').get('token')
    if not credentials:
        ctx.abort_operation(
            'Could not retrieve credentials. '
            'You should either supply credentials in the blueprint, '
            'provide a credentials file to look in or have credential files '
            'under one of: {0}'.format(CREDENTIALS_FILE_PATHS))
    # BUGFIX: the original never returned, so callers always got None.
    return credentials
コード例 #17
0
def wait_for_workflow(client, deployment_id, workflow_id):
    """Return True when the given workflow has terminated, False otherwise.

    Aborts the operation if a matching execution reports 'failed'.
    """
    for execution in client.executions.list(deployment_id=deployment_id):
        if execution.workflow_id != workflow_id:
            continue
        status = execution.status
        if status == 'terminated':
            return True
        if status == 'failed':
            ctx.abort_operation('Execution with id {0} failed'.format(
                execution.id))
    return False
コード例 #18
0
 def splitext(filename):
     # not using os.path.splitext as it would return .gz instead of
     # .tar.gz
     if filename.endswith('.tar.gz'):
         return '.tar.gz'
     elif filename.endswith('.exe'):
         return '.exe'
     else:
         ctx.abort_operation(
             'Unknown agent format for {0}. '
             'Must be either tar.gz or exe'.format(filename))
コード例 #19
0
def check_index_exists(url, index_name='cloudify_storage'):
    """Check that the cloudify_storage ES index exists.

    Returns the open response on success; aborts the operation when the
    index is missing (HTTP 404) or on any other URL error.
    """
    index_url = urlparse.urljoin(url, index_name)
    try:
        return urllib2.urlopen(index_url)
    except urllib2.URLError as e:
        # BUGFIX: only HTTPError carries `.code`; a plain URLError (e.g.
        # connection refused) has no such attribute and the original
        # `e.code` raised AttributeError instead of aborting cleanly.
        if getattr(e, 'code', None) == 404:
            ctx.abort_operation(
                'The index {0} does not exist in ES'.format(index_name))
        else:
            ctx.abort_operation('Invalid ES response: {0}'.format(e))
コード例 #20
0
 def splitext(filename):
     # not using os.path.splitext as it would return .gz instead of
     # .tar.gz
     if filename.endswith('.tar.gz'):
         return '.tar.gz'
     elif filename.endswith('.exe'):
         return '.exe'
     else:
         ctx.abort_operation(
             'Unknown agent format for {0}. '
             'Must be either tar.gz or exe'.format(filename))
コード例 #21
0
def _load_credentials_file(path):
    """Parse and return the YAML credentials file at *path*.

    Aborts the operation when the file cannot be read or is not valid
    YAML.
    """
    # BUGFIX: open() must sit inside the try block -- in the original it
    # was outside, so the IOError handler ("not accessible") could never
    # catch the most common failure: the file failing to open.
    try:
        with open(path) as credentials_file:
            return yaml.safe_load(credentials_file.read())
    except IOError as ex:
        ctx.abort_operation(
            'Credentials file {0} is not accessible ({1})'.format(
                path, ex))
    except (yaml.parser.ParserError, yaml.scanner.ScannerError) as ex:
        ctx.abort_operation('{0} must be a valid YAML file ({1})'.format(
            path, ex))
コード例 #22
0
def check_index_exists(url, index_name='cloudify_storage'):
    """Check that the cloudify_storage ES index exists.

    Returns the open response when the index is reachable; aborts the
    operation on a 404 (missing index) or any other URL error.
    """
    index_url = urlparse.urljoin(url, index_name)
    try:
        return urllib2.urlopen(index_url)
    except urllib2.URLError as e:
        # BUGFIX: a bare URLError (connection refused, DNS failure...)
        # has no `.code` attribute -- that only exists on HTTPError --
        # so the original `e.code` crashed with AttributeError.
        if getattr(e, 'code', None) == 404:
            ctx.abort_operation('The index {0} does not exist in ES'.format(
                index_name))
        else:
            ctx.abort_operation('Invalid ES response: {0}'.format(e))
コード例 #23
0
def restore_upgrade_data(es_endpoint_ip, es_endpoint_port):
    """Replay the dumped documents into elasticsearch via the bulk API."""
    bulk_endpoint = 'http://{0}:{1}/_bulk'.format(es_endpoint_ip,
                                                  es_endpoint_port)
    with open(DUMP_FILE_PATH) as dump_file:
        payload = ''.join(
            _create_index_request(line) for line in dump_file)
    ctx.logger.info('Restoring elasticsearch data')
    res = http_request(url=bulk_endpoint, data=payload, method='POST')
    if res.code != 200:
        ctx.abort_operation('Failed restoring elasticsearch data.')
    ctx.logger.info('Elasticsearch data was successfully restored')
コード例 #24
0
def restore_upgrade_data(es_endpoint_ip, es_endpoint_port):
    """POST the previously dumped documents back into elasticsearch.

    Builds one bulk request body from the dump file and aborts the
    operation if the bulk endpoint does not answer 200.
    """
    bulk_endpoint = 'http://{0}:{1}/_bulk'.format(es_endpoint_ip,
                                                  es_endpoint_port)
    with open(DUMP_FILE_PATH) as dump_file:
        request_parts = [_create_index_request(line)
                         for line in dump_file.readlines()]
    bulk_body = ''.join(request_parts)
    ctx.logger.info('Restoring elasticsearch data')
    res = http_request(url=bulk_endpoint, data=bulk_body, method='POST')
    if res.code != 200:
        ctx.abort_operation('Failed restoring elasticsearch data.')
    ctx.logger.info('Elasticsearch data was successfully restored')
コード例 #25
0
def restore_upgrade_data(es_endpoint_ip, es_endpoint_port):
    """Restore dumped elasticsearch documents through the bulk API.

    Each dump line is parsed, migrated via _update_element_if_required,
    and folded into one bulk request body.
    """
    bulk_endpoint = 'http://{0}:{1}/_bulk'.format(es_endpoint_ip,
                                                  es_endpoint_port)
    request_chunks = []
    with open(DUMP_FILE_PATH) as dump_file:
        for raw_line in dump_file:
            element = _update_element_if_required(json.loads(raw_line))
            request_chunks.append(_create_element_request(element))
    ctx.logger.info('Restoring elasticsearch data')
    res = utils.http_request(url=bulk_endpoint, data=''.join(request_chunks),
                             method='POST')
    if res.code != 200:
        ctx.abort_operation('Failed restoring elasticsearch data.')
    ctx.logger.info('Elasticsearch data was successfully restored')
コード例 #26
0
def _delete_sanity_blueprint():
    """Delete the sanity blueprint from the manager, if present."""
    if not _is_sanity_blueprint_exist():
        return
    blueprint_url = '{0}/blueprints/{1}'.format(_get_url_prefix(),
                                                BLUEPRINT_ID)
    maintenance_headers = utils.create_maintenance_headers()
    resp = utils.http_request(blueprint_url,
                              method='DELETE',
                              headers=maintenance_headers)
    if resp.code != 200:
        ctx.abort_operation('Failed deleting '
                            'deployment {0}: {1}'.format(BLUEPRINT_ID,
                                                         resp.reason))
コード例 #27
0
def _delete_sanity_blueprint():
    """Remove the sanity-check blueprint via the manager REST API.

    No-op when the blueprint is absent; aborts on any non-200 response.
    """
    if not _is_sanity_blueprint_exist():
        return
    headers = utils.create_maintenance_headers()
    target = '{0}/blueprints/{1}'.format(_get_url_prefix(), BLUEPRINT_ID)
    resp = utils.http_request(target, method='DELETE', headers=headers)
    if resp.code != 200:
        ctx.abort_operation('Failed deleting '
                            'deployment {0}: {1}'.format(
                                BLUEPRINT_ID, resp.reason))
コード例 #28
0
def restore_upgrade_data(es_endpoint_ip, es_endpoint_port):
    """Push the dumped (and migrated) documents back into elasticsearch."""
    bulk_endpoint = 'http://{0}:{1}/_bulk'.format(es_endpoint_ip,
                                                  es_endpoint_port)
    with open(DUMP_FILE_PATH) as dump_file:
        # migrate each dumped document before folding it into the bulk body
        bulk_body = ''.join(
            _create_element_request(
                _update_element_if_required(json.loads(line)))
            for line in dump_file)
    ctx.logger.info('Restoring elasticsearch data')
    res = utils.http_request(url=bulk_endpoint, data=bulk_body, method='POST')
    if res.code != 200:
        ctx.abort_operation('Failed restoring elasticsearch data.')
    ctx.logger.info('Elasticsearch data was successfully restored')
コード例 #29
0
def verify_elasticsearch_running(url):
    """Check that ES is running, and that it contains the provider context.

    This is a sanity check that the manager we're upgrading was bootstrapped
    correctly.

    :param url: base URL of the elasticsearch service.
    """
    provider_context_url = urlparse.urljoin(url, 'cloudify_storage/'
                                                 'provider_context/CONTEXT')
    try:
        urllib2.urlopen(provider_context_url)
    except urllib2.URLError as e:
        ctx.abort_operation('ES returned an error when getting the provider '
                            'context: {0}'.format(e))
        # defensive: re-raise so the URLError still propagates even if
        # abort_operation were to return instead of raising
        raise
コード例 #30
0
def verify_properties():
    """Compare node properties and decide if upgrading is allowed.

    Shrinking the Elasticsearch heap relative to the bootstrap value is
    forbidden and aborts the operation.
    """
    changed = utils.changed_upgrade_properties(ES_SERVICE_NAME)
    if 'es_heap_size' not in changed:
        return

    old_raw, new_raw = changed['es_heap_size']
    old_heap = utils.parse_jvm_heap_size(old_raw)
    new_heap = utils.parse_jvm_heap_size(new_raw)
    if new_heap < old_heap:
        ctx.abort_operation('Upgrading a Cloudify Manager with '
                            'Elasticsearch Heap Size lower than what it '
                            'was initially bootstrapped with is not '
                            'allowed.')
コード例 #31
0
def verify_properties():
    """Decide whether the upgrade may proceed based on changed properties.

    Aborts when the requested Elasticsearch heap is smaller than the
    one used at bootstrap time.
    """
    changed = utils.changed_upgrade_properties(ES_SERVICE_NAME)

    if 'es_heap_size' in changed:
        sizes = [utils.parse_jvm_heap_size(value)
                 for value in changed['es_heap_size']]
        bootstrap_heap, upgrade_heap = sizes
        if upgrade_heap < bootstrap_heap:
            ctx.abort_operation('Upgrading a Cloudify Manager with '
                                'Elasticsearch Heap Size lower than what it '
                                'was initially bootstrapped with is not '
                                'allowed.')
コード例 #32
0
def verify_elasticsearch_running(url):
    """Check that ES is running, and that it contains the provider context.

    This is a sanity check that the manager we're upgrading was bootstrapped
    correctly.

    :param url: base URL of the elasticsearch service.
    """
    provider_context_url = urlparse.urljoin(url, 'cloudify_storage/'
                                                 'provider_context/CONTEXT')
    try:
        urllib2.urlopen(provider_context_url)
    except urllib2.URLError as e:
        ctx.abort_operation('ES returned an error when getting the provider '
                            'context: {0}'.format(e))
        # safety net: propagate the URLError even if abort_operation
        # returns rather than raising
        raise
コード例 #33
0
def configure_logstash():
    """Deploy Logstash config files, set up logrotate and enable the service.

    Expects rabbitmq_username/rabbitmq_password to already be present in
    this instance's runtime properties.
    """
    logstash_conf_path = '/etc/logstash/conf.d'

    runtime_properties = ctx.instance.runtime_properties
    rabbitmq_username = runtime_properties.get('rabbitmq_username')
    rabbitmq_password = runtime_properties.get('rabbitmq_password')

    # Confirm username and password have been supplied for broker before
    # continuing.
    # Components other than logstash and riemann have this handled in code.
    # Note that these are not directly used in this script, but are used by the
    # deployed resources, hence the check here.
    if not rabbitmq_username or not rabbitmq_password:
        ctx.abort_operation(
            'Both rabbitmq_username and rabbitmq_password must be supplied '
            'and at least 1 character long in the manager blueprint inputs.')

    ctx.logger.info('Deploying Logstash configuration...')
    utils.deploy_blueprint_resource(
        '{0}/logstash.conf'.format(CONFIG_PATH),
        '{0}/logstash.conf'.format(logstash_conf_path),
        LOGSTASH_SERVICE_NAME)

    # Due to a bug in the handling of configuration files,
    # configuration files with the same name cannot be deployed.
    # Since the logrotate config file is called `logstash`,
    # we change the name of the logstash env vars config file
    # from logstash to cloudify-logstash to be consistent with
    # other service env var files.
    init_file = '/etc/init.d/logstash'
    # regex replace: point the init script at the renamed sysconfig file
    utils.replace_in_file(
        'sysconfig/\$name',
        'sysconfig/cloudify-$name',
        init_file)
    utils.chmod('755', init_file)
    utils.chown('root', 'root', init_file)

    ctx.logger.debug('Deploying Logstash sysconfig...')
    utils.deploy_blueprint_resource(
        '{0}/cloudify-logstash'.format(CONFIG_PATH),
        '/etc/sysconfig/cloudify-logstash',
        LOGSTASH_SERVICE_NAME)

    utils.logrotate(LOGSTASH_SERVICE_NAME)
    # enable the SysV service on boot
    utils.sudo(['/sbin/chkconfig', 'logstash', 'on'])
    utils.clean_var_log_dir(LOGSTASH_SERVICE_NAME)
コード例 #34
0
def _deploy_external_cert():
    """Deploy the user-provided external cert/key pair, or generate one.

    Providing only one half of the pair is a configuration error and
    aborts the operation.
    """
    cert_done, key_done = _deploy_cert_and_key(
        'rest_certificate', 'rest_key',
        utils.EXTERNAL_CERT_PATH, utils.EXTERNAL_KEY_PATH)

    if cert_done and key_done:
        ctx.logger.info(
            'Deployed user-provided external SSL certificate and private key')
    elif not cert_done and not key_done:
        # nothing supplied - fall back to a self-generated certificate
        _generate_external_cert()
    else:
        what_deployed = 'cert' if cert_done else 'key'
        ctx.abort_operation('Either both the external cert and the external '
                            'key must be provided, or neither. Only the {0} '
                            'was provided'.format(what_deployed))

    src_runtime_props['external_cert_path'] = utils.EXTERNAL_CERT_PATH
    src_runtime_props['external_key_path'] = utils.EXTERNAL_KEY_PATH
コード例 #35
0
def _delete_linode(resource_id, credentials):
    """Destroy the linode identified by resource_id and verify deletion.

    :param resource_id: ID of the linode to destroy
    :param credentials: API credentials used to look up the linode
    """
    ctx.logger.info('Destroying linode...')
    linode = _get_linode(resource_id, credentials)
    if linode:
        linode.destroy()
        _assert_completed(linode)
    # Fixed: previously this was `raise ctx.abort_operation(...)`.
    # abort_operation raises on its own (it is called bare everywhere else
    # in this file); if it ever returned, `raise None` would be a TypeError
    # masking the real failure.
    if _get_linode(resource_id, credentials):
        ctx.abort_operation('Linode not destroyed')
    else:
        ctx.logger.info('Linode destroyed successfully')
コード例 #36
0
def _configure_influxdb(host, port):
    """Create the 'cloudify' database in InfluxDB.

    No-op (with a log message) if the database already exists; aborts the
    operation if creation or verification fails.

    :param host: InfluxDB host
    :param port: InfluxDB HTTP API port
    """
    db_user = "******"
    db_pass = "******"
    db_name = "cloudify"

    ctx.logger.info('Creating InfluxDB Database...')

    # the below request is equivalent to running:
    # curl -S -s "http://localhost:8086/db?u=root&p=root" '-d "{\"name\": \"cloudify\"}"  # NOQA
    import urllib
    import urllib2

    endpoint = 'http://{0}:{1}/db'.format(host, port)
    params = urllib.urlencode(dict(u=db_user, p=db_pass))
    data = {'name': db_name}
    url = endpoint + '?' + params

    # Check if the db already exists. Parse the response with json.loads:
    # the previous eval() would execute arbitrary expressions returned by
    # the server, and `assert`-driven control flow is stripped under -O.
    db_list = json.loads(urllib2.urlopen(urllib2.Request(url)).read())
    if any(d.get('name') == db_name for d in db_list):
        ctx.logger.info('Database {0} already exists!'.format(db_name))
        return

    ctx.logger.info('Request is: {0} \'{1}\''.format(url, data))

    try:
        urllib2.urlopen(urllib2.Request(url, json.dumps(data)))
    except Exception as ex:
        msg = 'Failed to create: {0} ({1}).'.format(db_name, ex)
        ctx.abort_operation(msg)

    # verify db created
    ctx.logger.info('Verifying database created successfully...')
    db_list = json.loads(urllib2.urlopen(urllib2.Request(url)).read())
    if not any(d.get('name') == db_name for d in db_list):
        ctx.abort_operation('Verification failed!')
    ctx.logger.info('Database {0} created successfully.'.format(db_name))
コード例 #37
0
def _configure_influxdb(host, port):
    """Create the 'cloudify' database in InfluxDB.

    No-op (with a log message) if the database already exists; aborts the
    operation if creation or verification fails.

    :param host: InfluxDB host
    :param port: InfluxDB HTTP API port
    """
    db_user = "******"
    db_pass = "******"
    db_name = "cloudify"

    ctx.logger.info('Creating InfluxDB Database...')

    # the below request is equivalent to running:
    # curl -S -s "http://localhost:8086/db?u=root&p=root" '-d "{\"name\": \"cloudify\"}"  # NOQA
    import urllib
    import urllib2

    endpoint = 'http://{0}:{1}/db'.format(host, port)
    params = urllib.urlencode(dict(u=db_user, p=db_pass))
    data = {'name': db_name}
    url = endpoint + '?' + params

    # Check if the db already exists. Parse the response with json.loads:
    # the previous eval() would execute arbitrary expressions returned by
    # the server, and `assert`-driven control flow is stripped under -O.
    db_list = json.loads(urllib2.urlopen(urllib2.Request(url)).read())
    if any(d.get('name') == db_name for d in db_list):
        ctx.logger.info('Database {0} already exists!'.format(db_name))
        return

    ctx.logger.info('Request is: {0} \'{1}\''.format(url, data))

    try:
        urllib2.urlopen(urllib2.Request(url, json.dumps(data)))
    except Exception as ex:
        msg = 'Failed to create: {0} ({1}).'.format(db_name, ex)
        ctx.abort_operation(msg)

    # verify db created
    ctx.logger.info('Verifying database created successfully...')
    db_list = json.loads(urllib2.urlopen(urllib2.Request(url)).read())
    if not any(d.get('name') == db_name for d in db_list):
        ctx.abort_operation('Verification failed!')
    ctx.logger.info('Database {0} created successfully.'.format(db_name))
コード例 #38
0
def validate():
    """Run bootstrap validations and abort on any failure.

    Each helper returns an error string on failure (falsy on success);
    collected errors are reported together. When the
    `ignore_bootstrap_validations` property is set, errors are only logged.
    """
    ignore_validations = ctx.node.properties['ignore_bootstrap_validations']
    resources_package_url = ctx.node.properties['manager_resources_package']
    physical_memory = \
        ctx.node.properties['minimum_required_total_physical_memory_in_mb']
    disk_space = \
        ctx.node.properties['minimum_required_available_disk_space_in_gb']
    heap_size_gap = ctx.node.properties['allowed_heap_size_gap_in_mb']

    error_summary = []

    error_summary.append(
        _validate_python_version(expected_major_version=2,
                                 expected_minor_version=7))
    # Fixed: ('7') is just the string '7' - a one-element tuple needs a
    # trailing comma, matching the plural `supported_versions` contract.
    error_summary.append(
        _validate_supported_distros(supported_distros=('centos', 'redhat'),
                                    supported_versions=('7',)))
    error_summary.append(
        _validate_sufficient_memory(min_memory_required_in_mb=physical_memory))
    error_summary.append(
        _validate_sufficient_disk_space(
            min_disk_space_required_in_gb=disk_space))
    # memory validation for es is only relevant during bootstrap for now.
    if _is_bootstrap():
        # remove last character as it contains the `g` or `m`.
        es_heap_size = ctx.node.properties['es_heap_size']
        error_summary.append(
            _validate_es_heap_size(es_heap_size=es_heap_size,
                                   allowed_gap_in_mb=heap_size_gap))
    if resources_package_url:
        error_summary.append(
            _validate_resources_package_url(resources_package_url))

    # if no error occurred in a validation, we need to remove its reference.
    error_summary = [error for error in error_summary if error]
    if error_summary:
        printable_error_summary = '\n' + '\n\t'.join(error_summary)
        if ignore_validations:
            ctx.logger.warn('Ignoring validation errors. {0}'.format(
                printable_error_summary))
        else:
            ctx.abort_operation(printable_error_summary)
コード例 #39
0
def _assert_deployment_monitoring_data_exists():
    """Abort unless influxdb holds recent monitoring data for DEPLOYMENT_ID.

    Queries the 'cloudify' database for series whose name starts with the
    deployment id, written within the last 5 seconds.
    """
    influx_props = utils.ctx_factory.get('influxdb')
    influx_host_ip = influx_props.get('influxdb_endpoint_ip')

    # an empty endpoint ip means influxdb runs locally
    if influx_host_ip == '':
        influx_host_ip = 'localhost'
    influx_user = '******'
    influx_pass = '******'

    query = 'select * from /^{0}\./i ' \
            'where time > now() - 5s'.format(DEPLOYMENT_ID)

    params = urllib.urlencode(dict(u=influx_user, p=influx_pass, q=query))

    base_url = 'http://{0}:8086/db/cloudify/series'.format(influx_host_ip)
    full_url = '{0}?{1}'.format(base_url, params)

    response = utils.http_request(full_url, method='GET', timeout=30)

    if not response:
        ctx.abort_operation("Can't connect to influxdb")
    if response.code != 200:
        ctx.abort_operation('Received invalid response from the '
                            'monitoring service: {0}'.format(response.reason))

    payload = json.loads(response.readlines()[0])
    if not payload:
        ctx.abort_operation('No monitoring data received')
コード例 #40
0
def _assert_deployment_monitoring_data_exists():
    """Abort unless influxdb holds recent monitoring data for DEPLOYMENT_ID.

    Queries the 'cloudify' database for series whose name starts with the
    deployment id, written within the last 5 seconds.
    """
    influx_props = utils.ctx_factory.get('influxdb')
    influx_host_ip = influx_props.get('influxdb_endpoint_ip')

    # an empty endpoint ip means influxdb runs locally
    if influx_host_ip == '':
        influx_host_ip = 'localhost'
    influx_user = '******'
    influx_pass = '******'

    # case-insensitive regex match on series names prefixed with the
    # deployment id
    query = 'select * from /^{0}\./i ' \
            'where time > now() - 5s'.format(DEPLOYMENT_ID)

    params = urllib.urlencode(dict(u=influx_user, p=influx_pass, q=query))

    endpoint = 'http://{0}:8086/db/cloudify/series'.format(influx_host_ip)
    url = endpoint + '?' + params

    resp = utils.http_request(url, method='GET', timeout=30)

    if not resp:
        ctx.abort_operation("Can't connect to influxdb")
    if resp.code != 200:
        ctx.abort_operation('Received invalid response from the '
                            'monitoring service: {0}'.format(resp.reason))

    # the body is a JSON list of series; an empty list means no data
    resp_content = resp.readlines()
    json_resp = json.loads(resp_content[0])
    if not json_resp:
        ctx.abort_operation('No monitoring data received')
コード例 #41
0
def main():
    """Install a local Elasticsearch, or validate an external endpoint.

    When no `es_endpoint_ip` input is given, ES is installed and configured
    on this host; otherwise the external endpoint is checked and configured.
    Handles upgrade/rollback data dump/restore around the operation.
    """
    es_endpoint_ip = ctx_properties['es_endpoint_ip']
    es_endpoint_port = ctx_properties['es_endpoint_port']

    # on upgrade, persist existing data before touching the service
    if utils.is_upgrade:
        dump_upgrade_data()

    if not es_endpoint_ip:
        # no external endpoint provided: install ES locally on this host
        es_endpoint_ip = ctx.instance.host_ip
        _install_elasticsearch()
        utils.systemd.restart(ES_SERVICE_NAME, append_prefix=False)
        utils.wait_for_port(es_endpoint_port, es_endpoint_ip)
        _configure_elasticsearch(host=es_endpoint_ip, port=es_endpoint_port)
        # avoid 'all shards failed' errors right after first start
        _wait_for_shards(es_endpoint_port, es_endpoint_ip)

        utils.clean_var_log_dir('elasticsearch')
    else:
        ctx.logger.info('External Elasticsearch Endpoint provided: '
                        '{0}:{1}...'.format(es_endpoint_ip, es_endpoint_port))
        time.sleep(5)
        utils.wait_for_port(es_endpoint_port, es_endpoint_ip)
        ctx.logger.info('Checking if \'cloudify_storage\' '
                        'index already exists...')

        # a pre-existing storage index means this endpoint already serves
        # another manager - refuse to clobber it
        if http_request('http://{0}:{1}/cloudify_storage'.format(
                es_endpoint_ip, es_endpoint_port),
                        method='HEAD').code == 200:
            ctx.abort_operation(
                '\'cloudify_storage\' index already exists on '
                '{0}, terminating bootstrap...'.format(es_endpoint_ip))
        _configure_elasticsearch(host=es_endpoint_ip, port=es_endpoint_port)

    if utils.is_upgrade or utils.is_rollback:
        restore_upgrade_data(es_endpoint_ip, es_endpoint_port)

    # NOTE(review): this tests the *port*, not the ip - it looks like it may
    # have been meant to stop the local service only when no external
    # endpoint ip was provided; confirm before changing.
    if not es_endpoint_port:
        utils.systemd.stop(ES_SERVICE_NAME, append_prefix=False)

    ctx.instance.runtime_properties['es_endpoint_ip'] = es_endpoint_ip
コード例 #42
0
def validate():
    """Run bootstrap validations and abort on any failure.

    Each helper returns an error string on failure (falsy on success);
    collected errors are reported together. When the
    `ignore_bootstrap_validations` property is set, errors are only logged.
    """
    ignore_validations = ctx.node.properties['ignore_bootstrap_validations']
    resources_package_url = ctx.node.properties['manager_resources_package']
    physical_memory = \
        ctx.node.properties['minimum_required_total_physical_memory_in_mb']
    disk_space = \
        ctx.node.properties['minimum_required_available_disk_space_in_gb']
    heap_size_gap = ctx.node.properties['allowed_heap_size_gap_in_mb']

    error_summary = []

    error_summary.append(_validate_python_version(
        expected_major_version=2, expected_minor_version=7))
    # Fixed: ('7') is just the string '7' - a one-element tuple needs a
    # trailing comma, matching the plural `supported_versions` contract.
    error_summary.append(_validate_supported_distros(
        supported_distros=('centos', 'redhat'),
        supported_versions=('7',)))
    error_summary.append(_validate_sufficient_memory(
        min_memory_required_in_mb=physical_memory))
    error_summary.append(_validate_sufficient_disk_space(
        min_disk_space_required_in_gb=disk_space))
    # memory validation for es is only relevant during bootstrap for now.
    if _is_bootstrap():
        # remove last character as it contains the `g` or `m`.
        es_heap_size = ctx.node.properties['es_heap_size']
        error_summary.append(_validate_es_heap_size(
            es_heap_size=es_heap_size, allowed_gap_in_mb=heap_size_gap))
    if resources_package_url:
        error_summary.append(_validate_resources_package_url(
            resources_package_url))

    # if no error occurred in a validation, we need to remove its reference.
    error_summary = [error for error in error_summary if error]
    if error_summary:
        printable_error_summary = '\n' + '\n'.join(error_summary)
        if ignore_validations:
            ctx.logger.warn('Ignoring validation errors. {0}'.format(
                printable_error_summary))
        else:
            ctx.abort_operation(printable_error_summary)
コード例 #43
0
def main():
    """Install a local Elasticsearch, or validate an external endpoint.

    When no `es_endpoint_ip` input is given, ES is installed and configured
    on this host; otherwise the external endpoint is checked and configured.
    Handles upgrade/rollback data dump/restore around the operation.
    """
    es_endpoint_ip = ctx_properties['es_endpoint_ip']
    es_endpoint_port = ctx_properties['es_endpoint_port']

    # on upgrade, persist existing data before touching the service
    if utils.is_upgrade:
        dump_upgrade_data()

    if not es_endpoint_ip:
        # no external endpoint provided: install ES locally on this host
        es_endpoint_ip = ctx.instance.host_ip
        _install_elasticsearch()
        utils.systemd.restart(ES_SERVICE_NAME, append_prefix=False)
        utils.wait_for_port(es_endpoint_port, es_endpoint_ip)
        _configure_elasticsearch(host=es_endpoint_ip, port=es_endpoint_port)
        # avoid 'all shards failed' errors right after first start
        _wait_for_shards(es_endpoint_port, es_endpoint_ip)

        utils.clean_var_log_dir('elasticsearch')
    else:
        ctx.logger.info('External Elasticsearch Endpoint provided: '
                        '{0}:{1}...'.format(es_endpoint_ip, es_endpoint_port))
        time.sleep(5)
        utils.wait_for_port(es_endpoint_port, es_endpoint_ip)
        ctx.logger.info('Checking if \'cloudify_storage\' '
                        'index already exists...')

        # a pre-existing storage index means this endpoint already serves
        # another manager - refuse to clobber it
        if http_request('http://{0}:{1}/cloudify_storage'.format(
                es_endpoint_ip, es_endpoint_port), method='HEAD').code == 200:
            ctx.abort_operation('\'cloudify_storage\' index already exists on '
                                '{0}, terminating bootstrap...'.format(
                                    es_endpoint_ip))
        _configure_elasticsearch(host=es_endpoint_ip, port=es_endpoint_port)

    if utils.is_upgrade or utils.is_rollback:
        restore_upgrade_data(es_endpoint_ip, es_endpoint_port)

    # NOTE(review): this tests the *port*, not the ip - it looks like it may
    # have been meant to stop the local service only when no external
    # endpoint ip was provided; confirm before changing.
    if not es_endpoint_port:
        utils.systemd.stop(ES_SERVICE_NAME, append_prefix=False)

    ctx.instance.runtime_properties['es_endpoint_ip'] = es_endpoint_ip
コード例 #44
0
def _wait_for_shards(port, ip):
    """Block until every available Elasticsearch shard is STARTED.

    Right after ES is installed and started for the first time there is a
    short window in which created shards exist but are not yet started;
    accessing ES then (e.g. for a snapshot) fails with
    'SearchPhaseExecutionException[Failed to execute phase [init_scan], all
    shards failed]'.

    :param port: Elasticsearch port
    :param ip: Ip to Elasticsearch
    """
    ctx.logger.info('Waiting for shards to be active...')
    timeout_seconds = 60
    deadline = time.time() + timeout_seconds

    url = 'http://{ip}:{port}/*/_search_shards'.format(ip=ip, port=port)
    while True:
        started = True
        try:
            reply = urllib2.urlopen(url)
            shards = json.load(reply)['shards']
            started = all(entry[0]['state'] == 'STARTED' for entry in shards)
        except urllib2.URLError as e:
            ctx.abort_operation('Failed to retrieve information about '
                                'Elasticsearch shards: {0}'.format(e.reason))

        if started:
            return
        time.sleep(1)
        if time.time() > deadline:
            inactive = [entry[0] for entry in shards
                        if entry[0]['state'] != 'STARTED']
            ctx.abort_operation('Elasticsearch shards check timed out. '
                                'Inactive shards: {0}'.format(inactive))
コード例 #45
0
def _wait_for_shards(port, ip):
    """Wait for activation of all available shards in Elasticsearch.

    After Elasticsearch is installed and first time started there is short time
    when shards, if created, are not started. If someone would access ES during
    that time (e.g. by creating snapshot) he will get error
    'SearchPhaseExecutionException[Failed to execute phase [init_scan], all
    shards failed]'.

    :param port: Elasticsearch port
    :param ip: Ip to Elasticsearch
    """
    ctx.logger.info('Waiting for shards to be active...')
    shards_check_timeout = 60
    start = time.time()

    url = 'http://{ip}:{port}/*/_search_shards'.format(ip=ip, port=port)
    while True:
        # assume started until a shard proves otherwise
        all_shards_started = True
        try:
            out = urllib2.urlopen(url)
            shards = json.load(out)['shards']
            for shard in shards:
                all_shards_started = all_shards_started and \
                    (shard[0]['state'] == 'STARTED')
        except urllib2.URLError as e:
            ctx.abort_operation('Failed to retrieve information about '
                                'Elasticsearch shards: {0}'.format(e.reason))

        if all_shards_started:
            return
        # poll once per second until the timeout elapses
        time.sleep(1)
        if time.time() - start > shards_check_timeout:
            inactive = [s[0] for s in shards if s[0]['state'] != 'STARTED']
            ctx.abort_operation('Elasticsearch shards check timed out. '
                                'Inactive shards: {0}'.format(inactive))
コード例 #46
0
ファイル: start.py プロジェクト: ptanX/cloudify
def _assert_logs_and_events(execution_id):
    """Abort unless logs/events for the execution exist via REST and in DB.

    First queries the REST events endpoint for the given execution, then
    verifies the `logs` and `events` tables in PostgreSQL are non-empty.

    :param execution_id: execution whose logs/events are expected to exist
    """
    headers = utils.create_maintenance_headers()
    # a tuple of pairs (not a dict) because the `type` key repeats
    params = urllib.urlencode((
        ('execution_id', execution_id),
        ('type', 'cloudify_event'),
        ('type', 'cloudify_log'),
        ('_sort', '@timestamp'),
        ('_size', 100),
        ('_offset', 0),
    ))

    endpoint = '{0}/events'.format(_get_url_prefix())
    url = endpoint + '?' + params
    ctx.logger.debug('Sending request to url: {0}, with the following '
                     'headers: {1}'.format(url, headers))
    resp = utils.http_request(url, method='GET', headers=headers, timeout=30)
    if not resp:
        ctx.abort_operation("Can't connect to Cloudify's rest service")
    if resp.code != 200:
        ctx.abort_operation('Failed to retrieve logs/events')

    resp_content = resp.readlines()
    json_resp = json.loads(resp_content[0])

    if 'items' not in json_resp or not json_resp['items']:
        ctx.logger.debug('No items received. The response is: '
                         '{0}'.format(json_resp))
        ctx.abort_operation('No logs/events received')

    # cross-check directly against the database tables
    db_name = 'cloudify_db'
    for table_name in ['logs', 'events']:
        proc = utils.run([
            'sudo',
            '-u',
            'postgres',
            'psql',
            db_name,
            '-t',
            '-c',
            'SELECT COUNT(*) FROM {0}'.format(table_name),
        ])
        count = int(proc.aggr_stdout)
        if count <= 0:
            ctx.abort_operation(
                'Failed to retrieve {0} from PostgreSQL'.format(table_name))
コード例 #47
0
def verify_restservice(url):
    """To verify that the REST service is working, GET the blueprints list.

    There's nothing special about the blueprints endpoint, it's simply one
    that also requires the storage backend to be up, so if it works, there's
    a good chance everything is configured correctly.

    :param url: base URL of the REST service
    """
    if utils.is_upgrade or utils.is_rollback:
        # if we're doing an upgrade, we're in maintenance mode - this request
        # is safe to perform in maintenance mode, so let's bypass the check
        headers = utils.create_maintenance_headers()
    else:
        headers = utils.get_auth_headers(True)
        headers['tenant'] = 'default_tenant'

    utils.verify_service_http(REST_SERVICE_NAME, url, headers=headers)

    blueprints_url = urlparse.urljoin(url, 'api/v2.1/blueprints')
    req = urllib2.Request(blueprints_url, headers=headers)

    try:
        response = urllib2.urlopen(req)
    # keep an erroneous HTTP response to examine its status code, but still
    # abort on fatal errors like being unable to connect at all
    except urllib2.HTTPError as e:
        response = e
    except urllib2.URLError as e:
        ctx.abort_operation(
            'REST service returned an invalid response: {0}'.format(e))
    if response.code == 401:
        ctx.abort_operation('Could not connect to the REST service: '
                            '401 unauthorized. Possible access control '
                            'misconfiguration')
    if response.code != 200:
        ctx.abort_operation(
            'REST service returned an unexpected response: {0}'.format(
                response.code))

    # the body must be valid JSON, otherwise something else answered
    try:
        json.load(response)
    except ValueError as e:
        ctx.abort_operation(
            'REST service returned malformed JSON: {0}'.format(e))
コード例 #48
0
def verify_restservice(url):
    """Verify the REST service works by GETting the blueprints list.

    The blueprints endpoint is used because it also exercises the storage
    backend: if it responds correctly there's a good chance everything is
    configured properly.
    """
    if utils.is_upgrade or utils.is_rollback:
        # during upgrade/rollback the manager is in maintenance mode, so
        # bypass the maintenance check with the appropriate headers
        headers = utils.create_maintenance_headers()
    else:
        headers = utils.get_auth_headers(True)
        headers['tenant'] = 'default_tenant'

    utils.verify_service_http(SERVICE_NAME, url, headers=headers)

    request = urllib2.Request(
        urlparse.urljoin(url, 'api/v2.1/blueprints'), headers=headers)

    try:
        response = urllib2.urlopen(request)
    except urllib2.HTTPError as e:
        # an HTTP error still carries a status code - inspect it below
        response = e
    except urllib2.URLError as e:
        # fatal transport-level failure (e.g. connection refused)
        ctx.abort_operation('REST service returned an invalid response: {0}'
                            .format(e))

    if response.code == 401:
        ctx.abort_operation('Could not connect to the REST service: '
                            '401 unauthorized. Possible access control '
                            'misconfiguration')
    if response.code != 200:
        ctx.abort_operation('REST service returned an unexpected response: {0}'
                            .format(response.code))

    try:
        json.load(response)
    except ValueError as e:
        ctx.abort_operation('REST service returned malformed JSON: {0}'
                            .format(e))
コード例 #49
0
def verify_restservice(url):
    """Verify the REST service works by GETting the blueprints list.

    The blueprints endpoint is used because it also exercises the storage
    backend: if it responds correctly there's a good chance everything is
    configured properly.
    """
    security_config = runtime_props['security_configuration']
    headers = utils.get_auth_headers(
        username=security_config['admin_username'],
        password=security_config['admin_password'])

    utils.verify_service_http(SERVICE_NAME, url, headers=headers)

    request = urllib2.Request(
        urlparse.urljoin(url, 'api/v2.1/blueprints'), headers=headers)

    try:
        response = urllib2.urlopen(request)
    except urllib2.HTTPError as e:
        # an HTTP error still carries a status code - inspect it below
        response = e
    except urllib2.URLError as e:
        # fatal transport-level failure (e.g. connection refused)
        ctx.abort_operation(
            'REST service returned an invalid response: {0}'.format(e))

    if response.code == 401:
        ctx.abort_operation('Could not connect to the REST service: '
                            '401 unauthorized. Possible access control '
                            'misconfiguration')
    if response.code != 200:
        ctx.abort_operation(
            'REST service returned an unexpected response: {0}'.format(
                response.code))

    try:
        json.load(response)
    except ValueError as e:
        ctx.abort_operation(
            'REST service returned malformed JSON: {0}'.format(e))
コード例 #50
0
def _assert_logs_and_events(execution_id):
    """Abort unless logs/events for the execution exist via REST and in DB.

    First queries the REST events endpoint for the given execution, then
    verifies the `logs` and `events` tables in PostgreSQL are non-empty.
    """
    headers = utils.create_maintenance_headers()
    # a tuple of pairs (not a dict) because the `type` key repeats
    query = urllib.urlencode((
        ('execution_id', execution_id),
        ('type', 'cloudify_event'),
        ('type', 'cloudify_log'),
        ('_sort', '@timestamp'),
        ('_size', 100),
        ('_offset', 0),
    ))

    url = '{0}/events?{1}'.format(_get_url_prefix(), query)
    ctx.logger.debug('Sending request to url: {0}, with the following '
                     'headers: {1}'.format(url, headers))
    response = utils.http_request(url, method='GET', headers=headers,
                                  timeout=30)
    if not response:
        ctx.abort_operation("Can't connect to Cloudify's rest service")
    if response.code != 200:
        ctx.abort_operation('Failed to retrieve logs/events')

    payload = json.loads(response.readlines()[0])

    if 'items' not in payload or not payload['items']:
        ctx.logger.debug('No items received. The response is: '
                         '{0}'.format(payload))
        ctx.abort_operation('No logs/events received')

    # cross-check directly against the database tables
    db_name = 'cloudify_db'
    for table_name in ['logs', 'events']:
        result = utils.run([
            'sudo', '-u', 'postgres',
            'psql', db_name, '-t', '-c',
            'SELECT COUNT(*) FROM {0}'.format(table_name),
        ])
        if int(result.aggr_stdout) <= 0:
            ctx.abort_operation(
                'Failed to retrieve {0} from PostgreSQL'.format(table_name))
コード例 #51
0
def _assert_logs_and_events(execution_id):
    """Abort unless the events endpoint returns log items for the execution."""
    headers = utils.create_maintenance_headers()
    query = urllib.urlencode(dict(execution_id=execution_id,
                                  type='cloudify_log'))

    url = '{0}/events?{1}'.format(_get_url_prefix(), query)
    response = utils.http_request(url, method='GET', headers=headers,
                                  timeout=30)
    if not response:
        ctx.abort_operation("Can't connect to elasticsearch")
    if response.code != 200:
        ctx.abort_operation('Failed to retrieve logs/events')

    payload = json.loads(response.readlines()[0])

    if 'items' not in payload or not payload['items']:
        ctx.abort_operation('No logs/events received')
コード例 #52
0
def install_logstash():
    """Install Logstash from RPM and deploy all of its configuration.

    Records the ES and RabbitMQ endpoint details in runtime properties for
    use by the deployed config resources, installs the package, and sets up
    systemd override, sysconfig, logrotate and boot-time activation.
    """
    logstash_unit_override = '/etc/systemd/system/logstash.service.d'

    logstash_source_url = ctx_properties['logstash_rpm_source_url']

    rabbitmq_username = ctx_properties['rabbitmq_username']
    rabbitmq_password = ctx_properties['rabbitmq_password']

    logstash_log_path = '/var/log/cloudify/logstash'
    logstash_conf_path = '/etc/logstash/conf.d'

    # injected as an input to the script
    ctx.instance.runtime_properties['es_endpoint_ip'] = \
        os.environ['ES_ENDPOINT_IP']
    elasticsearch_props = utils.ctx_factory.get('elasticsearch')
    ctx.instance.runtime_properties['es_endpoint_port'] = \
        elasticsearch_props['es_endpoint_port']

    # broker connection details are exposed to the deployed config resources
    rabbit_props = utils.ctx_factory.get('rabbitmq')
    ctx.instance.runtime_properties['rabbitmq_endpoint_ip'] = \
        utils.get_rabbitmq_endpoint_ip(
            rabbit_props.get('rabbitmq_endpoint_ip'))
    ctx.instance.runtime_properties['rabbitmq_username'] = \
        rabbit_props['rabbitmq_username']
    ctx.instance.runtime_properties['rabbitmq_password'] = \
        rabbit_props['rabbitmq_password']

    # Confirm username and password have been supplied for broker before
    # continuing.
    # Components other than logstash and riemann have this handled in code.
    # Note that these are not directly used in this script, but are used by the
    # deployed resources, hence the check here.
    if not rabbitmq_username or not rabbitmq_password:
        ctx.abort_operation(
            'Both rabbitmq_username and rabbitmq_password must be supplied '
            'and at least 1 character long in the manager blueprint inputs.')

    ctx.logger.info('Installing Logstash...')
    utils.set_selinux_permissive()
    utils.copy_notice(LOGSTASH_SERVICE_NAME)

    utils.yum_install(logstash_source_url, service_name=LOGSTASH_SERVICE_NAME)

    utils.mkdir(logstash_log_path)
    utils.chown('logstash', 'logstash', logstash_log_path)

    ctx.logger.info('Creating systemd unit override...')
    utils.mkdir(logstash_unit_override)
    utils.deploy_blueprint_resource(
        '{0}/restart.conf'.format(CONFIG_PATH),
        '{0}/restart.conf'.format(logstash_unit_override),
        LOGSTASH_SERVICE_NAME)
    ctx.logger.info('Deploying Logstash conf...')
    utils.deploy_blueprint_resource(
        '{0}/logstash.conf'.format(CONFIG_PATH),
        '{0}/logstash.conf'.format(logstash_conf_path),
        LOGSTASH_SERVICE_NAME)

    # Due to a bug in the handling of configuration files,
    # configuration files with the same name cannot be deployed.
    # Since the logrotate config file is called `logstash`,
    # we change the name of the logstash env vars config file
    # from logstash to cloudify-logstash to be consistent with
    # other service env var files.
    init_file = '/etc/init.d/logstash'
    # regex replace: point the init script at the renamed sysconfig file
    utils.replace_in_file(
        'sysconfig/\$name',
        'sysconfig/cloudify-$name',
        init_file)
    utils.chmod('755', init_file)
    utils.chown('root', 'root', init_file)

    ctx.logger.info('Deploying Logstash sysconfig...')
    utils.deploy_blueprint_resource(
        '{0}/cloudify-logstash'.format(CONFIG_PATH),
        '/etc/sysconfig/cloudify-logstash',
        LOGSTASH_SERVICE_NAME)

    utils.logrotate(LOGSTASH_SERVICE_NAME)
    # enable the SysV service on boot
    utils.sudo(['/sbin/chkconfig', 'logstash', 'on'])
    utils.clean_var_log_dir(LOGSTASH_SERVICE_NAME)
コード例 #53
0
def install_riemann():
    """Install the Riemann service and deploy its configuration.

    Pulls in the Langohr jar and the daemonize/riemann RPMs, extracts the
    cloudify-manager repository to obtain Riemann's manager.config, and
    deploys the blueprint-provided main.clj. Aborts early if the rabbitmq
    credentials are missing, since the deployed resources depend on them.
    """
    langohr_url = ctx_properties['langohr_jar_source_url']
    daemonize_url = ctx_properties['daemonize_rpm_source_url']
    riemann_url = ctx_properties['riemann_rpm_source_url']
    # Needed for Riemann's config
    resources_url = ctx_properties['cloudify_resources_url']
    broker_username = ctx_properties['rabbitmq_username']
    broker_password = ctx_properties['rabbitmq_password']

    conf_dir = '/etc/riemann'
    log_dir = '/var/log/cloudify/riemann'
    langohr_home = '/opt/lib'
    extra_classpath = langohr_home + '/langohr.jar'

    # Confirm username and password have been supplied for the broker
    # before continuing. Components other than logstash and riemann have
    # this handled in code. These values are not used directly by this
    # script, but by the deployed resources, hence the check here.
    if not (broker_username and broker_password):
        ctx.abort_operation(
            'Both rabbitmq_username and rabbitmq_password must be supplied '
            'and at least 1 character long in the manager blueprint inputs.')

    rabbit_props = utils.ctx_factory.get('rabbitmq')
    runtime = ctx.instance.runtime_properties
    runtime['rabbitmq_endpoint_ip'] = utils.get_rabbitmq_endpoint_ip(
        rabbit_props.get('rabbitmq_endpoint_ip'))
    runtime['rabbitmq_username'] = rabbit_props.get('rabbitmq_username')
    runtime['rabbitmq_password'] = rabbit_props.get('rabbitmq_password')

    ctx.logger.info('Installing Riemann...')
    utils.set_selinux_permissive()

    utils.copy_notice(RIEMANN_SERVICE_NAME)
    for directory in (log_dir, langohr_home, conf_dir,
                      conf_dir + '/conf.d'):
        utils.mkdir(directory)

    langohr_jar = utils.download_cloudify_resource(langohr_url,
                                                   RIEMANN_SERVICE_NAME)
    utils.sudo(['cp', langohr_jar, extra_classpath])
    ctx.logger.info('Applying Langohr permissions...')
    utils.sudo(['chmod', '644', extra_classpath])
    utils.yum_install(daemonize_url, service_name=RIEMANN_SERVICE_NAME)
    utils.yum_install(riemann_url, service_name=RIEMANN_SERVICE_NAME)

    utils.logrotate(RIEMANN_SERVICE_NAME)

    ctx.logger.info('Downloading cloudify-manager Repository...')
    repo_archive = utils.download_cloudify_resource(resources_url,
                                                    RIEMANN_SERVICE_NAME)
    ctx.logger.info('Extracting Manager Repository...')
    utils.untar(repo_archive, '/tmp')
    ctx.logger.info('Deploying Riemann manager.config...')
    utils.move(
        '/tmp/plugins/riemann-controller/riemann_controller/resources/manager.config',  # NOQA
        conf_dir + '/conf.d/manager.config')

    ctx.logger.info('Deploying Riemann conf...')
    utils.deploy_blueprint_resource(
        '{0}/main.clj'.format(CONFIG_PATH),
        conf_dir + '/main.clj',
        RIEMANN_SERVICE_NAME)

    # Riemann's config reads MANAGEMENT_IP and RABBITMQ_HOST from the
    # environment, falling back to "localhost" when unset. The systemd
    # unit configured below injects the management ip for both; they may
    # legitimately differ when the manager and rabbitmq run on separate
    # hosts.
    utils.systemd.configure(RIEMANN_SERVICE_NAME)
    utils.clean_var_log_dir(RIEMANN_SERVICE_NAME)
コード例 #54
0
#!/usr/bin/env python

from os.path import join, dirname

from cloudify import ctx

ctx.download_resource(
    join('components', 'utils.py'),
    join(dirname(__file__), 'utils.py'))
import utils  # NOQA


# Sanity-check that a Java runtime is present by invoking `java -version`;
# a non-zero exit code aborts the operation.
java_result = utils.sudo(['java', '-version'], ignore_failures=True)
if java_result.returncode:
    ctx.abort_operation('Java runtime error: java was not installed')
コード例 #55
0
#!/usr/bin/env python

from os.path import join, dirname

from cloudify import ctx

ctx.download_resource(
    join('components', 'utils.py'),
    join(dirname(__file__), 'utils.py'))
import utils  # NOQA


# Sanity-check that pip is present by invoking it; a non-zero exit code
# aborts the operation.
pip_result = utils.sudo(['pip'], ignore_failures=True)
if pip_result.returncode:
    ctx.abort_operation('Python runtime installation error: '
                        'pip was not installed')
コード例 #56
0
    join('components', 'utils.py'),
    join(dirname(__file__), 'utils.py'))
import utils  # NOQA

# Systemd service name of the management worker.
MGMT_WORKER_SERVICE_NAME = 'mgmtworker'
CELERY_PATH = '/opt/mgmtworker/env/bin/celery'  # also hardcoded in create


@utils.retry(ValueError)
def check_worker_running():
    """Probe the management worker via `celery status`.

    Raises ValueError (retried by the decorator) while the worker is not
    responding.
    """
    status = utils.sudo([
        'CELERY_WORK_DIR=/opt/mgmtworker/work',
        CELERY_PATH,
        '--config=cloudify.broker_config',
        'status'
    ], ignore_failures=True)
    if status.returncode:
        raise ValueError('celery status: worker not running')


# Start the management worker, then verify both that systemd reports the
# service alive and that the celery worker itself answers `status`.
ctx.logger.info('Starting Management Worker Service...')
utils.start_service(MGMT_WORKER_SERVICE_NAME)
utils.systemd.verify_alive(MGMT_WORKER_SERVICE_NAME)
try:
    check_worker_running()
except ValueError:
    ctx.abort_operation('Celery worker failed to start')
コード例 #57
0
def deploy_manager_sources():
    """Deploys all manager sources from a single archive.

    Downloads the manager resources package (optionally validating its md5
    checksum), extracts it into CLOUDIFY_SOURCES_PATH, and moves the bundled
    agent packages into the agent archives directory under normalized names.
    On upgrade the previous agents are backed up first; on rollback they are
    restored from the backup.

    Fix: the checksum-failure branch was over-indented by one level
    (flake8 E117); behavior is unchanged.
    """
    archive_path = ctx_properties['manager_resources_package']
    archive_checksum_path = \
        ctx_properties['manager_resources_package_checksum_file']
    skip_checksum_validation = ctx_properties['skip_checksum_validation']
    agent_archives_path = utils.AGENT_ARCHIVES_PATH
    utils.mkdir(agent_archives_path)
    if archive_path:
        sources_agents_path = os.path.join(
            utils.CLOUDIFY_SOURCES_PATH, 'agents')
        # this will leave this several hundreds of MBs archive on the
        # manager. should find a way to clean it after all operations
        # were completed and bootstrap succeeded as it is not longer
        # necessary
        utils.mkdir(utils.CLOUDIFY_SOURCES_PATH)
        resource_name = os.path.basename(archive_path)
        destination = os.path.join(utils.CLOUDIFY_SOURCES_PATH, resource_name)

        ctx.logger.info('Downloading manager resources package...')
        resources_archive_path = \
            utils.download_cloudify_resource(
                archive_path, NODE_NAME, destination=destination)
        # This would ideally go under utils.download_cloudify_resource but as
        # of now, we'll only be validating the manager resources package.

        if not skip_checksum_validation:
            ctx.logger.info('Validating checksum...')
            skip_if_failed = False
            if not archive_checksum_path:
                # No explicit checksum file was provided: fall back to the
                # implicit `<archive>.md5` and treat a mismatch as a warning
                # rather than a hard failure.
                skip_if_failed = True
                archive_checksum_path = archive_path + '.md5'
            md5_name = os.path.basename(archive_checksum_path)
            destination = os.path.join(utils.CLOUDIFY_SOURCES_PATH, md5_name)
            resources_archive_md5_path = utils.download_cloudify_resource(
                archive_checksum_path, NODE_NAME, destination=destination)
            if not utils.validate_md5_checksum(resources_archive_path,
                                               resources_archive_md5_path):
                if skip_if_failed:
                    ctx.logger.warn('Checksum validation failed. '
                                    'Continuing as no checksum file was '
                                    'explicitly provided.')
                else:
                    ctx.abort_operation(
                        'Failed to validate checksum for {0}'.format(
                            resources_archive_path))
            else:
                ctx.logger.info('Resources Package downloaded successfully...')
        else:
            ctx.logger.info(
                'Skipping resources package checksum validation...')

        utils.untar(
            resources_archive_path,
            utils.CLOUDIFY_SOURCES_PATH,
            skip_old_files=True)

        def splitext(filename):
            # not using os.path.splitext as it would return .gz instead of
            # .tar.gz
            if filename.endswith('.tar.gz'):
                return '.tar.gz'
            elif filename.endswith('.exe'):
                return '.exe'
            else:
                ctx.abort_operation(
                    'Unknown agent format for {0}. '
                    'Must be either tar.gz or exe'.format(filename))

        def normalize_agent_name(filename):
            # this returns the normalized name of an agent upon which our agent
            # installer retrieves agent packages for installation.
            # e.g. Ubuntu-trusty-agent_3.4.0-m3-b392.tar.gz returns
            # ubuntu-trusty-agent
            return filename.split('_', 1)[0].lower()

        def backup_agent_resources(agents_dir):
            # Back up only once: an existing rollback dir means an earlier
            # upgrade attempt already captured the pre-upgrade agents.
            ctx.logger.info('Backing up agents in {0}...'.format(agents_dir))
            if not os.path.isdir(utils.AGENTS_ROLLBACK_PATH):
                utils.mkdir(utils.AGENTS_ROLLBACK_PATH)
                utils.copy(agents_dir, utils.AGENTS_ROLLBACK_PATH)

        def restore_agent_resources(agents_dir):
            # Replace the current agents dir with the backed-up copy.
            ctx.logger.info('Restoring agents in {0}'.format(
                utils.AGENTS_ROLLBACK_PATH))
            if os.path.isdir(agents_dir):
                utils.remove(agents_dir)
            utils.mkdir(agents_dir)
            utils.copy(os.path.join(utils.AGENTS_ROLLBACK_PATH, 'agents', '.'),
                       agents_dir)

        manager_scripts_path = os.path.join(
            utils.MANAGER_RESOURCES_HOME, 'packages', 'scripts')
        manager_templates_path = os.path.join(
            utils.MANAGER_RESOURCES_HOME, 'packages', 'templates')
        if utils.is_upgrade:
            backup_agent_resources(agent_archives_path)
            utils.remove(agent_archives_path)
            utils.mkdir(agent_archives_path)
            utils.remove(manager_scripts_path)
            utils.remove(manager_templates_path)
            ctx.logger.info('Upgrading agents...')
        elif utils.is_rollback:
            ctx.logger.info('Restoring agents...')
            restore_agent_resources(agent_archives_path)

        # Move each extracted agent package to its normalized archive name,
        # e.g. Ubuntu-trusty-agent_3.4.0-m3-b392.tar.gz ->
        # ubuntu-trusty-agent.tar.gz
        for agent_file in os.listdir(sources_agents_path):
            agent_id = normalize_agent_name(agent_file)
            agent_extension = splitext(agent_file)
            utils.move(
                os.path.join(sources_agents_path, agent_file),
                os.path.join(agent_archives_path, agent_id + agent_extension))
コード例 #58
0
        name='events_queue_message_policy',
        expression='^cloudify-events$',
        policy=events_queue_message_policy
    )
    set_rabbitmq_policy(
        name='metrics_queue_message_policy',
        expression='^amq\.gen.*$',
        policy=metrics_queue_message_policy
    )
    set_rabbitmq_policy(
        name='riemann_deployment_queues_message_ttl',
        expression='^.*-riemann$',
        policy=riemann_deployment_queues_message_ttl
    )

    # rabbitmq restart exits with 143 status code that is valid in this case.
    utils.start_service(RABBITMQ_SERVICE_NAME, ignore_restart_fail=True)
    rabbitmq_endpoint_ip = '127.0.0.1'

    utils.systemd.verify_alive(RABBITMQ_SERVICE_NAME)
    try:
        check_rabbit_running()
    except ValueError:
        ctx.abort_operation('Rabbitmq failed to start')

# Abort the operation if the broker port never became reachable.
try:
    check_port_accessible(rabbitmq_endpoint_ip, PORT)
except ValueError:
    message = '{0} error: port {1}:{2} was not open'.format(
        RABBITMQ_SERVICE_NAME, rabbitmq_endpoint_ip, PORT)
    ctx.abort_operation(message)
コード例 #59
0
def install_riemann():
    langohr_source_url = ctx_properties['langohr_jar_source_url']
    daemonize_source_url = ctx_properties['daemonize_rpm_source_url']
    riemann_source_url = ctx_properties['riemann_rpm_source_url']

    rabbitmq_username = ctx_properties['rabbitmq_username']
    rabbitmq_password = ctx_properties['rabbitmq_password']

    utils.create_service_user(
        user=RIEMANN_USER,
        group=RIEMANN_GROUP,
        home=utils.CLOUDIFY_HOME_DIR
    )

    riemann_config_path = '/etc/riemann'
    riemann_log_path = '/var/log/cloudify/riemann'
    langohr_home = '/opt/lib'
    extra_classpath = '{0}/langohr.jar'.format(langohr_home)
    riemann_dir = '/opt/riemann'

    # Confirm username and password have been supplied for broker before
    # continuing.
    # Components other than logstash and riemann have this handled in code.
    # Note that these are not directly used in this script, but are used by the
    # deployed resources, hence the check here.
    if not rabbitmq_username or not rabbitmq_password:
        ctx.abort_operation(
            'Both rabbitmq_username and rabbitmq_password must be supplied '
            'and at least 1 character long in the manager blueprint inputs.')

    rabbit_props = utils.ctx_factory.get('rabbitmq')
    runtime_props['rabbitmq_endpoint_ip'] = utils.get_rabbitmq_endpoint_ip()
    runtime_props['rabbitmq_username'] = rabbit_props.get('rabbitmq_username')
    runtime_props['rabbitmq_password'] = rabbit_props.get('rabbitmq_password')

    ctx.logger.info('Installing Riemann...')
    utils.set_selinux_permissive()

    utils.copy_notice(RIEMANN_SERVICE_NAME)
    utils.mkdir(riemann_log_path)
    utils.mkdir(langohr_home)
    utils.mkdir(riemann_config_path)
    utils.mkdir('{0}/conf.d'.format(riemann_config_path))

    # utils.chown cannot be used as it will change both user and group
    utils.sudo(['chown', RIEMANN_USER, riemann_dir])

    langohr = utils.download_cloudify_resource(langohr_source_url,
                                               RIEMANN_SERVICE_NAME)
    utils.sudo(['cp', langohr, extra_classpath])
    ctx.logger.info('Applying Langohr permissions...')
    utils.sudo(['chmod', '644', extra_classpath])
    utils.yum_install(daemonize_source_url, service_name=RIEMANN_SERVICE_NAME)
    utils.yum_install(riemann_source_url, service_name=RIEMANN_SERVICE_NAME)

    utils.chown(RIEMANN_USER, RIEMANN_GROUP, riemann_log_path)

    utils.logrotate(RIEMANN_SERVICE_NAME)

    files_to_remove = [riemann_config_path,
                       riemann_log_path,
                       extra_classpath,
                       riemann_dir]
    runtime_props['files_to_remove'] = files_to_remove