Example #1
    def _initialize_client(self):
        '''
            Initialize Apache Ambari client
        '''
        # check the optional (not required) arguments
        protocol = 'https' if self.get_option('protocol') == 'https' else 'http'
        validate_ssl = self.get_option('validate_ssl') is True

        # disable SSL warnings when certificate validation is off
        if not validate_ssl:
            urllib3.disable_warnings()

        # initiate Apache Ambari client
        self._client = Ambari(self.get_option('hostname'),
                              port=int(self.get_option('port')),
                              username=self.get_option('username'),
                              password=self.get_option('password'),
                              protocol=protocol,
                              validate_ssl=validate_ssl)
Example #2
class AmbariClusterManager:

    def __init__(self, module):
        self.module = module
        self.client = Ambari(module.params.get('server'),
                             port=module.params.get('port'),
                             username=module.params.get('username'),
                             password=module.params.get('password'))

        self.name = self.module.params.get('name')
        self.blueprint = json.load(open(self.module.params.get('blueprint')))
        self.blueprint_name = self.blueprint['Blueprints']['blueprint_name']
        self.hosts_map = json.load(open(self.module.params.get('hosts_map')))

    # client.blueprints('datalyse-hdfs-yarn').delete()

    def ensure_present(self):
        if self.name in [x.cluster_name for x in self.client.clusters]:
            return False
        else:
            if self.blueprint_name not in [x.blueprint_name for x in self.client.blueprints]:
                self.client.blueprints.create(self.blueprint_name, **self.blueprint)

            c = self.client.clusters.create(self.name, **self.hosts_map)
            c.wait()
            return True

    def ensure_absent(self):
        if self.name in [x.cluster_name for x in self.client.clusters]:
            self.client.clusters(self.name).delete()
            return True
        else:
            return False

    def create_blueprint(self, name, blueprint_data):
        self.client.blueprints.create(name, **blueprint_data)

    def get_clusters(self):
        filtered_images = []
        images = self.client.images()
        for i in images:
            # Docker-py version >= 0.3 (Docker API >= 1.8)
            if 'RepoTags' in i:
                repotag = ':'.join([self.name, self.tag])
                if not self.name or repotag in i['RepoTags']:
                    filtered_images.append(i)
            # Docker-py version < 0.3 (Docker API < 1.8)
            elif (not self.name or self.name == i['Repository']) and (not self.tag or self.tag == i['Tag']):
                filtered_images.append(i)
        return filtered_images

    def remove_images(self):
        images = self.get_images()
        for i in images:
            try:
                self.client.remove_image(i['Id'])
                self.changed = True
            except DockerAPIError as e:
                # docker may refuse to remove an image that is still in use; ignore the error
                pass
Example #3
def list_running_hosts(config):
    """
    Use new style inventory script (_meta) available from 1.3 as it has
    performance improvement not running inventory script for each node.
    """
    parser = urlparse(config['ambari_url'])
    client = Ambari(host=parser.hostname, port=parser.port,
                    protocol=parser.scheme,
                    username=config['ambari_user'], password=config['ambari_password'],
                    validate_ssl=False)
    cluster = next(client.clusters)

    inventory = collections.defaultdict(
        lambda: {'hosts': []},
        _meta={'hostvars': {}},
        all={}
    )

    # hostvars
    ambari_host = parser.hostname
    for host in cluster.hosts:
        inventory['_meta']['hostvars'][host.host_name] = get_node_props(host)
        if ambari_host == host.ip:
            ambari_host = host.host_name

    # groups
    for comp in cluster.host_components:
        inventory[comp.component_name]['hosts'].append(comp.host_name)

    # configs for all the components
    component_configs = get_all_configs(parser.scheme,
                                        parser.hostname,
                                        parser.port,
                                        parser.path,
                                        config['ambari_user'],
                                        config['ambari_password'],
                                        cluster.cluster_name)
    # stack versions

    stack, version = get_stack_versions(parser.scheme,
                                        parser.hostname,
                                        parser.port,
                                        parser.path,
                                        config['ambari_user'],
                                        config['ambari_password'],
                                        cluster.cluster_name)
    # group_vars
    inventory['all']['vars'] = {
        'ambari_cluster_name': cluster.cluster_name,
        'ambari_host': ambari_host,
        'ambari_password': config['ambari_password'],
        'ambari_url': config['ambari_url'],
        'ambari_user': config['ambari_user'],
        'ambari_component_configs': component_configs,
        'host_stack': stack,
        'host_stack_version': version
    }


    return inventory
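
As the docstring notes, this builds a new-style (_meta) dynamic inventory, so a wrapper script would typically just print the result as JSON. A minimal sketch, assuming the three config keys read above come from environment variables (the variable names and defaults here are placeholders):

if __name__ == '__main__':
    import json
    import os

    config = {
        'ambari_url': os.environ.get('AMBARI_URL', 'http://ambari.example.com:8080'),
        'ambari_user': os.environ.get('AMBARI_USER', 'admin'),
        'ambari_password': os.environ.get('AMBARI_PASSWORD', 'admin'),
    }
    # a defaultdict serializes like a plain dict
    print(json.dumps(list_running_hosts(config), indent=2))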
Example #4
    def _initialize_client(self):
        '''
            Initialize Apache Ambari client
        '''

        # disable ssl warning
        if self.config.get('validate_ssl') == False:
            urllib3.disable_warnings()

        # initiate Apache Ambari client
        self._client = Ambari(self.config.get('hostname'),
                              port=int(self.config.get('port')),
                              username=self.config.get('username'),
                              password=self.config.get('password'),
                              protocol=self.config.get('protocol'),
                              validate_ssl=self.config.get('validate_ssl'))
Example #5
def main():

    module = AnsibleModule(
        argument_spec=dict(
            ambari_server=dict(default='localhost', type='str'),
            ambari_user=dict(default='admin', type='str'),
            ambari_pass=dict(default='admin', type='str'),
            cluster_name=dict(default='hadoop-poc', type='str'),
            config_name=dict(type='str'),
            properties=dict(type='dict')
        )
    )

    ambari_server = module.params.get('ambari_server')
    ambari_user = module.params.get('ambari_user')
    ambari_pass = module.params.get('ambari_pass')
    cluster_name = module.params.get('cluster_name')
    config_name = module.params.get('config_name')
    properties = module.params.get('properties')

    client = Ambari(ambari_server, port=8080, username=ambari_user, password=ambari_pass)

    update_config(next(client.clusters), config_name, properties)

    module.exit_json(changed=True, ansible_facts=dict())
Example #6
    def __init__(self, module):
        self.module = module
        self.client = Ambari(module.params.get('server'),
                             port=module.params.get('port'),
                             username=module.params.get('username'),
                             password=module.params.get('password'))

        self.name = self.module.params.get('name')
        self.blueprint = json.load(open(self.module.params.get('blueprint')))
        self.blueprint_name = self.blueprint['Blueprints']['blueprint_name']
        self.hosts_map = json.load(open(self.module.params.get('hosts_map')))
Example #7
def test_lazy_loading():
    patch_method = 'ambariclient.client.HttpClient.request'
    with patch(patch_method, MagicMock(return_value={})) as http_request:
        client = Ambari('localhost')

        clusters = client.clusters
        assert http_request.call_count == 0, "Sent a request prior to inflation"

        clusters.inflate()
        assert http_request.call_count == 1, "inflating collection didn't hit the server"

        clusters('testcluster')
        assert http_request.call_count == 1, "getting a single cluster hit the server again"

        clusters('testcluster').inflate()
        assert http_request.call_count == 2, "inflating model didn't hit the server"

    with patch(patch_method, MagicMock(return_value={})) as http_request:
        client = Ambari('localhost')

        cluster = client.clusters('testcluster')
        assert http_request.call_count == 0, "getting model inflated collection"

        cluster.hosts
        assert http_request.call_count == 1, "accessing relationship on model didn't inflate it"

        cluster.hosts.to_dict()
        assert http_request.call_count == 2, "to_dict on relationship didn't inflate it"

    with patch(patch_method, MagicMock(return_value={})) as http_request:
        client = Ambari('localhost')

        cluster = client.clusters('testcluster')
        assert http_request.call_count == 0, "getting model inflated collection"

        cluster.cluster_name
        assert http_request.call_count == 0, "accessing prepopulated field on model inflated it"

        cluster.health_report
        assert http_request.call_count == 1, "accessing field on model didn't inflate it"
Example #8
def test_lazy_loading():
    patch_method = 'ambariclient.client.HttpClient.request'
    with patch(patch_method, MagicMock(return_value={})) as http_request:
        client = Ambari('localhost')

        clusters = client.clusters
        assert http_request.call_count == 0, "Sent a request prior to inflation"

        clusters.inflate()
        assert http_request.call_count == 1, "inflating collection didn't hit the server"

        clusters('testcluster')
        assert http_request.call_count == 1, "getting a single cluster hit the server again"

        clusters('testcluster').inflate()
        assert http_request.call_count == 2, "inflating model didn't hit the server"

    with patch(patch_method, MagicMock(return_value={})) as http_request:
        client = Ambari('localhost')

        cluster = client.clusters('testcluster')
        assert http_request.call_count == 0, "getting model inflated collection"

        cluster.hosts
        assert http_request.call_count == 0, "accessing relationship on model inflated it"

        cluster.hosts.to_dict()
        assert http_request.call_count == 1, "to_dict on relationship didn't inflate it"

    with patch(patch_method, MagicMock(return_value={})) as http_request:
        client = Ambari('localhost')

        cluster = client.clusters('testcluster')
        assert http_request.call_count == 0, "getting model inflated collection"

        cluster.cluster_name
        assert http_request.call_count == 0, "accessing prepopulated field on model inflated it"

        cluster.health_report
        assert http_request.call_count == 1, "accessing field on model didn't inflate it"
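
The behaviour these tests pin down is the client's lazy loading: chained accessors build objects locally and only hit the server when something forces inflation. In ordinary use (the host and cluster names below are placeholders) that looks roughly like:

client = Ambari('ambari.example.com', port=8080, username='admin', password='admin')

cluster = client.clusters('testcluster')  # no HTTP request yet
name = cluster.cluster_name               # prepopulated field, still no request
hosts = cluster.hosts.to_dict()           # forces a request to the server
state = cluster.health_report             # a non-prepopulated field also inflates the model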
Example #9
    def __init__(self, vcap=None, vcap_filename=None):

        self.log = Logger().get_logger(self.__class__.__name__)

        assert (vcap is not None or vcap_filename is not None) \
           and (vcap is None or vcap_filename is None), \
                "You must only provide a vcap object OR vcap_filename parameter"

        if vcap_filename is not None:
            import json
            vcap = json.load(open(vcap_filename))

        try:
            self.USER         = vcap['cluster']['user']
            self.PASSWORD     = vcap['cluster']['password']
            self.AMBARI_URL   = vcap['cluster']['service_endpoints']['ambari_console']
            self.CLUSTER_ID   = vcap['cluster']['cluster_id']
        except KeyError as e:
            self.log.error("Couldn't parse vcap credential json - attribute {} not found.".format(str(e)))
            raise

        # ensure we are compatible with python 2 and 3
        try:
            from urllib.parse import urlparse
        except ImportError:
            from urlparse import urlparse

        url = urlparse(self.AMBARI_URL)

        self.HOST = url.hostname
        self.PORT = url.port
        self.PROTOCOL = url.scheme

        from ambariclient.client import Ambari
        self.ambari_client = Ambari(self.HOST, 
                                    port=self.PORT, 
                                    username=self.USER, 
                                    password=self.PASSWORD, 
                                    protocol=self.PROTOCOL)
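
For reference, a vcap document that satisfies this constructor only needs the four attributes read above; the sample below is an assumed shape with placeholder values.

example_vcap = {
    "cluster": {
        "user": "admin",
        "password": "changeme",
        "cluster_id": "20180101-000001",
        "service_endpoints": {
            "ambari_console": "https://mycluster.example.com:9443"
        }
    }
}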
Example #10
def main(args):
    quiet = not args.verbose
    print_topology_meta(args.topology)

    models.LOCALTIME_MOUNT = False
    models.PRIVILEGED_CONTAINER = True  # 'privileged' containers are needed to have systemd work with no issues

    os_major_version = (args.operating_system or
                        DEFAULT_OPERATING_SYSTEM)[6]  # always assume 'centosX'
    image = '{}/topology_nodebase:{}'.format(
        defaults['DEFAULT_REPOSITORY'], args.operating_system
        or DEFAULT_OPERATING_SYSTEM)
    primary_node = models.Node(hostname='node-1',
                               group='nodes',
                               image=image,
                               ports=[{
                                   AMBARI_PORT: AMBARI_PORT
                               }])
    secondary_node = models.Node(hostname='node-2', group='nodes', image=image)
    cluster = models.Cluster(primary_node, secondary_node)
    cluster.start(args.network)

    hdp_version_tuple = version_tuple(args.hdp_version)
    stack_version = '{}.{}'.format(hdp_version_tuple[0], hdp_version_tuple[1])
    stack_version_tuple = (hdp_version_tuple[0], hdp_version_tuple[1])
    DEFAULT_CLUSTER_HOST_MAPPING[0]['hosts'][0]['fqdn'] = primary_node.fqdn
    DEFAULT_CLUSTER_HOST_MAPPING[1]['hosts'][0]['fqdn'] = secondary_node.fqdn

    host_groups = DEFAULT_BASE_HOST_GROUPS
    if not args.bare:
        if hdp_version_tuple <= (2, 0, 13, 0):
            host_groups[0]['components'].extend(
                EXTRA_HOST_GROUPS_2_0_13_0[0]['components'])
            host_groups[1]['components'].extend(
                EXTRA_HOST_GROUPS_2_0_13_0[1]['components'])
        elif hdp_version_tuple <= (2, 4, 0, 0):
            host_groups[0]['components'].extend(
                EXTRA_HOST_GROUPS_2_4_0_0[0]['components'])
            host_groups[1]['components'].extend(
                EXTRA_HOST_GROUPS_2_4_0_0[1]['components'])
        elif hdp_version_tuple <= (2, 6, 4, 0):
            host_groups[0]['components'].extend(
                EXTRA_HOST_GROUPS_2_6_4_0[0]['components'])
            host_groups[1]['components'].extend(
                EXTRA_HOST_GROUPS_2_6_4_0[1]['components'])
        elif hdp_version_tuple <= (3, 1, 0, 0):
            host_groups[0]['components'].extend(
                EXTRA_HOST_GROUPS_3_1_0_0[0]['components'])
            host_groups[1]['components'].extend(
                EXTRA_HOST_GROUPS_3_1_0_0[1]['components'])
        else:
            host_groups[0]['components'].extend(
                DEFAULT_EXTRA_HOST_GROUPS[0]['components'])
            host_groups[1]['components'].extend(
                DEFAULT_EXTRA_HOST_GROUPS[1]['components'])

    if hdp_version_tuple <= (
            2, 0, 13,
            0):  # APP_TIMELINE_SERVER not applicable for this version
        host_groups[0]['components'] = list(
            filter(lambda x: x.get('name') != 'APP_TIMELINE_SERVER',
                   host_groups[0]['components']))

    repo_url_host = 'http://public-repo-1.hortonworks.com'
    ambari_repo_url = ('{}/ambari/centos{}/{}.x/updates/{}/'
                       'ambari.repo'.format(repo_url_host, os_major_version,
                                            args.ambari_version[0],
                                            args.ambari_version))
    hdp_repo_url = ('{}/HDP/centos{}/{}.x/updates/{}'.format(
        repo_url_host, os_major_version, args.hdp_version[0],
        args.hdp_version))

    for node in cluster:
        node.execute('wget -nv {} -O /etc/yum.repos.d/ambari.repo'.format(
            ambari_repo_url),
                     quiet=quiet)

    logger.info('Installing Ambari server and agents ...')
    primary_node.execute('yum -y install ambari-server', quiet=quiet)
    primary_node.execute('ambari-server setup -v -s', quiet=quiet)
    primary_node.execute('ambari-server start', quiet=quiet)

    for node in cluster:
        node.execute('yum -y install ambari-agent', quiet=quiet)
        ambari_agent_config = node.get_file(AMBARI_AGENT_CONFIG_FILE_PATH)
        node.put_file(
            AMBARI_AGENT_CONFIG_FILE_PATH,
            re.sub(r'(hostname)=.*', r'\1={}'.format(primary_node.fqdn),
                   ambari_agent_config))
        node.execute('ambari-agent start', quiet=quiet)

    mysql_config_commands = [
        ('wget -nv -O /tmp/mysql-connector-java.tar.gz '
         'https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-5.1.45.tar.gz'
         ), 'gzip -d /tmp/mysql-connector-java.tar.gz',
        'tar -xf /tmp/mysql-connector-java.tar -C /tmp',
        ('cp /tmp/mysql-connector-java-5.1.45/mysql-connector-java-5.1.45-bin.jar '
         '/tmp/mysql-connector-java.jar'),
        'ambari-server setup --jdbc-db=mysql --jdbc-driver=/tmp/mysql-connector-java.jar',
        'rm -rf /tmp/mysql-connector-java*'
    ]
    primary_node.execute(' && '.join(mysql_config_commands), quiet=quiet)

    # Docker for Mac exposes ports that can be accessed only with ``localhost:<port>`` so
    # use that instead of the hostname if the host name is ``moby``.
    hostname = ('localhost' if models.client.info().get('Name') == 'moby' else
                socket.getaddrinfo(
                    socket.gethostname(), 0, flags=socket.AI_CANONNAME)[0][3])
    port = primary_node.host_ports.get(AMBARI_PORT)
    server_url = 'http://{}:{}'.format(hostname, port)
    logger.info('Ambari server is now reachable at %s', server_url)

    ambari = Ambari(server_url,
                    username=DEFAULT_AMBARI_USERNAME,
                    password=DEFAULT_AMBARI_PASSWORD)

    logger.info('Waiting for all hosts to be visible in Ambari ...')

    def condition(ambari, cluster):
        cluster_hosts = {node.fqdn for node in cluster}
        ambari_hosts = {host.host_name for host in ambari.hosts}
        logger.debug('Cluster hosts: %s; Ambari hosts: %s', cluster_hosts,
                     ambari_hosts)
        return cluster_hosts == ambari_hosts

    wait_for_condition(condition=condition, condition_args=[ambari, cluster])

    logger.info('Updating install repo to use %s HDP version ...',
                args.hdp_version)
    # based off of release notes of https://bit.ly/2R06NKp
    if stack_version_tuple >= (2, 6):
        url = join_url_parts(hdp_repo_url, 'build.id')
        response = requests.get(url)
        response.raise_for_status()
        build_number = next(
            (int(item.split(':')[1].strip())
             for item in response.text.split('\n') if 'BUILD_NUMBER' in item),
            None)
        if not build_number:
            raise Exception(
                'Could not determine build number as required for repo setting. Build data found: ',
                response.text)

        # version_definitions not yet supported by Ambari client library - a TODO
        hdp_repo_version = '{}-{}'.format(args.hdp_version, build_number)
        version_definition = {
            'VersionDefinition': {
                'version_url':
                '{}/HDP-{}.xml'.format(hdp_repo_url, hdp_repo_version)
            }
        }
        url = join_url_parts(server_url, 'api', 'v1', 'version_definitions')
        data = json.dumps(version_definition)
        response = requests.post(
            url,
            data=data,
            auth=(DEFAULT_AMBARI_USERNAME, DEFAULT_AMBARI_PASSWORD),
            headers={'X-Requested-By': 'topology_hdp build'})
        response.raise_for_status()
    else:
        hdp_os = ambari.stacks('HDP').versions(
            stack_version).operating_systems('redhat6')
        hdp_os.repositories('HDP-{}'.format(stack_version)).update(
            base_url=hdp_repo_url, verify_base_url=False)
        hdp_repo_version = None
        build_number = None

    logger.info('Creating `cluster` with pre-defined components ...')
    ambari.blueprints('cluster').create(blueprint_name='cluster',
                                        stack_version=stack_version,
                                        stack_name='HDP',
                                        host_groups=host_groups)

    logger.info('Installing cluster components ...')
    hdp_cluster = ambari.clusters('cluster')
    # The INSTALL_ONLY option is not applicable for versions <= 2.0.13.0; those will install and start services.
    if hdp_version_tuple <= (2, 0, 13, 0):
        hdp_cluster = hdp_cluster.create(
            blueprint='cluster',
            default_password='******',
            host_groups=DEFAULT_CLUSTER_HOST_MAPPING)
    elif hdp_repo_version:
        hdp_cluster = hdp_cluster.create(
            blueprint='cluster',
            default_password='******',
            repository_version=hdp_repo_version,
            host_groups=DEFAULT_CLUSTER_HOST_MAPPING,
            provision_action='INSTALL_ONLY')
    else:
        hdp_cluster = hdp_cluster.create(
            blueprint='cluster',
            default_password='******',
            host_groups=DEFAULT_CLUSTER_HOST_MAPPING,
            provision_action='INSTALL_ONLY')

    # Some versions of Ambari report a wrong status immediately after the create call,
    # so allow some time before waiting on it.
    time.sleep(30)
    hdp_cluster.wait(timeout=5400, interval=30)

    logger.info('Waiting for all hosts to reach healthy state ...')

    def condition(ambari):
        health_report = hdp_cluster.health_report
        logger.debug('Ambari cluster health report: %s ...', health_report)
        return health_report.get('Host/host_state/HEALTHY') == len(
            list(ambari.hosts))

    wait_for_condition(condition=condition, condition_args=[ambari])

    logger.info('Waiting for components to be verified ...')

    def condition(ambari):
        comps = hdp_cluster.cluster.host_components.refresh()
        for comp in comps:
            if comp.state.upper() == 'UNKNOWN':
                logger.debug('Not ready with component `%s` ...',
                             comp.component_name)
                return False
        else:
            return True

    wait_for_condition(condition=condition, condition_args=[ambari])

    hdp_services_state = set(service['state']
                             for service in hdp_cluster.services.to_dict())
    if 'STARTED' in hdp_services_state or 'STARTING' in hdp_services_state:
        logger.info('Ambari task queued to stop services ...')
        hdp_cluster.cluster.services.stop().wait()

    logger.info('Stopping Ambari for saving to Docker image ...')
    for node in cluster:
        node.execute('ambari-agent stop', quiet=quiet)

    primary_node.execute('ambari-server stop', quiet=quiet)
    primary_node.execute('service postgresql stop', quiet=quiet)

    for node in cluster:
        node.execute('; '.join(
            ['yum clean all',
             'cat /dev/null > ~/.bash_history && history -c']),
                     quiet=quiet)

    repository = '{}/topology_hdp'.format(args.repository
                                          or defaults['DEFAULT_REPOSITORY'])
    tag_prefix = 'hdp{}_ambari{}'.format(args.hdp_version, args.ambari_version)
    primary_node_tag = '{}_{}'.format(tag_prefix, 'primary-node')
    secondary_node_tag = '{}_{}'.format(tag_prefix, 'secondary-node')

    logger.info('Committing the primary node container as %s %s',
                primary_node_tag,
                ('and pushing its image to {} ...'.format(repository)
                 if args.push else '...'))
    primary_node.commit(repository=repository,
                        tag=primary_node_tag,
                        push=args.push)
    logger.info('Committing the secondary node container as %s %s',
                secondary_node_tag,
                ('and pushing its image to {} ...'.format(repository)
                 if args.push else '...'))
    secondary_node.commit(repository=repository,
                          tag=secondary_node_tag,
                          push=args.push)

    if args.retain:
        logger.info('Starting Ambari ...')
        primary_node.execute('service postgresql start', quiet=quiet)
        primary_node.execute('ambari-server start', quiet=quiet)
        for node in cluster:
            node.execute('ambari-agent start', quiet=quiet)
    else:
        logger.info('Removing the containers ...')
        primary_node.stop()
        secondary_node.stop()
Example #11
def main(args):
    quiet = not args.verbose
    print_topology_meta(args.topology)

    if args.include_services and args.exclude_services:
        raise ValueError(
            'Cannot pass both --include-services and --exclude-services.')

    image_prefix = '{}/{}/topology_hdp:hdp{}_ambari{}'.format(
        args.registry, args.namespace or DEFAULT_NAMESPACE, args.hdp_version,
        args.ambari_version)
    primary_node_image = '{}_{}'.format(image_prefix, 'primary-node')
    secondary_node_image = '{}_{}'.format(image_prefix, 'secondary-node')

    clusterdock_config_host_dir = os.path.realpath(
        os.path.expanduser(args.clusterdock_config_directory))
    volumes = [{clusterdock_config_host_dir: CLUSTERDOCK_CLIENT_CONTAINER_DIR}]

    primary_node = Node(hostname=args.primary_node[0],
                        group='primary',
                        volumes=volumes,
                        image=primary_node_image,
                        ports=[{
                            AMBARI_PORT: AMBARI_PORT
                        } if args.predictable else AMBARI_PORT])

    secondary_nodes = [
        Node(hostname=hostname,
             group='secondary',
             volumes=volumes,
             image=secondary_node_image) for hostname in args.secondary_nodes
    ]

    cluster = Cluster(primary_node, *secondary_nodes)
    cluster.primary_node = primary_node
    cluster.secondary_nodes = secondary_nodes

    for node in cluster.nodes:
        node.volumes.append({'/sys/fs/cgroup': '/sys/fs/cgroup'})
        # do not use tempfile.mkdtemp, as systemd won't be able to bring services up
        # when the temporary directory ends up under /var/tmp/
        node.volumes.append(['/run', '/run/lock'])

    cluster.start(args.network)

    hdp_version_tuple = version_tuple(args.hdp_version)

    logger.debug('Starting PostgreSQL for Ambari server ...')

    # Need this as init system in Docker misreports on postgres start initially
    # Check https://github.com/docker-library/postgres/issues/146 for more
    def condition():
        primary_node.execute('service postgresql restart', quiet=quiet)
        if '1 row' in primary_node.execute(
                'PGPASSWORD=bigdata psql ambari '
                '-U ambari -h localhost -c "select 1"',
                quiet=quiet).output:
            return True

    wait_for_condition(condition=condition, time_between_checks=2)

    def condition():
        if 'running' in primary_node.execute('service postgresql status',
                                             quiet=quiet).output:
            return True

    wait_for_condition(condition=condition)

    # If the images are set to start the Ambari server/agents automatically,
    # give them some time to recover the right status.
    time.sleep(10)
    _update_node_names(cluster, quiet=quiet)

    # The HDP topology uses two pre-built images ('primary' and 'secondary'). If a cluster
    # larger than 2 nodes is started, some modifications need to be done.
    if len(secondary_nodes) > 1:
        _remove_files(nodes=secondary_nodes[1:],
                      files=['/hadoop/hdfs/data/current/*'],
                      quiet=quiet)

    logger.info('Starting Ambari server ...')
    primary_node.execute('ambari-server start', quiet=quiet)

    # Docker for Mac exposes ports that can be accessed only with ``localhost:<port>`` so
    # use that instead of the hostname if the host name is ``moby``.
    hostname = ('localhost'
                if client.info().get('Name') == 'moby' else socket.getaddrinfo(
                    socket.gethostname(), 0, flags=socket.AI_CANONNAME)[0][3])
    port = cluster.primary_node.host_ports.get(AMBARI_PORT)
    server_url = 'http://{}:{}'.format(hostname, port)
    logger.info('Ambari server is now reachable at %s', server_url)

    logger.info('Starting Ambari agents ...')
    for node in cluster:
        logger.debug('Starting Ambari agent on %s ...', node.fqdn)
        node.execute('ambari-agent start', quiet=quiet)

    ambari = Ambari(server_url, username='******', password='******')

    def condition(ambari, cluster):
        cluster_hosts = {node.fqdn for node in cluster}
        ambari_hosts = {host.host_name for host in ambari.hosts}
        logger.debug('Cluster hosts: %s; Ambari hosts: %s', cluster_hosts,
                     ambari_hosts)
        return cluster_hosts == ambari_hosts

    wait_for_condition(condition=condition, condition_args=[ambari, cluster])

    service_types_to_leave = (args.include_services.upper().split(',')
                              if args.include_services else [])
    service_types_to_remove = (args.exclude_services.upper().split(',')
                               if args.exclude_services else [])
    if service_types_to_leave or service_types_to_remove:
        for service in list(ambari.clusters(DEFAULT_CLUSTER_NAME).services):
            service_name = service.service_name.upper()
            if (service_name in service_types_to_remove
                    or (service_types_to_leave
                        and service_name not in service_types_to_leave)):
                logger.info('Removing cluster service (name = %s) ...',
                            service_name)
                service.delete()

    for node in secondary_nodes[1:]:
        logger.info('Adding %s to cluster ...', node.fqdn)
        ambari.clusters(DEFAULT_CLUSTER_NAME).hosts.create(node.fqdn)
        secondary_node = ambari.clusters(DEFAULT_CLUSTER_NAME).hosts(
            secondary_nodes[0].fqdn)
        for component in secondary_node.components:
            logger.debug('Adding component (%s) to cluster on host (%s) ...',
                         component.component_name, node.fqdn)
            host_components = ambari.clusters(DEFAULT_CLUSTER_NAME).hosts(
                node.fqdn).components
            host_components.create(component.component_name).wait()

        logger.debug('Installing all registered components on host (%s) ...',
                     node.fqdn)
        ambari.clusters(DEFAULT_CLUSTER_NAME).hosts(
            node.fqdn).components.install().wait()

    logger.info('Waiting for all hosts to reach healthy state ...')

    def condition(ambari):
        health_report = ambari.clusters(DEFAULT_CLUSTER_NAME).health_report
        logger.debug('Ambari cluster health report: %s ...', health_report)
        return health_report.get('Host/host_state/HEALTHY') == len(
            list(ambari.hosts))

    wait_for_condition(condition=condition, condition_args=[ambari])

    service_names = [
        service['service_name'] for service in ambari.clusters(
            DEFAULT_CLUSTER_NAME).services.to_dict()
    ]

    if 'ATLAS' in service_names:
        logger.info('Configuring Atlas required properties ...')
        _configure_atlas(ambari,
                         args.hdp_version,
                         atlas_server_host=cluster.primary_node.fqdn)

    if 'HIVE' in service_names:
        primary_node.execute('touch /etc/hive/sys.db.created', quiet=quiet)

    logger.info('Waiting for components to be ready ...')

    def condition(ambari):
        comps = ambari.clusters(
            DEFAULT_CLUSTER_NAME).cluster.host_components.refresh()
        for comp in comps:
            if comp.state.upper() == 'UNKNOWN':
                logger.debug('Not ready with component `%s` ...',
                             comp.component_name)
                return False
        else:
            return True

    wait_for_condition(condition=condition, condition_args=[ambari])

    if not args.dont_start_cluster:
        logger.info('Starting cluster services ...')
        ambari.clusters(DEFAULT_CLUSTER_NAME).services.start().wait(
            timeout=3600)

        if 'HBASE' in service_names:
            logger.info('Starting Thrift server ...')
            if hdp_version_tuple <= (2, 0, 13, 0):
                hbase_daemon_path = '/usr/lib/hbase/bin/hbase-daemon.sh'
            else:
                hbase_daemon_path = '/usr/hdp/current/hbase-master/bin/hbase-daemon.sh'
            primary_node.execute('{} start thrift -p {} '
                                 '--infoport {}'.format(
                                     hbase_daemon_path,
                                     HBASE_THRIFT_SERVER_PORT,
                                     HBASE_THRIFT_SERVER_INFO_PORT),
                                 quiet=quiet)
Example #12
def amclient(config):
    ''' Take a config json as input and return an ambariclient.client.Ambari
    instance '''
    client = Ambari(config['ambari']['host'], port=config['ambari']['port'],
        username=config['ambari']['user'], password=config['ambari']['pass'])
    return client
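
The config document only needs the four keys amclient() reads; a minimal usage sketch, with a placeholder file name and values:

import json

with open('config.json') as fh:
    config = json.load(fh)
# e.g. {"ambari": {"host": "ambari.example.com", "port": 8080, "user": "admin", "pass": "admin"}}

client = amclient(config)
for cluster in client.clusters:
    print(cluster.cluster_name)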
Example #13
    # Do some sanity checks on the config
    requiredAttribs = [
        'serviceName', 'package', 'components', 'configurations'
    ]
    for attrib in requiredAttribs:
        if attrib not in service_config:
            log.error("Invalid configuration. Missing required attribute '%s'",
                      attrib)
            sys.exit(3)

    log.info('Installing service: %s on ambari host: %s',
             service_config['serviceName'], args.ambari_host)
    ambari_host_uri = URL(args.ambari_host)
    ambari_client = Ambari(ambari_host_uri.host(),
                           port=ambari_host_uri.port(),
                           protocol=ambari_host_uri.scheme(),
                           username=args.username,
                           password=args.password,
                           identifier='hdiapps')
    # If this is being invoked from outside the cluster, we must fixup the href references contained within the responses
    ambari_client.client.request_params['hooks'] = dict(
        response=shared_lib.Fixup(ambari_host_uri).fixup)
    # Assume we only have 1 cluster managed by this Ambari installation
    cluster = next(ambari_client.clusters)
    log.debug('Cluster: %s, href: %s', cluster.cluster_name, cluster._href)

    # Pull in any extra dynamic configuration
    if args.extra_config:
        try:
            extra_config = json.loads(args.extra_config)
            log.debug(
                'Applying dynamic service configuration specified on command-line: %s',
Example #14
def main(cm_fqhn, cm_port, cm_user_name, cm_user_password, cm_cluster_name, cm_tls_enabled):
  print  cm_fqhn, cm_port, cm_user_name, cm_user_password, cm_cluster_name, cm_tls_enabled
  
  cm_protocol = 'https'
  if cm_tls_enabled.lower() == 'false':
    cm_protocol = 'http'
  #print 'Protocol:', cm_protocol
  ambari_accessor = api_accessor(host=cm_fqhn, 
                                 login=cm_user_name, 
                                 password=cm_user_password, 
                                 protocol=cm_protocol, 
                                 port=cm_port)
    
  zookeeper_config = get_properties2(cluster=cm_cluster_name, 
                                     config_type=CONFIG_TYPE_MAP['zookeeper'],
                                     accessor=ambari_accessor)
  zk_client_port = zookeeper_config.get(CONFIG_PROPERTY_MAP['zk_client_port'])
  if zk_client_port is not None:
    CONFIG_KEY_VALUE_MAP['ZOOKEEPER_PORT'] = zk_client_port

  hdfs_site_config = get_properties2(cluster=cm_cluster_name, 
                                     config_type=CONFIG_TYPE_MAP['hdfs'],
                                     accessor=ambari_accessor)
  #print "\nHDFS-SITE:\n", hdfs_site_config  
  hdfs_nn_ns = hdfs_site_config.get(CONFIG_PROPERTY_MAP['hdf_nn_ns'], None)
  hdfs_nn_rpc = hdfs_site_config.get(CONFIG_PROPERTY_MAP['hdf_nn_rpc'])
  print '\nHDFS-SITE:', hdfs_nn_ns, hdfs_nn_rpc
  if hdfs_nn_ns is None:
    CONFIG_KEY_VALUE_MAP['NAME_NODE'] = 'hdfs://' + hdfs_nn_rpc
  else:
    CONFIG_KEY_VALUE_MAP['NAME_NODE'] = hdfs_nn_ns

  yarn_site_config = get_properties2(cluster=cm_cluster_name, 
                                     config_type=CONFIG_TYPE_MAP['yarn'],
                                     accessor=ambari_accessor)
  #print "\nYARN-SITE:\n", yarn_site_config
  yarn_rm_address = yarn_site_config.get(CONFIG_PROPERTY_MAP['yarn_rm_address'])
  CONFIG_KEY_VALUE_MAP['JOB_TRACKER'] = yarn_rm_address  
    
  hbase_site_config = get_properties2(cluster=cm_cluster_name, 
                                     config_type=CONFIG_TYPE_MAP['hbase'],
                                     accessor=ambari_accessor)
  #print "\nHBASE-SITE:\n", hbase_site_config
                                                             
                                                             
  kafka_broker_config = get_properties2(cluster=cm_cluster_name, 
                                     config_type=CONFIG_TYPE_MAP['kafka'],
                                     accessor=ambari_accessor)
  #print "\nKAFAKA_BROKER:\n", kafka_broker_config
  kafka_client_security_protocol = kafka_broker_config.get(CONFIG_PROPERTY_MAP['kafka_client_security_protocol'])
  #print 'Kafka protocol:', kafka_client_security_protocol
  if kafka_client_security_protocol is not None:
    CONFIG_KEY_VALUE_MAP['KAFKA_SECURITY_PROTOCOL'] = kafka_client_security_protocol
                                       
                                                             
  oozie_server_config = get_properties2(cluster=cm_cluster_name, 
                                       config_type=CONFIG_TYPE_MAP['oozie'],
                                       accessor=ambari_accessor)
  #print "\nOOZIE_SERVER:\n", oozie_server_config
  oozie_url = oozie_server_config.get(CONFIG_PROPERTY_MAP['oozie_base_url'])
  CONFIG_KEY_VALUE_MAP['OOZIE_URL'] = oozie_url

  if cm_tls_enabled.lower() == 'false':
    api = Ambari(cm_fqhn, port=cm_port, username=cm_user_name, password=cm_user_password, validate_ssl=False)
  else:
    api = Ambari(cm_fqhn, port=cm_port, username=cm_user_name, password=cm_user_password, validate_ssl=False, protocol='https')

  # Get a list of all clusters
  cdh_cluster = None
  
  for c in api.clusters():
     if c.cluster_name == cm_cluster_name:
       print '\nCluster:', c.cluster_name
       cdh_cluster = c
       for x in cdh_cluster.hosts():
         HOST_MAP[x.host_name] =  x.host_name
       print '\nHost Name Mapping:'
       print HOST_MAP
       print '\nServices:'
       for x in cdh_cluster.services():
         print x.service_name
       #for x in cdh_cluster.configurations(): 
       #  print x.type
            
       
       #ZooKeeper
       zk_hosts = []
       zk_service  = cdh_cluster.services(SERVICE_NAME_MAP['zookeeper'])
       #print zk_service
       zk_server_cmps = zk_service.components(SERVICE_COMPONENT_NAME_MAP['zookeeper_server'])
       #print zk_server_cmps
       for x in zk_server_cmps.host_components:
         zk_hosts.append(x.host_name)
       #print 'ZOOKEEPER HOSTS:', zk_hosts
       if len(zk_hosts) > 0:
         CONFIG_KEY_VALUE_MAP['ZOOKEEPER_QUORUM'] = ' '.join(zk_hosts)
     
       #HDFS
       hdfs_nn_hosts = []
       hdfs_service  = cdh_cluster.services(SERVICE_NAME_MAP['hdfs'])
       hdfs_nn_cmps = hdfs_service.components(SERVICE_COMPONENT_NAME_MAP['namenode'])
       for x in hdfs_nn_cmps.host_components:
         hdfs_nn_hosts.append(x.host_name)
       
       
       #YARN RM
       yarn_rm_hosts = []
       yarn_service  = cdh_cluster.services(SERVICE_NAME_MAP['yarn'])
       yarn_rm_cmps = yarn_service.components(SERVICE_COMPONENT_NAME_MAP['resourcemanager'])
       for x in yarn_rm_cmps.host_components:
         yarn_rm_hosts.append(x.host_name)
       #print yarn_rm_hosts
    
    
       #OOZIE
       oozie_hosts = []
       oozie_service  = cdh_cluster.services(SERVICE_NAME_MAP['oozie'])
       oozie_server_cmps = oozie_service.components(SERVICE_COMPONENT_NAME_MAP['oozie_server'])
       for x in oozie_server_cmps.host_components:
         oozie_hosts.append(x.host_name)
       #print oozie_hosts
       
       #HBASE REST SERVER not managed by Ambari
       hbase_service  = cdh_cluster.services(SERVICE_NAME_MAP['hbase'])
       
       #KAFKA
       kafka_broker_hosts = []
       kafka_service  = cdh_cluster.services(SERVICE_NAME_MAP['kafka'])
       kafka_broker_cmps = kafka_service.components(SERVICE_COMPONENT_NAME_MAP['kafka_broker'])
       for x in kafka_broker_cmps.host_components:
         kafka_broker_hosts.append(x.host_name)
       if len(kafka_broker_hosts) > 0:
         CONFIG_KEY_VALUE_MAP['KAFKA_BROKER'] = ' '.join(kafka_broker_hosts)
       #print kafka_broker_hosts

       # Print all
       print '\nOUTPUT:\n', CONFIG_KEY_VALUE_MAP
Example #15
                    help="Ambari user password",
                    required=True)
parser.add_argument("-C",
                    action="store",
                    dest='cluster',
                    help="Cluster name to check",
                    required=True)
parser.add_argument("-S",
                    action="store",
                    dest='service',
                    help="Service name to check",
                    required=True)
args = parser.parse_args()

client = Ambari(args.host,
                port=args.port,
                username=args.user_name,
                password=args.password)
states = {'OK': 0, 'WARNING': 1, 'CRITICAL': 2, 'UNKNOWN': 3}

#for a in client.clusters(args.cluster).services(args.service).alerts.to_dict():
#  alerts = a.alerts.to_dict()
alerts = client.clusters(args.cluster).services(args.service).alerts.to_dict()
OK = []
WARNING = []
UNKNOWN = []
CRITICAL = []
MAINTENANCE = []
for a in alerts:
    if a['state'] == 'OK':
        OK.append((a['cluster_name'], a['service_name'], a['component_name'],
                   a['definition_name'], a['text']))
                    help="increase output verbosity",
                    required=True)
parser.add_argument("-u",
                    action="store",
                    dest='user_name',
                    help="Ambari username ",
                    required=True)
parser.add_argument("-p",
                    action="store",
                    dest='password',
                    help="Ambari user password",
                    required=True)
args = parser.parse_args()

client = Ambari(args.host,
                port=args.port,
                username=args.user_name,
                password=args.password)

aclusters = []
services = []
components = []
cnames = []

host_template = open(
    'ambari-host-template.conf.j2').read()  # "NAME", "ADDRESS",
#                           USER, PASSWORD, PORT, SERVICES, CLUSTER
service_template = open('ambari-service-template.conf').read()  # "SERVICE"


def create_group_conf(component, outfile):
    conf = host_group_template.replace('COMPONENT', component)
Example #17
def main(args):
    image_prefix = '{}/{}/topology_hdp:hdp{}_ambari{}'.format(
        args.registry, args.namespace or DEFAULT_NAMESPACE, args.hdp_version,
        args.ambari_version)
    primary_node_image = '{}_{}'.format(image_prefix, 'primary-node')
    secondary_node_image = '{}_{}'.format(image_prefix, 'secondary-node')

    primary_node = Node(hostname=args.primary_node[0],
                        group='primary',
                        image=primary_node_image,
                        ports=[{
                            AMBARI_PORT: AMBARI_PORT
                        } if args.predictable else AMBARI_PORT])

    secondary_nodes = [
        Node(hostname=hostname, group='secondary', image=secondary_node_image)
        for hostname in args.secondary_nodes
    ]

    cluster = Cluster(primary_node, *secondary_nodes)
    cluster.primary_node = primary_node
    cluster.secondary_nodes = secondary_nodes
    cluster.start(args.network)

    logger.debug('Starting PostgreSQL for Ambari server ...')
    primary_node.execute('service postgresql start', quiet=not args.verbose)
    _update_node_names(cluster, quiet=not args.verbose)

    # The HDP topology uses two pre-built images ('primary' and 'secondary'). If a cluster
    # larger than 2 nodes is started, some modifications need to be done.
    if len(secondary_nodes) > 1:
        _remove_files(nodes=secondary_nodes[1:],
                      files=['/hadoop/hdfs/data/current/*'])

    logger.info('Starting Ambari server ...')
    primary_node.execute('ambari-server start', quiet=not args.verbose)

    # Docker for Mac exposes ports that can be accessed only with ``localhost:<port>`` so
    # use that instead of the hostname if the host name is ``moby``.
    hostname = 'localhost' if client.info().get(
        'Name') == 'moby' else socket.gethostname()
    port = cluster.primary_node.host_ports.get(AMBARI_PORT)
    server_url = 'http://{}:{}'.format(hostname, port)
    logger.info('Ambari server is now reachable at %s', server_url)

    logger.info('Starting Ambari agents ...')
    for node in cluster:
        logger.debug('Starting Ambari agent on %s ...', node.fqdn)
        node.execute('ambari-agent start', quiet=not args.verbose)

    ambari = Ambari(server_url, username='******', password='******')

    def condition(ambari, cluster):
        cluster_hosts = {node.fqdn for node in cluster}
        ambari_hosts = {host.host_name for host in ambari.hosts}
        logger.debug('Cluster hosts: %s; Ambari hosts: %s', cluster_hosts,
                     ambari_hosts)
        return cluster_hosts == ambari_hosts

    wait_for_condition(condition=condition, condition_args=[ambari, cluster])

    for node in secondary_nodes[1:]:
        logger.info('Adding %s to cluster ...', node.fqdn)
        ambari.clusters('cluster').hosts.create(node.fqdn)
        for component in ambari.clusters('cluster').hosts(
                secondary_nodes[0].fqdn).components:
            logger.debug('Adding component (%s) to cluster on host (%s) ...',
                         component.component_name, node.fqdn)
            host_components = ambari.clusters('cluster').hosts(
                node.fqdn).components
            host_components.create(component.component_name).wait()

        logger.debug('Installing all registered components on host (%s) ...',
                     node.fqdn)
        ambari.clusters('cluster').hosts(node.fqdn).components.install().wait()

    if not args.dont_start_cluster:
        logger.debug(
            'Waiting for all hosts to reach healthy state before starting cluster ...'
        )

        def condition(ambari):
            health_report = ambari.clusters('cluster').health_report
            logger.debug('Ambari cluster health report: %s ...', health_report)
            return health_report.get('Host/host_state/HEALTHY') == len(
                list(ambari.hosts))

        wait_for_condition(condition=condition, condition_args=[ambari])

        logger.info('Starting cluster services ...')
        ambari.clusters('cluster').services.start().wait()
Example #18
class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
    NAME = 'ambari'

    def verify_file(self, path):
        ''' return true/false if this is possibly a valid file for this plugin to consume '''
        valid = False
        if super(InventoryModule, self).verify_file(path):
            # base class verifies that file exists and is readable by current user
            if path.endswith(('.ambari.yaml', '.ambari.yml')):
                valid = True
        return valid

    def parse(self, inventory, loader, path, cache=False):
        # call base method to ensure properties are available for use with other helper methods
        super(InventoryModule, self).parse(inventory, loader, path)

        # this method will parse 'common format' inventory sources and
        # update any options declared in DOCUMENTATION as needed
        config_data = self._read_config_data(path)

        # initialize Apache Ambari client
        self._initialize_client()

        # get cluster name
        _cluster_name = self._get_cluster_name()

        # get services name
        _services_name = self._get_services_name(_cluster_name)

        # get hosts name
        _hosts_name = self._get_hosts_name(_cluster_name)

        # populate groups
        self._populate_groups(_cluster_name, _services_name)

        # populate hosts
        self._populate_hosts(_cluster_name, _services_name, _hosts_name)

        # populate ambari server
        self._populate_ambari(_cluster_name)

        # populate localhost
        self._populate_localhost()

    ###########################################################################
    # Engine
    ###########################################################################

    def _populate_groups(self, cluster_name, services_name):
        '''
            Populate groups
            :param cluster_name: name of the cluster
            :param services_name: name of the services
        '''
        for service_name in services_name:
            self.inventory.add_group(service_name.lower())

            for component_name in self._get_components_name(
                    cluster_name, service_name):
                self.inventory.add_group(component_name.lower())
                if service_name.lower() != component_name.lower():
                    self.inventory.add_child(service_name.lower(),
                                             component_name.lower())

    def _populate_hosts(self, cluster_name, services_name, hosts_name):
        '''
            Populate hosts
            :param cluster_name: name of the cluster
            :param services_name: name of the services
            :param hosts_name: name of the hosts
        '''
        for host_name in hosts_name:
            self.inventory.add_host(host_name)

            configurations = {}

            for service_name in services_name:
                configurations_json = {}
                for service in self._get_service_current_configuration(
                        cluster_name, service_name)['items']:
                    configuration_json = {}
                    for configuration in service['configurations']:
                        configuration_json[configuration[
                            'type']] = configuration['properties']
                    configurations_json = configuration_json
                configurations[service_name.lower()] = configurations_json

            self.inventory.set_variable(host_name, 'configurations',
                                        configurations)

            host = self._get_host(host_name)
            self.inventory.set_variable(host_name, 'ansible_host', host_name)
            for field in host.fields:
                if (not field.startswith('host')
                        and not field.startswith('last')
                        and field != 'desired_configs'):
                    self.inventory.set_variable(host_name, field,
                                                getattr(host, field))

            if self.get_option('ansible_user'):
                self.inventory.set_variable(host_name, 'ansible_user',
                                            self.get_option('ansible_user'))
            if self.get_option('ansible_ssh_pass'):
                self.inventory.set_variable(
                    host_name, 'ansible_ssh_pass',
                    self.get_option('ansible_ssh_pass'))

            for component in self._get_host_components(cluster_name,
                                                       host_name):
                self.inventory.add_host(host_name,
                                        group=component.component_name.lower())

    def _populate_ambari(self, _cluster_name):
        '''
            Add the Ambari Server to the inventory file
            :param cluster_name: name of the cluster
        '''
        _group = 'ambari_server'
        _hostname = self.get_option('hostname')
        ambari_config = {}

        self.inventory.add_group(_group)
        self.inventory.add_host(_hostname, group=_group)
        ambari_config['protocol'] = self.get_option('protocol')
        ambari_config['port'] = self.get_option('port')
        ambari_config['username'] = self.get_option('username')
        ambari_config['password'] = self.get_option('password')
        ambari_config['validate_ssl'] = self.get_option('validate_ssl')
        ambari_config['cluster_name'] = _cluster_name

        self.inventory.set_variable(_hostname, 'ambari_config', ambari_config)

    def _populate_localhost(self):
        '''
            Add the localhost to the inventory file
        '''
        _group = 'local'
        _hostname = 'localhost'
        self.inventory.add_group(_group)
        self.inventory.add_host(_hostname, group=_group)
        self.inventory.set_variable(_hostname, 'ansible_host', '127.0.0.1')
        self.inventory.set_variable(_hostname, 'ansible_connection', 'local')
        self.inventory.set_variable(_hostname, 'ansible_become', 'false')

    ###########################################################################
    # Apache Ambari
    ###########################################################################

    def _initialize_client(self):
        '''
            Initialize Apache Ambari client
        '''
        # check the optional (not required) arguments
        protocol = 'https' if self.get_option('protocol') == 'https' else 'http'
        validate_ssl = self.get_option('validate_ssl') is True

        # disable SSL warnings when certificate validation is off
        if not validate_ssl:
            urllib3.disable_warnings()

        # initiate Apache Ambari client
        self._client = Ambari(self.get_option('hostname'),
                              port=int(self.get_option('port')),
                              username=self.get_option('username'),
                              password=self.get_option('password'),
                              protocol=protocol,
                              validate_ssl=validate_ssl)

    def _get_cluster_name(self):
        '''
            :return name of the cluster
        '''
        for cluster in self._client.clusters:
            return cluster.cluster_name

    def _get_services_name(self, cluster_name):
        '''
            :param cluster_name: name of the cluster
            :return names of the services installed on the cluster
        '''
        services_name = []
        for service in self._client.clusters(cluster_name).services:
            for component in service.components:
                services_name.append(component.service_name)
        return sorted(set(services_name))

    def _get_components_name(self, cluster_name, service_name):
        '''
            :param cluster_name: name of the cluster
            :param service_name: name of the service
            :return names of the components installed on the cluster
        '''
        components_name = []
        for component in self._client.clusters(cluster_name).services(
                service_name).components:
            components_name.append(component.component_name)
        return sorted(set(components_name))

    def _get_hosts_name(self, cluster_name):
        '''
            :param cluster_name: name of the cluster
            :return name of the healthy nodes on the cluster
        '''
        hosts_name = []
        for host in self._client.clusters(cluster_name).hosts:
            hosts_name.append(host.host_name)
        return sorted(set(hosts_name))

    def _get_host(self, host_name):
        '''
            :param host_name: name of the host
            :return host
        '''
        return self._client.hosts(host_name)

    def _get_host_components(self, cluster_name, host_name):
        '''
            :param cluster_name: name of the cluster
            :param host_name: name of the host
            :return components installed on the host
        '''
        return self._client.clusters(cluster_name).hosts(host_name).components

    def _get_service_current_configuration(self, cluster_name, service_name):
        '''
            :param cluster_name: name of the cluster
            :param service_name: name of the service
        '''
        protocol = 'http'
        if self.get_option('protocol'):
            if self.get_option('protocol') == 'https':
                protocol = self.get_option('protocol')

        url = protocol + '://' + self.get_option('hostname') + ':' + str(
            self.get_option('port')
        ) + '/api/v1/clusters/' + cluster_name + '/configurations/service_config_versions?service_name.in(' + service_name + ')&is_current=true'
        headers = {'X-Requested-By': 'ambari'}
        response = requests.get(url,
                                headers=headers,
                                auth=HTTPBasicAuth(
                                    self.get_option('username'),
                                    self.get_option('password')),
                                verify=False)

        if response.ok:
            return response.json()
        else:
            response.raise_for_status()
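
Given verify_file() and the get_option() calls above, an inventory source for this plugin is a YAML file whose name ends in .ambari.yaml or .ambari.yml. A hypothetical sample follows; the option names are taken from the code above, the values are placeholders, and the plugin key follows the NAME declared on the class.

# inventory.ambari.yml
plugin: ambari
hostname: ambari.example.com
port: 8080
username: admin
password: admin
protocol: https
validate_ssl: false
ansible_user: root
ansible_ssh_pass: changeme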
Example #19
    events.subscribe(models.Bootstrap, 'wait', bootstrap_done,
                     events.states.FINISHED)
    events.subscribe(models.Host, 'wait', host_progress,
                     events.states.PROGRESS)
    events.subscribe(models.Host, 'wait', host_done, events.states.FINISHED)

    config = get_default_config()
    config.update(parse_config_file())
    config.update(parse_cli_opts())

    config['validate_ssl'] = not config.pop('no_validate_ssl', False)

    if 'logger' in config:
        log(config.pop('logger'))

    ambari = Ambari(**config)

    try:
        version = ambari.version
    except Exception:  # pylint: disable=broad-except
        traceback.print_exc()
        six.print_("\nCould not connect to Ambari server - aborting!")
        sys.exit(1)

    shell_help = "\n".join([
        "Ambari client available as 'ambari'",
        " - Ambari Server is %s" % ambari.base_url,
        " - Ambari Version is %s\n" % utils.version_str(version),
        " - log(new_level) will reset the logger level",
        " - ambari_ref() will show you all available client method chains",
    ])