def list_running_hosts(config):
    """
    Use the new-style inventory script (_meta), available from Ansible 1.3: it
    performs better because the inventory script is not re-run for each node.
    """
    parser = urlparse(config['ambari_url'])
    client = Ambari(host=parser.hostname,
                    port=parser.port,
                    protocol=parser.scheme,
                    username=config['ambari_user'],
                    password=config['ambari_password'],
                    validate_ssl=False)
    cluster = next(client.clusters)

    inventory = collections.defaultdict(
        lambda: {'hosts': []},
        _meta={'hostvars': {}},
        all={}
    )

    # hostvars
    ambari_host = parser.hostname
    for host in cluster.hosts:
        inventory['_meta']['hostvars'][host.host_name] = get_node_props(host)
        if ambari_host == host.ip:
            ambari_host = host.host_name

    # groups: one per Ambari component
    for comp in cluster.host_components:
        inventory[comp.component_name]['hosts'].append(comp.host_name)

    # configs for all the components
    component_configs = get_all_configs(parser.scheme, parser.hostname, parser.port,
                                        parser.path, config['ambari_user'],
                                        config['ambari_password'], cluster.cluster_name)

    # stack versions
    stack, version = get_stack_versions(parser.scheme, parser.hostname, parser.port,
                                        parser.path, config['ambari_user'],
                                        config['ambari_password'], cluster.cluster_name)

    # group_vars
    inventory['all']['vars'] = {
        'ambari_cluster_name': cluster.cluster_name,
        'ambari_host': ambari_host,
        'ambari_password': config['ambari_password'],
        'ambari_url': config['ambari_url'],
        'ambari_user': config['ambari_user'],
        'ambari_component_configs': component_configs,
        'host_stack': stack,
        'host_stack_version': version
    }
    return inventory
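# For reference: a hedged sketch of the structure list_running_hosts() returns. The
# '_meta'/'hostvars' layout follows Ansible's dynamic-inventory protocol; the host,
# group, and cluster names below are illustrative placeholders, not real output.
EXAMPLE_INVENTORY = {
    '_meta': {
        'hostvars': {
            'node1.example.com': {},  # filled by get_node_props(host)
        },
    },
    'all': {
        'vars': {
            'ambari_cluster_name': 'mycluster',
            'ambari_host': 'node1.example.com',
            # ... plus the remaining ambari_* / host_stack* vars set above
        },
    },
    'NAMENODE': {'hosts': ['node1.example.com']},  # one group per Ambari component
}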
def main():
    module = AnsibleModule(
        argument_spec=dict(
            ambari_server=dict(default='localhost', type='str'),
            ambari_user=dict(default='admin', type='str'),
            ambari_pass=dict(default='admin', type='str'),
            cluster_name=dict(default='hadoop-poc', type='str'),
            config_name=dict(type='str'),
            properties=dict(type='dict')
        )
    )

    ambari_server = module.params.get('ambari_server')
    ambari_user = module.params.get('ambari_user')
    ambari_pass = module.params.get('ambari_pass')
    cluster_name = module.params.get('cluster_name')
    config_name = module.params.get('config_name')
    properties = module.params.get('properties')

    client = Ambari(ambari_server, port=8080,
                    username=ambari_user, password=ambari_pass)
    update_config(next(client.clusters), config_name, properties)

    module.exit_json(changed=True, ansible_facts=dict())
def _initialize_client(self):
    '''Initialize the Apache Ambari client.'''
    # non-required options, with defaults
    protocol = 'http'
    if self.get_option('protocol') == 'https':
        protocol = 'https'

    validate_ssl = self.get_option('validate_ssl') is True

    # silence urllib3's warning when SSL validation is off
    if not validate_ssl:
        urllib3.disable_warnings()

    # instantiate the Apache Ambari client
    self._client = Ambari(self.get_option('hostname'),
                          port=int(self.get_option('port')),
                          username=self.get_option('username'),
                          password=self.get_option('password'),
                          protocol=protocol,
                          validate_ssl=validate_ssl)
def test_lazy_loading():
    patch_method = 'ambariclient.client.HttpClient.request'

    with patch(patch_method, MagicMock(return_value={})) as http_request:
        client = Ambari('localhost')
        clusters = client.clusters
        assert http_request.call_count == 0, "Sent a request prior to inflation"
        clusters.inflate()
        assert http_request.call_count == 1, "inflating collection didn't hit the server"
        clusters('testcluster')
        assert http_request.call_count == 1, "getting a single cluster hit the server again"
        clusters('testcluster').inflate()
        assert http_request.call_count == 2, "inflating model didn't hit the server"

    with patch(patch_method, MagicMock(return_value={})) as http_request:
        client = Ambari('localhost')
        cluster = client.clusters('testcluster')
        assert http_request.call_count == 0, "getting model inflated collection"
        cluster.hosts
        assert http_request.call_count == 0, "accessing relationship on model inflated it"
        cluster.hosts.to_dict()
        assert http_request.call_count == 1, "to_dict on relationship didn't inflate it"

    with patch(patch_method, MagicMock(return_value={})) as http_request:
        client = Ambari('localhost')
        cluster = client.clusters('testcluster')
        assert http_request.call_count == 0, "getting model inflated collection"
        cluster.cluster_name
        assert http_request.call_count == 0, "accessing prepopulated field on model inflated it"
        cluster.health_report
        assert http_request.call_count == 1, "accessing field on model didn't inflate it"
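# A minimal usage sketch of the lazy-loading behavior the test above verifies; the
# hostname, credentials, and cluster name are placeholders. Building the client,
# fetching a collection, and reading a prepopulated field cost no HTTP requests;
# inflating a model (explicitly, or by touching a server-side field) costs exactly one.
from ambariclient.client import Ambari

client = Ambari('ambari.example.com', port=8080, username='admin', password='admin')
clusters = client.clusters            # no request yet
cluster = clusters('mycluster')       # still no request
name = cluster.cluster_name           # prepopulated field: still no request
report = cluster.health_report        # first request fires here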
def _initialize_client(self):
    '''Initialize the Apache Ambari client.'''
    # silence urllib3's warning when SSL validation is off
    if self.config.get('validate_ssl') is False:
        urllib3.disable_warnings()

    # instantiate the Apache Ambari client
    self._client = Ambari(self.config.get('hostname'),
                          port=int(self.config.get('port')),
                          username=self.config.get('username'),
                          password=self.config.get('password'),
                          protocol=self.config.get('protocol'),
                          validate_ssl=self.config.get('validate_ssl'))
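# A sketch of the config mapping this variant reads, inferred from the keys used
# above; all values are illustrative placeholders.
EXAMPLE_CONFIG = {
    'hostname': 'ambari.example.com',
    'port': 8080,
    'username': 'admin',
    'password': 'admin',
    'protocol': 'https',
    'validate_ssl': False,
}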
def __init__(self, vcap=None, vcap_filename=None):
    self.log = Logger().get_logger(self.__class__.__name__)

    assert (vcap is not None or vcap_filename is not None) \
        and (vcap is None or vcap_filename is None), \
        "You must provide either a vcap object OR a vcap_filename parameter, not both"

    if vcap_filename is not None:
        import json
        with open(vcap_filename) as f:
            vcap = json.load(f)

    try:
        self.USER = vcap['cluster']['user']
        self.PASSWORD = vcap['cluster']['password']
        self.AMBARI_URL = vcap['cluster']['service_endpoints']['ambari_console']
        self.CLUSTER_ID = vcap['cluster']['cluster_id']
    except KeyError as e:
        self.log.error("Couldn't parse vcap credential json - attribute {} not found.".format(str(e)))
        raise

    # ensure we are compatible with both Python 2 and 3
    try:
        from urllib.parse import urlparse
    except ImportError:
        from urlparse import urlparse

    url = urlparse(self.AMBARI_URL)
    self.HOST = url.hostname
    self.PORT = url.port
    self.PROTOCOL = url.scheme

    from ambariclient.client import Ambari
    self.ambari_client = Ambari(self.HOST,
                                port=self.PORT,
                                username=self.USER,
                                password=self.PASSWORD,
                                protocol=self.PROTOCOL)
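# The keys read in the try block above imply a vcap document of roughly this shape.
# Values are placeholders; only the fields the constructor touches are shown.
EXAMPLE_VCAP = {
    'cluster': {
        'user': 'admin',
        'password': 'secret',
        'cluster_id': '20180101-000000-001',
        'service_endpoints': {
            'ambari_console': 'https://ambari.example.com:9443',
        },
    },
}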
def main(args):
    quiet = not args.verbose
    print_topology_meta(args.topology)

    models.LOCALTIME_MOUNT = False
    # 'privileged' containers are needed for systemd to work without issues
    models.PRIVILEGED_CONTAINER = True

    # always assume a 'centosX' image name, so index 6 is the major-version digit
    os_major_version = (args.operating_system or DEFAULT_OPERATING_SYSTEM)[6]
    image = '{}/topology_nodebase:{}'.format(defaults['DEFAULT_REPOSITORY'],
                                             args.operating_system or DEFAULT_OPERATING_SYSTEM)
    primary_node = models.Node(hostname='node-1', group='nodes', image=image,
                               ports=[{AMBARI_PORT: AMBARI_PORT}])
    secondary_node = models.Node(hostname='node-2', group='nodes', image=image)
    cluster = models.Cluster(primary_node, secondary_node)
    cluster.start(args.network)

    hdp_version_tuple = version_tuple(args.hdp_version)
    stack_version = '{}.{}'.format(hdp_version_tuple[0], hdp_version_tuple[1])
    stack_version_tuple = (hdp_version_tuple[0], hdp_version_tuple[1])

    DEFAULT_CLUSTER_HOST_MAPPING[0]['hosts'][0]['fqdn'] = primary_node.fqdn
    DEFAULT_CLUSTER_HOST_MAPPING[1]['hosts'][0]['fqdn'] = secondary_node.fqdn

    host_groups = DEFAULT_BASE_HOST_GROUPS
    if not args.bare:
        if hdp_version_tuple <= (2, 0, 13, 0):
            host_groups[0]['components'].extend(EXTRA_HOST_GROUPS_2_0_13_0[0]['components'])
            host_groups[1]['components'].extend(EXTRA_HOST_GROUPS_2_0_13_0[1]['components'])
        elif hdp_version_tuple <= (2, 4, 0, 0):
            host_groups[0]['components'].extend(EXTRA_HOST_GROUPS_2_4_0_0[0]['components'])
            host_groups[1]['components'].extend(EXTRA_HOST_GROUPS_2_4_0_0[1]['components'])
        elif hdp_version_tuple <= (2, 6, 4, 0):
            host_groups[0]['components'].extend(EXTRA_HOST_GROUPS_2_6_4_0[0]['components'])
            host_groups[1]['components'].extend(EXTRA_HOST_GROUPS_2_6_4_0[1]['components'])
        elif hdp_version_tuple <= (3, 1, 0, 0):
            host_groups[0]['components'].extend(EXTRA_HOST_GROUPS_3_1_0_0[0]['components'])
            host_groups[1]['components'].extend(EXTRA_HOST_GROUPS_3_1_0_0[1]['components'])
        else:
            host_groups[0]['components'].extend(DEFAULT_EXTRA_HOST_GROUPS[0]['components'])
            host_groups[1]['components'].extend(DEFAULT_EXTRA_HOST_GROUPS[1]['components'])

    if hdp_version_tuple <= (2, 0, 13, 0):
        # APP_TIMELINE_SERVER is not applicable for this version
        host_groups[0]['components'] = list(
            filter(lambda x: x.get('name') != 'APP_TIMELINE_SERVER',
                   host_groups[0]['components']))

    repo_url_host = 'http://public-repo-1.hortonworks.com'
    ambari_repo_url = ('{}/ambari/centos{}/{}.x/updates/{}/'
                       'ambari.repo'.format(repo_url_host, os_major_version,
                                            args.ambari_version[0], args.ambari_version))
    hdp_repo_url = ('{}/HDP/centos{}/{}.x/updates/{}'.format(repo_url_host, os_major_version,
                                                             args.hdp_version[0],
                                                             args.hdp_version))

    for node in cluster:
        node.execute('wget -nv {} -O /etc/yum.repos.d/ambari.repo'.format(ambari_repo_url),
                     quiet=quiet)

    logger.info('Installing Ambari server and agents ...')
    primary_node.execute('yum -y install ambari-server', quiet=quiet)
    primary_node.execute('ambari-server setup -v -s', quiet=quiet)
    primary_node.execute('ambari-server start', quiet=quiet)

    for node in cluster:
        node.execute('yum -y install ambari-agent', quiet=quiet)
        ambari_agent_config = node.get_file(AMBARI_AGENT_CONFIG_FILE_PATH)
        node.put_file(AMBARI_AGENT_CONFIG_FILE_PATH,
                      re.sub(r'(hostname)=.*', r'\1={}'.format(primary_node.fqdn),
                             ambari_agent_config))
        node.execute('ambari-agent start', quiet=quiet)

    mysql_config_commands = [
        ('wget -nv -O /tmp/mysql-connector-java.tar.gz '
         'https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-5.1.45.tar.gz'),
        'gzip -d /tmp/mysql-connector-java.tar.gz',
        'tar -xf /tmp/mysql-connector-java.tar -C /tmp',
        ('cp /tmp/mysql-connector-java-5.1.45/mysql-connector-java-5.1.45-bin.jar '
         '/tmp/mysql-connector-java.jar'),
        'ambari-server setup --jdbc-db=mysql --jdbc-driver=/tmp/mysql-connector-java.jar',
        'rm -rf /tmp/mysql-connector-java*'
    ]
    primary_node.execute(' && '.join(mysql_config_commands), quiet=quiet)

    # Docker for Mac exposes ports that can be accessed only via ``localhost:<port>``, so
    # use that instead of the hostname if the host name is ``moby``.
    hostname = ('localhost' if models.client.info().get('Name') == 'moby'
                else socket.getaddrinfo(socket.gethostname(), 0,
                                        flags=socket.AI_CANONNAME)[0][3])
    port = primary_node.host_ports.get(AMBARI_PORT)
    server_url = 'http://{}:{}'.format(hostname, port)
    logger.info('Ambari server is now reachable at %s', server_url)

    ambari = Ambari(server_url, username=DEFAULT_AMBARI_USERNAME,
                    password=DEFAULT_AMBARI_PASSWORD)

    logger.info('Waiting for all hosts to be visible in Ambari ...')

    def condition(ambari, cluster):
        cluster_hosts = {node.fqdn for node in cluster}
        ambari_hosts = {host.host_name for host in ambari.hosts}
        logger.debug('Cluster hosts: %s; Ambari hosts: %s', cluster_hosts, ambari_hosts)
        return cluster_hosts == ambari_hosts
    wait_for_condition(condition=condition, condition_args=[ambari, cluster])

    logger.info('Updating install repo to use %s HDP version ...', args.hdp_version)
    # based off of the release notes at https://bit.ly/2R06NKp
    if stack_version_tuple >= (2, 6):
        url = join_url_parts(hdp_repo_url, 'build.id')
        response = requests.get(url)
        response.raise_for_status()
        build_number = next((int(item.split(':')[1].strip())
                             for item in response.text.split('\n')
                             if 'BUILD_NUMBER' in item), None)
        if not build_number:
            raise Exception('Could not determine build number as required for repo setting. '
                            'Build data found: ', response.text)
        # version_definitions are not yet supported by the Ambari client library - a TODO
        hdp_repo_version = '{}-{}'.format(args.hdp_version, build_number)
        version_definition = {
            'VersionDefinition': {
                'version_url': '{}/HDP-{}.xml'.format(hdp_repo_url, hdp_repo_version)
            }
        }
        url = join_url_parts(server_url, 'api', 'v1', 'version_definitions')
        data = json.dumps(version_definition)
        response = requests.post(url, data=data,
                                 auth=(DEFAULT_AMBARI_USERNAME, DEFAULT_AMBARI_PASSWORD),
                                 headers={'X-Requested-By': 'topology_hdp build'})
        response.raise_for_status()
    else:
        hdp_os = ambari.stacks('HDP').versions(stack_version).operating_systems('redhat6')
        hdp_os.repositories('HDP-{}'.format(stack_version)).update(base_url=hdp_repo_url,
                                                                   verify_base_url=False)
        hdp_repo_version = None
        build_number = None

    logger.info('Creating `cluster` with pre-defined components ...')
    ambari.blueprints('cluster').create(blueprint_name='cluster', stack_version=stack_version,
                                        stack_name='HDP', host_groups=host_groups)

    logger.info('Installing cluster components ...')
    hdp_cluster = ambari.clusters('cluster')
    # The INSTALL_ONLY provision action is not applicable for versions <= 2.0.13.0;
    # for those, this call installs and starts the services.
    if hdp_version_tuple <= (2, 0, 13, 0):
        hdp_cluster = hdp_cluster.create(blueprint='cluster', default_password='******',
                                         host_groups=DEFAULT_CLUSTER_HOST_MAPPING)
    elif hdp_repo_version:
        hdp_cluster = hdp_cluster.create(blueprint='cluster', default_password='******',
                                         repository_version=hdp_repo_version,
                                         host_groups=DEFAULT_CLUSTER_HOST_MAPPING,
                                         provision_action='INSTALL_ONLY')
    else:
        hdp_cluster = hdp_cluster.create(blueprint='cluster', default_password='******',
                                         host_groups=DEFAULT_CLUSTER_HOST_MAPPING,
                                         provision_action='INSTALL_ONLY')

    # Some versions of Ambari report the wrong status while waiting, so allow some slack time.
    time.sleep(30)
    hdp_cluster.wait(timeout=5400, interval=30)

    logger.info('Waiting for all hosts to reach healthy state ...')

    def condition(ambari):
        health_report = hdp_cluster.health_report
        logger.debug('Ambari cluster health report: %s ...', health_report)
        return health_report.get('Host/host_state/HEALTHY') == len(list(ambari.hosts))
    wait_for_condition(condition=condition, condition_args=[ambari])

    logger.info('Waiting for components to be verified ...')

    def condition(ambari):
        comps = hdp_cluster.cluster.host_components.refresh()
        for comp in comps:
            if comp.state.upper() == 'UNKNOWN':
                logger.debug('Not ready with component `%s` ...', comp.component_name)
                return False
        else:
            return True
    wait_for_condition(condition=condition, condition_args=[ambari])

    hdp_services_state = set(service['state'] for service in hdp_cluster.services.to_dict())
    if 'STARTED' in hdp_services_state or 'STARTING' in hdp_services_state:
        logger.info('Ambari task queued to stop services ...')
        hdp_cluster.cluster.services.stop().wait()

    logger.info('Stopping Ambari for saving to Docker image ...')
    for node in cluster:
        node.execute('ambari-agent stop', quiet=quiet)
    primary_node.execute('ambari-server stop', quiet=quiet)
    primary_node.execute('service postgresql stop', quiet=quiet)

    for node in cluster:
        node.execute('; '.join(['yum clean all',
                                'cat /dev/null > ~/.bash_history && history -c']),
                     quiet=quiet)

    repository = '{}/topology_hdp'.format(args.repository or defaults['DEFAULT_REPOSITORY'])
    tag_prefix = 'hdp{}_ambari{}'.format(args.hdp_version, args.ambari_version)
    primary_node_tag = '{}_{}'.format(tag_prefix, 'primary-node')
    secondary_node_tag = '{}_{}'.format(tag_prefix, 'secondary-node')

    logger.info('Committing the primary node container as %s %s', primary_node_tag,
                ('and pushing its image to {} ...'.format(repository)
                 if args.push else '...'))
    primary_node.commit(repository=repository, tag=primary_node_tag, push=args.push)
    logger.info('Committing the secondary node container as %s %s', secondary_node_tag,
                ('and pushing its image to {} ...'.format(repository)
                 if args.push else '...'))
    secondary_node.commit(repository=repository, tag=secondary_node_tag, push=args.push)

    if args.retain:
        logger.info('Starting Ambari ...')
        primary_node.execute('service postgresql start', quiet=quiet)
        primary_node.execute('ambari-server start', quiet=quiet)
        for node in cluster:
            node.execute('ambari-agent start', quiet=quiet)
    else:
        logger.info('Removing the containers ...')
        primary_node.stop()
        secondary_node.stop()
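# version_tuple() is defined elsewhere in this topology. The version comparisons above
# only rely on it behaving roughly like this sketch (an assumption for illustration,
# not the actual implementation): '2.6.4.0' -> (2, 6, 4, 0).
def version_tuple(version):
    # split a dotted version string into a tuple of ints for ordered comparison
    return tuple(int(part) for part in version.split('.'))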
def main(args):
    quiet = not args.verbose
    print_topology_meta(args.topology)

    if args.include_services and args.exclude_services:
        raise ValueError('Cannot pass both --include-services and --exclude-services.')

    image_prefix = '{}/{}/topology_hdp:hdp{}_ambari{}'.format(args.registry,
                                                              args.namespace or DEFAULT_NAMESPACE,
                                                              args.hdp_version,
                                                              args.ambari_version)
    primary_node_image = '{}_{}'.format(image_prefix, 'primary-node')
    secondary_node_image = '{}_{}'.format(image_prefix, 'secondary-node')

    clusterdock_config_host_dir = os.path.realpath(
        os.path.expanduser(args.clusterdock_config_directory))
    volumes = [{clusterdock_config_host_dir: CLUSTERDOCK_CLIENT_CONTAINER_DIR}]

    primary_node = Node(hostname=args.primary_node[0], group='primary', volumes=volumes,
                        image=primary_node_image,
                        ports=[{AMBARI_PORT: AMBARI_PORT} if args.predictable else AMBARI_PORT])
    secondary_nodes = [Node(hostname=hostname, group='secondary', volumes=volumes,
                            image=secondary_node_image)
                       for hostname in args.secondary_nodes]

    cluster = Cluster(primary_node, *secondary_nodes)
    cluster.primary_node = primary_node
    cluster.secondary_nodes = secondary_nodes

    for node in cluster.nodes:
        node.volumes.append({'/sys/fs/cgroup': '/sys/fs/cgroup'})
        # Do not use tempfile.mkdtemp: systemd won't be able to bring services up when
        # the temp directory ends up being created under /var/tmp/.
        node.volumes.append(['/run', '/run/lock'])

    cluster.start(args.network)

    hdp_version_tuple = version_tuple(args.hdp_version)

    logger.debug('Starting PostgreSQL for Ambari server ...')

    # Needed because the init system inside Docker initially misreports postgres status.
    # See https://github.com/docker-library/postgres/issues/146 for more.
    def condition():
        primary_node.execute('service postgresql restart', quiet=quiet)
        if '1 row' in primary_node.execute('PGPASSWORD=bigdata psql ambari '
                                           '-U ambari -h localhost -c "select 1"',
                                           quiet=quiet).output:
            return True
    wait_for_condition(condition=condition, time_between_checks=2)

    def condition():
        if 'running' in primary_node.execute('service postgresql status', quiet=quiet).output:
            return True
    wait_for_condition(condition=condition)

    # If the images are set to start the Ambari server/agents, give them some time
    # to recover the right status.
    time.sleep(10)

    _update_node_names(cluster, quiet=quiet)

    # The HDP topology uses two pre-built images ('primary' and 'secondary'). If a cluster
    # larger than 2 nodes is started, some modifications need to be done.
    if len(secondary_nodes) > 1:
        _remove_files(nodes=secondary_nodes[1:], files=['/hadoop/hdfs/data/current/*'],
                      quiet=quiet)

    logger.info('Starting Ambari server ...')
    primary_node.execute('ambari-server start', quiet=quiet)

    # Docker for Mac exposes ports that can be accessed only via ``localhost:<port>``, so
    # use that instead of the hostname if the host name is ``moby``.
    hostname = ('localhost' if client.info().get('Name') == 'moby'
                else socket.getaddrinfo(socket.gethostname(), 0,
                                        flags=socket.AI_CANONNAME)[0][3])
    port = cluster.primary_node.host_ports.get(AMBARI_PORT)
    server_url = 'http://{}:{}'.format(hostname, port)
    logger.info('Ambari server is now reachable at %s', server_url)

    logger.info('Starting Ambari agents ...')
    for node in cluster:
        logger.debug('Starting Ambari agent on %s ...', node.fqdn)
        node.execute('ambari-agent start', quiet=quiet)

    ambari = Ambari(server_url, username='******', password='******')

    def condition(ambari, cluster):
        cluster_hosts = {node.fqdn for node in cluster}
        ambari_hosts = {host.host_name for host in ambari.hosts}
        logger.debug('Cluster hosts: %s; Ambari hosts: %s', cluster_hosts, ambari_hosts)
        return cluster_hosts == ambari_hosts
    wait_for_condition(condition=condition, condition_args=[ambari, cluster])

    service_types_to_leave = (args.include_services.upper().split(',')
                              if args.include_services else [])
    service_types_to_remove = (args.exclude_services.upper().split(',')
                               if args.exclude_services else [])
    if service_types_to_leave or service_types_to_remove:
        for service in list(ambari.clusters(DEFAULT_CLUSTER_NAME).services):
            service_name = service.service_name.upper()
            if (service_name in service_types_to_remove
                    or (service_types_to_leave
                        and service_name not in service_types_to_leave)):
                logger.info('Removing cluster service (name = %s) ...', service_name)
                service.delete()

    for node in secondary_nodes[1:]:
        logger.info('Adding %s to cluster ...', node.fqdn)
        ambari.clusters(DEFAULT_CLUSTER_NAME).hosts.create(node.fqdn)
        secondary_node = ambari.clusters(DEFAULT_CLUSTER_NAME).hosts(secondary_nodes[0].fqdn)
        for component in secondary_node.components:
            logger.debug('Adding component (%s) to cluster on host (%s) ...',
                         component.component_name, node.fqdn)
            host_components = ambari.clusters(DEFAULT_CLUSTER_NAME).hosts(node.fqdn).components
            host_components.create(component.component_name).wait()
        logger.debug('Installing all registered components on host (%s) ...', node.fqdn)
        ambari.clusters(DEFAULT_CLUSTER_NAME).hosts(node.fqdn).components.install().wait()

    logger.info('Waiting for all hosts to reach healthy state ...')

    def condition(ambari):
        health_report = ambari.clusters(DEFAULT_CLUSTER_NAME).health_report
        logger.debug('Ambari cluster health report: %s ...', health_report)
        return health_report.get('Host/host_state/HEALTHY') == len(list(ambari.hosts))
    wait_for_condition(condition=condition, condition_args=[ambari])

    service_names = [service['service_name']
                     for service in ambari.clusters(DEFAULT_CLUSTER_NAME).services.to_dict()]

    if 'ATLAS' in service_names:
        logger.info('Configuring Atlas required properties ...')
        _configure_atlas(ambari, args.hdp_version, atlas_server_host=cluster.primary_node.fqdn)

    if 'HIVE' in service_names:
        primary_node.execute('touch /etc/hive/sys.db.created', quiet=quiet)

    logger.info('Waiting for components to be ready ...')

    def condition(ambari):
        comps = ambari.clusters(DEFAULT_CLUSTER_NAME).cluster.host_components.refresh()
        for comp in comps:
            if comp.state.upper() == 'UNKNOWN':
                logger.debug('Not ready with component `%s` ...', comp.component_name)
                return False
        else:
            return True
    wait_for_condition(condition=condition, condition_args=[ambari])

    if not args.dont_start_cluster:
        logger.info('Starting cluster services ...')
        ambari.clusters(DEFAULT_CLUSTER_NAME).services.start().wait(timeout=3600)

        if 'HBASE' in service_names:
            logger.info('Starting Thrift server ...')
            if hdp_version_tuple <= (2, 0, 13, 0):
                hbase_daemon_path = '/usr/lib/hbase/bin/hbase-daemon.sh'
            else:
                hbase_daemon_path = '/usr/hdp/current/hbase-master/bin/hbase-daemon.sh'
            primary_node.execute('{} start thrift -p {} '
                                 '--infoport {}'.format(hbase_daemon_path,
                                                        HBASE_THRIFT_SERVER_PORT,
                                                        HBASE_THRIFT_SERVER_INFO_PORT),
                                 quiet=quiet)
def amclient(config):
    '''Take a config dict as input, return an ambariclient.client.Ambari instance.'''
    client = Ambari(config['ambari']['host'],
                    port=config['ambari']['port'],
                    username=config['ambari']['user'],
                    password=config['ambari']['pass'])
    return client
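# A hedged sketch of the config document amclient() expects, derived from the keys it
# reads above; the host and credentials are placeholders.
config = {
    'ambari': {
        'host': 'ambari.example.com',
        'port': 8080,
        'user': 'admin',
        'pass': 'admin',
    },
}
client = amclient(config)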
# Do some sanity checks on the config
requiredAttribs = ['serviceName', 'package', 'components', 'configurations']
for attrib in requiredAttribs:
    if attrib not in service_config:
        log.error("Invalid configuration. Missing required attribute '%s'", attrib)
        sys.exit(3)

log.info('Installing service: %s on ambari host: %s',
         service_config['serviceName'], args.ambari_host)

ambari_host_uri = URL(args.ambari_host)
ambari_client = Ambari(ambari_host_uri.host(),
                       port=ambari_host_uri.port(),
                       protocol=ambari_host_uri.scheme(),
                       username=args.username,
                       password=args.password,
                       identifier='hdiapps')

# If this is being invoked from outside the cluster, we must fix up the href references
# contained within the responses.
ambari_client.client.request_params['hooks'] = dict(
    response=shared_lib.Fixup(ambari_host_uri).fixup)

# Assume we only have one cluster managed by this Ambari installation
cluster = next(ambari_client.clusters)
log.debug('Cluster: %s, href: %s', cluster.cluster_name, cluster._href)

# Pull in any extra dynamic configuration
if args.extra_config:
    try:
        extra_config = json.loads(args.extra_config)
        log.debug('Applying dynamic service configuration specified on command-line: %s',
                  extra_config)
def main(cm_fqhn, cm_port, cm_user_name, cm_user_password, cm_cluster_name, cm_tls_enabled):
    print(cm_fqhn, cm_port, cm_user_name, cm_user_password, cm_cluster_name, cm_tls_enabled)

    cm_protocol = 'https'
    if cm_tls_enabled.lower() == 'false':
        cm_protocol = 'http'
    #print('Protocol:', cm_protocol)

    ambari_accessor = api_accessor(host=cm_fqhn,
                                   login=cm_user_name,
                                   password=cm_user_password,
                                   protocol=cm_protocol,
                                   port=cm_port)

    zookeeper_config = get_properties2(cluster=cm_cluster_name,
                                       config_type=CONFIG_TYPE_MAP['zookeeper'],
                                       accessor=ambari_accessor)
    zk_client_port = zookeeper_config.get(CONFIG_PROPERTY_MAP['zk_client_port'])
    if zk_client_port is not None:
        CONFIG_KEY_VALUE_MAP['ZOOKEEPER_PORT'] = zk_client_port

    hdfs_site_config = get_properties2(cluster=cm_cluster_name,
                                       config_type=CONFIG_TYPE_MAP['hdfs'],
                                       accessor=ambari_accessor)
    #print("\nHDFS-SITE:\n", hdfs_site_config)
    hdfs_nn_ns = hdfs_site_config.get(CONFIG_PROPERTY_MAP['hdf_nn_ns'], None)
    hdfs_nn_rpc = hdfs_site_config.get(CONFIG_PROPERTY_MAP['hdf_nn_rpc'])
    print('\nHDFS-SITE:', hdfs_nn_ns, hdfs_nn_rpc)
    if hdfs_nn_ns is None:
        CONFIG_KEY_VALUE_MAP['NAME_NODE'] = 'hdfs://' + hdfs_nn_rpc
    else:
        CONFIG_KEY_VALUE_MAP['NAME_NODE'] = hdfs_nn_ns

    yarn_site_config = get_properties2(cluster=cm_cluster_name,
                                       config_type=CONFIG_TYPE_MAP['yarn'],
                                       accessor=ambari_accessor)
    #print("\nYARN-SITE:\n", yarn_site_config)
    yarn_rm_address = yarn_site_config.get(CONFIG_PROPERTY_MAP['yarn_rm_address'])
    CONFIG_KEY_VALUE_MAP['JOB_TRACKER'] = yarn_rm_address

    hbase_site_config = get_properties2(cluster=cm_cluster_name,
                                        config_type=CONFIG_TYPE_MAP['hbase'],
                                        accessor=ambari_accessor)
    #print("\nHBASE-SITE:\n", hbase_site_config)

    kafka_broker_config = get_properties2(cluster=cm_cluster_name,
                                          config_type=CONFIG_TYPE_MAP['kafka'],
                                          accessor=ambari_accessor)
    #print("\nKAFKA_BROKER:\n", kafka_broker_config)
    kafka_client_security_protocol = kafka_broker_config.get(
        CONFIG_PROPERTY_MAP['kafka_client_security_protocol'])
    #print('Kafka protocol:', kafka_client_security_protocol)
    if kafka_client_security_protocol is not None:
        CONFIG_KEY_VALUE_MAP['KAFKA_SECURITY_PROTOCOL'] = kafka_client_security_protocol

    oozie_server_config = get_properties2(cluster=cm_cluster_name,
                                          config_type=CONFIG_TYPE_MAP['oozie'],
                                          accessor=ambari_accessor)
    #print("\nOOZIE_SERVER:\n", oozie_server_config)
    oozie_url = oozie_server_config.get(CONFIG_PROPERTY_MAP['oozie_base_url'])
    CONFIG_KEY_VALUE_MAP['OOZIE_URL'] = oozie_url

    if cm_tls_enabled.lower() == 'false':
        api = Ambari(cm_fqhn, port=cm_port, username=cm_user_name,
                     password=cm_user_password, validate_ssl=False)
    else:
        api = Ambari(cm_fqhn, port=cm_port, username=cm_user_name,
                     password=cm_user_password, validate_ssl=False, protocol='https')

    # Find the target cluster among all clusters managed by this Ambari server
    cdh_cluster = None
    for c in api.clusters():
        if c.cluster_name == cm_cluster_name:
            print('\nCluster:', c.cluster_name)
            cdh_cluster = c

    for x in cdh_cluster.hosts():
        HOST_MAP[x.host_name] = x.host_name
    print('\nHost Name Mapping:')
    print(HOST_MAP)

    print('\nServices:')
    for x in cdh_cluster.services():
        print(x.service_name)

    #for x in cdh_cluster.configurations():
    #    print(x.type)

    #ZooKeeper
    zk_hosts = []
    zk_service = cdh_cluster.services(SERVICE_NAME_MAP['zookeeper'])
    zk_server_cmps = zk_service.components(SERVICE_COMPONENT_NAME_MAP['zookeeper_server'])
    for x in zk_server_cmps.host_components:
        zk_hosts.append(x.host_name)
    #print('ZOOKEEPER HOSTS:', zk_hosts)
    if len(zk_hosts) > 0:
        CONFIG_KEY_VALUE_MAP['ZOOKEEPER_QUORUM'] = ' '.join(zk_hosts)

    #HDFS
    hdfs_nn_hosts = []
    hdfs_service = cdh_cluster.services(SERVICE_NAME_MAP['hdfs'])
    hdfs_nn_cmps = hdfs_service.components(SERVICE_COMPONENT_NAME_MAP['namenode'])
    for x in hdfs_nn_cmps.host_components:
        hdfs_nn_hosts.append(x.host_name)

    #YARN RM
    yarn_rm_hosts = []
    yarn_service = cdh_cluster.services(SERVICE_NAME_MAP['yarn'])
    yarn_rm_cmps = yarn_service.components(SERVICE_COMPONENT_NAME_MAP['resourcemanager'])
    for x in yarn_rm_cmps.host_components:
        yarn_rm_hosts.append(x.host_name)
    #print(yarn_rm_hosts)

    #OOZIE
    oozie_hosts = []
    oozie_service = cdh_cluster.services(SERVICE_NAME_MAP['oozie'])
    oozie_server_cmps = oozie_service.components(SERVICE_COMPONENT_NAME_MAP['oozie_server'])
    for x in oozie_server_cmps.host_components:
        oozie_hosts.append(x.host_name)
    #print(oozie_hosts)

    #HBASE REST SERVER is not managed by Ambari
    hbase_service = cdh_cluster.services(SERVICE_NAME_MAP['hbase'])

    #KAFKA
    kafka_broker_hosts = []
    kafka_service = cdh_cluster.services(SERVICE_NAME_MAP['kafka'])
    kafka_broker_cmps = kafka_service.components(SERVICE_COMPONENT_NAME_MAP['kafka_broker'])
    for x in kafka_broker_cmps.host_components:
        kafka_broker_hosts.append(x.host_name)
    if len(kafka_broker_hosts) > 0:
        CONFIG_KEY_VALUE_MAP['KAFKA_BROKER'] = ' '.join(kafka_broker_hosts)
    #print(kafka_broker_hosts)

    # Print everything collected
    print('\nOUTPUT:\n', CONFIG_KEY_VALUE_MAP)
help="Ambari user password", required=True) parser.add_argument("-C", action="store", dest='cluster', help="Cluster name to check", required=True) parser.add_argument("-S", action="store", dest='service', help="Service name to check", required=True) args = parser.parse_args() client = Ambari(args.host, port=args.port, username=args.user_name, password=args.password) states = {'OK': 0, 'WARNING': 1, 'CRITICAL': 2, 'UNKNOWN': 3} #for a in client.clusters(args.cluster).services(args.service).alerts.to_dict(): # alerts = a.alerts.to_dict() alerts = client.clusters(args.cluster).services(args.service).alerts.to_dict() OK = [] WARNING = [] UNKNOWN = [] CRITICAL = [] MAINTENANCE = [] for a in alerts: if a['state'] == 'OK': OK.append((a['cluster_name'], a['service_name'], a['component_name'], a['definition_name'], a['text']))
def main(args):
    image_prefix = '{}/{}/topology_hdp:hdp{}_ambari{}'.format(args.registry,
                                                              args.namespace or DEFAULT_NAMESPACE,
                                                              args.hdp_version,
                                                              args.ambari_version)
    primary_node_image = '{}_{}'.format(image_prefix, 'primary-node')
    secondary_node_image = '{}_{}'.format(image_prefix, 'secondary-node')

    primary_node = Node(hostname=args.primary_node[0], group='primary',
                        image=primary_node_image,
                        ports=[{AMBARI_PORT: AMBARI_PORT} if args.predictable else AMBARI_PORT])
    secondary_nodes = [Node(hostname=hostname, group='secondary', image=secondary_node_image)
                       for hostname in args.secondary_nodes]

    cluster = Cluster(primary_node, *secondary_nodes)
    cluster.primary_node = primary_node
    cluster.secondary_nodes = secondary_nodes
    cluster.start(args.network)

    logger.debug('Starting PostgreSQL for Ambari server ...')
    primary_node.execute('service postgresql start', quiet=not args.verbose)

    _update_node_names(cluster, quiet=not args.verbose)

    # The HDP topology uses two pre-built images ('primary' and 'secondary'). If a cluster
    # larger than 2 nodes is started, some modifications need to be done.
    if len(secondary_nodes) > 1:
        _remove_files(nodes=secondary_nodes[1:], files=['/hadoop/hdfs/data/current/*'])

    logger.info('Starting Ambari server ...')
    primary_node.execute('ambari-server start', quiet=not args.verbose)

    # Docker for Mac exposes ports that can be accessed only via ``localhost:<port>``, so
    # use that instead of the hostname if the host name is ``moby``.
    hostname = 'localhost' if client.info().get('Name') == 'moby' else socket.gethostname()
    port = cluster.primary_node.host_ports.get(AMBARI_PORT)
    server_url = 'http://{}:{}'.format(hostname, port)
    logger.info('Ambari server is now reachable at %s', server_url)

    logger.info('Starting Ambari agents ...')
    for node in cluster:
        logger.debug('Starting Ambari agent on %s ...', node.fqdn)
        node.execute('ambari-agent start', quiet=not args.verbose)

    ambari = Ambari(server_url, username='******', password='******')

    def condition(ambari, cluster):
        cluster_hosts = {node.fqdn for node in cluster}
        ambari_hosts = {host.host_name for host in ambari.hosts}
        logger.debug('Cluster hosts: %s; Ambari hosts: %s', cluster_hosts, ambari_hosts)
        return cluster_hosts == ambari_hosts
    wait_for_condition(condition=condition, condition_args=[ambari, cluster])

    for node in secondary_nodes[1:]:
        logger.info('Adding %s to cluster ...', node.fqdn)
        ambari.clusters('cluster').hosts.create(node.fqdn)
        for component in ambari.clusters('cluster').hosts(secondary_nodes[0].fqdn).components:
            logger.debug('Adding component (%s) to cluster on host (%s) ...',
                         component.component_name, node.fqdn)
            host_components = ambari.clusters('cluster').hosts(node.fqdn).components
            host_components.create(component.component_name).wait()
        logger.debug('Installing all registered components on host (%s) ...', node.fqdn)
        ambari.clusters('cluster').hosts(node.fqdn).components.install().wait()

    if not args.dont_start_cluster:
        logger.debug('Waiting for all hosts to reach healthy state before starting cluster ...')

        def condition(ambari):
            health_report = ambari.clusters('cluster').health_report
            logger.debug('Ambari cluster health report: %s ...', health_report)
            return health_report.get('Host/host_state/HEALTHY') == len(list(ambari.hosts))
        wait_for_condition(condition=condition, condition_args=[ambari])

        logger.info('Starting cluster services ...')
        ambari.clusters('cluster').services.start().wait()
events.subscribe(models.Bootstrap, 'wait', bootstrap_done, events.states.FINISHED)
events.subscribe(models.Host, 'wait', host_progress, events.states.PROGRESS)
events.subscribe(models.Host, 'wait', host_done, events.states.FINISHED)

config = get_default_config()
config.update(parse_config_file())
config.update(parse_cli_opts())
config['validate_ssl'] = not config.pop('no_validate_ssl', False)
if 'logger' in config:
    log(config.pop('logger'))

ambari = Ambari(**config)
try:
    version = ambari.version
except Exception:  # pylint: disable=broad-except
    traceback.print_exc()
    six.print_("\nCould not connect to Ambari server - aborting!")
    sys.exit(1)

shell_help = "\n".join([
    "Ambari client available as 'ambari'",
    " - Ambari Server is %s" % ambari.base_url,
    " - Ambari Version is %s\n" % utils.version_str(version),
    " - log(new_level) will reset the logger level",
    " - ambari_ref() will show you all available client method chains",
])