def main():
    """Ansible entry point: collect the CDH cluster configuration.

    Probes port 7183 on the target host to decide whether Cloudera
    Manager should be reached over TLS (7183 open) or plain HTTP.
    """
    module = AnsibleModule(argument_spec=MODULE_ARGUMENTS)
    host_a = module.params.get('host', None)
    pass_a = module.params.get('pass', None)
    user_a = module.params.get('user', None)
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        result = sock.connect_ex((host_a, 7183))
    finally:
        # BUG FIX: the probe socket was never closed, leaking a file
        # descriptor on every module invocation.
        sock.close()
    if result == 0:
        # Port is open then use https port
        api = ApiResource(server_host=host_a, username=user_a,
                          password=pass_a, use_tls=True, version=9)
    else:
        # Port is not open then use http port
        api = ApiResource(server_host=host_a, username=user_a,
                          password=pass_a, version=9)
    cdh_config = CdhConfiguration(api, CLUSTER_NAME)
    module.exit_json(changed=True, msg='Collected',
                     config=cdh_config.get_config_for_cluster(CLUSTER_NAME))
def cm_api_resource(self):
    """Build an ApiResource for Cloudera Manager, falling back to the
    host credentials when the default CM credentials are rejected (401).

    Returns the ApiResource, or None when a non-401 API error occurred.
    """
    ar = None
    try:
        ar = ApiResource(self.cm_host, self.cm_port,
                         self.cm_username, self.cm_password)
        ar.echo('Authenticated')  # Issue a sample request to test the conn
    except ApiException as aexc:  # FIX: modern except syntax, consistent with the rest of the codebase
        if aexc.code == 401:
            log.debug("Changing default API username to {0}".format(self.cm_username))
            self.cm_username = self.host_username
            self.cm_password = self.host_password
            ar = ApiResource(self.cm_host, self.cm_port,
                             self.cm_username, self.cm_password)
        else:
            log.error("Api Exception connecting to ClouderaManager: {0}".format(aexc))
    # BUG FIX: the constructed resource was built but never returned,
    # so callers always received None.
    return ar
def init_cluster():
    """Create the KTS cluster once every agent has registered with CM."""
    # Wait for all cloudera agent processes to come up.
    BDVLIB_ServiceWait(
        [["services", "cloudera_scm_agent", NODE_GROUP_ID, "kts"]])
    # Make sure cloudera manager has received registration
    # for all new agents.
    expected_hosts = get_hosts_for_service(
        ["services", "cloudera_scm_agent"])
    api = ApiResource(CM_HOST, username="******", password="******")
    while True:
        registered = [h.hostname for h in api.get_all_hosts()]
        setup_logger.info("Currently registered hosts with CM " +
                          str(registered))
        if all(name in registered for name in expected_hosts):
            break
        setup_logger.info(
            "waiting for new nodes to register with cloudera manager")
        time.sleep(10)
    manager = api.get_cloudera_manager()
    manager.update_config(CM_CONFIG)
    cluster = api.create_cluster(CLUSTER_NAME, CDH_MAJOR_VERSION,
                                 CDH_FULL_VERSION)
    KTS_HOSTS = ConfigMeta.getWithTokens(
        ['nodegroups', NODE_GROUP_ID, 'roles', 'kts', 'fqdns'])
    cluster.add_hosts(KTS_HOSTS)
    return (cluster, manager)
def main(): """ Add peer to the cluster. @rtype: number @returns: A number representing the status of success. """ settings = parse_args() if len(sys.argv) == 1 or len(sys.argv) > 17: print_usage_message() quit(1) api_target = ApiResource(settings.server, settings.port, settings.username, settings.password, settings.use_tls, 14) type_name = 'AWS_ACCESS_KEY_AUTH' account_configs = { 'aws_access_key': settings.aws_access_key, 'aws_secret_key': settings.aws_secret_key } try: api_target.create_external_account(settings.account_name, settings.account_name, type_name, account_configs=account_configs) print "S3 Account Successfully Added" except ApiException as error: if 'already exists' in str(error): print 'Peer Already exists' else: raise error return 0
def main(args):
    """Connect to Cloudera Manager and dump its state.

    Positional args (all optional): 1=host, 2=user, 3=password.
    """
    cm_host = get(args, 1, "localhost")
    cm_user = get(args, 2, "admin")
    cm_pass = get(args, 3, "admin")
    dump(ApiResource(cm_host, username=cm_user, password=cm_pass))
def init_cluster():
    """Register all hosts with CM, then create and prepare the cluster."""
    # Wait for all cloudera agent processes to come up.
    setup_logger.info("Creating Clutser.")
    BDVLIB_ServiceWait([["services", "cloudera_scm_agent", NODE_GROUP_ID]])
    # Make sure cloudera manager has received registration
    # for all new agents.
    expected_hosts = get_hosts_for_service(
        ["services", "cloudera_scm_agent"])
    api = ApiResource(CM_HOST, username=ADMIN_USER, password=ADMIN_PASS)
    while True:
        registered = [h.hostname for h in api.get_all_hosts()]
        setup_logger.info("Currently registered hosts with CM " +
                          str(registered))
        if all(name in registered for name in expected_hosts):
            break
        setup_logger.info(
            "waiting for new nodes to register with cloudera manager")
        time.sleep(10)
    manager = api.get_cloudera_manager()
    manager.update_config(CM_CONFIG)
    cluster = api.create_cluster(CLUSTER_NAME, CDH_MAJOR_VERSION,
                                 CDH_FULL_VERSION)
    cluster.add_hosts(ALL_HOSTS)
    # Turn off host swap alerting.
    hosts_swap_alert_off(api)
    setup_logger.info("Setting Up SPARK2 Repo....")
    add_spark2_repo(api)
    # Set java home on all hosts.
    setup_logger.info("Setting Up Java Path....")
    hosts_set_javahome(api)
    return (cluster, manager)
def main(): parser = cm_args_parser() args = parser.parse_args() print "Connecting to CM on host " + args.cm_host + "... ", api = ApiResource(args.cm_host, username=args.cm_user, password=args.cm_password) print "done."
def main(): """ Add peer to the cluster. @rtype: number @returns: A number representing the status of success. """ settings = parse_args() if len(sys.argv) == 1 or len(sys.argv) > 17: print_usage_message() quit(1) api_target = ApiResource(settings.server, settings.port, settings.username, settings.password, settings.use_tls, 14) cloudera_manager = api_target.get_cloudera_manager() try: cloudera_manager.create_peer(settings.peer_name, settings.source_cm_url, settings.source_user, settings.source_password) print "Peer Successfully Added" except ApiException as error: if 'already exists' in str(error): print 'Peer Already exists' else: raise error return 0
def get_hosts(self):
    """Return a mapping of CM host id -> IP address for every known host."""
    from cm_api.api_client import ApiResource
    api = ApiResource(self.host, self.port, self.username, self.password)
    return dict((h.hostId, h.ipAddress) for h in api.get_all_hosts())
def main(): #print sys.argv[0] #for i in range(1, len(sys.argv)): # print "param ", i, sys.argv[i] # get a handle on the instance of CM that we have running api = ApiResource(cm_host, cm_port, cm_username, cm_password, version=13) # get the CM instancepython2.7 setuptools cm = ClouderaManager(api) cluster = api.get_cluster(cluster_name) # distribution_parcels(api, cluster) cmd = cluster.first_run() while cmd.success == None: cmd = cmd.fetch() if cmd.success != True: print "The first run command failed: " + cmd.resultMessage() exit(0) print "First run successfully executed. Your cluster has been set up!"
def do_call(host, port, user, password, cluster_name, service_role_name, random_index):
    """Print one report line per known service role port for a cluster.

    A cluster_name of None selects the first cluster CM returns.
    Returns -2 when the named cluster cannot be found.
    """
    api = ApiResource(host, port, user, password, False, MAN_API_VERSION)
    for cluster in api.get_all_clusters():
        if cluster_name is None:
            break
        elif cluster_name == cluster.name:
            break
    if cluster_name is not None and cluster_name != cluster.name:
        # BUG FIX: corrected "Cloud not find" typo in the error message.
        print >> sys.stderr, "Could not find cluster: " + cluster_name
        return -2
    do_print_header()
    # (service type, role type, port config key) triples to report on.
    port_items = [
        ('HDFS', 'NAMENODE', 'namenode_port'),
        ('KUDU', 'KUDU_MASTER', 'webserver_port'),
        ('HUE', 'HUE_SERVER', 'hue_http_port'),
        ('HIVE', 'HIVESERVER2', 'hs2_thrift_address_port'),
        ('IMPALA', 'IMPALAD', 'beeswax_port'),
        ('FLUME', 'AGENT', 'agent_http_port'),
        ('KAFKA', 'KAFKA_BROKER', 'port'),
        ('ZOOKEEPER', 'SERVER', 'clientPort'),
    ]
    for service in cluster.get_all_services():
        for svc_type, role_type, port_key in port_items:
            do_print_line_item(api, service, service_role_name, random_index,
                               svc_type, role_type, port_key, [], [])
    do_print_footer()
def connect(cm_api, cm_username, cm_password, use_proxy=False):
    ''' Wait for ten minutes for CM to come up '''
    attempts_left = 120  # 120 tries x 5 second sleep == ten minutes
    while attempts_left > 0:
        attempts_left -= 1
        try:
            logging.info("Checking CM availability....")
            # change name of proxy if necessary
            proxy = urllib2.ProxyHandler({'http': 'proxy'})
            api = ApiResource(cm_api, username=cm_username,
                              password=cm_password, version=14)
            if use_proxy:
                # pylint: disable=W0212
                api._client._opener.add_handler(proxy)
            cloudera_manager = api.get_cloudera_manager()
            # An authenticated user lookup doubles as a liveness probe.
            api.get_user(cm_username)
            return api, cloudera_manager
        except Exception:
            logging.warning("CM is not up")
            time.sleep(5)
    logging.error("CM did not come UP")
    sys.exit(-1)
def runner(self, args, display=True):
    ''' Main section: fetch each configured metric from CM and emit Events. '''
    plugin_args = args.split() if args is not None and (len(args.strip()) > 0) else ""
    options = self._read_args(plugin_args)
    api = ApiResource(server_host=options.cmhost,
                      server_port=options.cmport,
                      username=options.cmuser,
                      password=options.cmpassword,
                      version=11)

    def fetch(key, query):
        ''' Do the work of getting a metric from CM, run me over a list of metrics to fetch '''
        try:
            items = api.query_timeseries(query)
            # Latest data point of the last time series in the result.
            value = items[0].timeSeries[-1].data[-1].value
            service = items[0].timeSeriesQuery.split('=')[1].strip()
            source = items[0].timeSeries[-1].metadata.attributes['serviceName']
            return Event(TIMESTAMP_MILLIS(), source,
                         'hadoop.%s.%s' % (service, key), [], value)
        except Exception:
            # BUG FIX: was a bare 'except:', which also swallowed
            # SystemExit/KeyboardInterrupt. Missing metrics yield None
            # and are filtered out below.
            return None

    events = filter(None, map(lambda metric: fetch(*metric), self._metrics.iteritems()))
    if display:
        self._do_display(events)
    return events
def setup_api_resources(self):
    """Create the CM API handle and cache the manager and cluster objects."""
    api_version = self._get_api_version()
    self.api = ApiResource(server_host=self.cm_server_address,
                           server_port=self.cm_server_port,
                           username=self.username,
                           password=self.password,
                           version=api_version)
    self.cm = self.api.get_cloudera_manager()
    self.cluster = self.api.get_cluster('Cluster 1 (clusterdock)')
def main(): args = parse_args() api = ApiResource(args.host, username=args.user, password=args.password) cluster = find_cluster(api, args.cluster) check_new_service_does_not_exist(api, cluster, args.service_name) if args.subparsers_name == "clone": template_service = find_template_service(api, cluster, args.based_on) else: master_host = find_master_host(api, args.host, args.master_host) deps = find_dependencies(args, cluster) parcel = get_best_parcel(api, cluster) if not parcel: ensure_parcel_repo_added(api) for attempt in xrange(1, MAX_PARCEL_REPO_WAIT_SECS + 1): parcel = get_best_parcel(api, cluster) if parcel: break print "Could not find parcel in attempt %d, will sleep and retry" % (attempt,) time.sleep(1) else: raise Exception("No parcel showed up in %d seconds" % (MAX_PARCEL_REPO_WAIT_SECS,)) ensure_parcel_activated(cluster, parcel) if args.subparsers_name == "create": create_new_service(api, cluster, args.service_name, deps, args.scratch_dirs, master_host) else: clone_existing_service(cluster, args.service_name, template_service)
def getActiveCMConfig(totalconfig):
    """Collect full service and role-group configs from every CM instance.

    Returns a nested dict keyed by CM fqdn -> cluster display name ->
    service name -> ('Service' | role type) -> config name ->
    {'value': ..., 'default': ...}.
    """
    cmConfig = {}
    for cm in totalconfig['cmfqdn']:
        conn = totalconfig[cm]
        api = ApiResource(cm, conn['port'], conn['user'],
                          conn['passwd'], conn['tls'], conn['apiv'])
        cmConfig[cm] = {}
        for cluster in api.get_all_clusters():
            cluster_cfg = {}
            cmConfig[cm][cluster.displayName] = cluster_cfg
            for service in cluster.get_all_services():
                service_cfg = {'Service': {}}
                cluster_cfg[service.name] = service_cfg
                # Service-level config; get_config(view='full') returns a
                # pair, the first element being the service config dict.
                for name, config in service.get_config(view='full')[0].items():
                    service_cfg['Service'][name] = {
                        'value': config.value,
                        'default': config.default
                    }
                # Role-config-group configs, keyed by role type.
                for roleGroup in service.get_all_role_config_groups():
                    group_cfg = {}
                    service_cfg[roleGroup.roleType] = group_cfg
                    for name, config in roleGroup.get_config(view='full').items():
                        group_cfg[name] = {
                            'value': config.value,
                            'default': config.default
                        }
                    print(roleGroup.roleType)
    #print(json.dumps(cmConfig, indent=4))
    return cmConfig
def create_cluster():
    """Return the cluster object named CLUSTER_NAME from Cloudera Manager.

    CM connection details are read from the ansible group_vars file.
    """
    # FIX: the config file was parsed three times (once per key);
    # load it once and reuse the parsed mapping.
    cfg = load_cfg(ansible_path + "/group_vars/all")
    api = ApiResource(cfg.get("cm_host"),
                      version=API_VERSION,
                      username=cfg.get("cm_username"),
                      password=cfg.get("cm_password"))
    return api.get_cluster(CLUSTER_NAME)
def connect_cm(cm_api, cm_username, cm_password):
    """Return an API v6 handle to the given Cloudera Manager host."""
    return ApiResource(cm_api,
                       version=6,
                       username=cm_username,
                       password=cm_password)
def main():
    """Ansible entry point for managing CDH parcels via Cloudera Manager."""
    module = build_module()
    # Map each requested state to its handler function.
    choice_map = {'present': present, 'distributed': distributed,
                  'activated': activated, 'absent': absent, 'infos': infos}
    params = module.params
    has_changed = False
    # NOTE(review): the password is read from params["cm_login"] — the same
    # key as the username. This looks like it should be a separate
    # password key; confirm against build_module()'s argument_spec.
    api = ApiResource(params["cm_host"],
                      username=params["cm_login"],
                      password=params["cm_login"],
                      version=params["api_version"])
    try:
        cluster = api.get_cluster(params["cluster_name"])
    except ApiException as e:
        module.fail_json(msg="Cluster error : {0}".format(e))
    if params["product"] and params["version"]:
        parcel = get_parcel(cluster, params["product"], params["version"])
        if params["state"] != "infos":
            # Handlers return (error_flag, changed_flag, message, metadata).
            error, has_changed, result, meta = choice_map.get(params['state'])(cluster, parcel)
            if error:
                module.fail_json(msg=result)
            module.exit_json(changed=has_changed, msg=result, meta=meta)
        else:
            # state == "infos" with an explicit product/version:
            # report just that parcel.
            meta = {
                "product": parcel.product,
                "version": parcel.version,
                "stage": parcel.stage
            }
            module.exit_json(changed=False, msg="Parcel informations gathered", meta=meta)
    elif not params["product"] and not params["version"] and params["state"] == "infos":
        # No product/version given: report info for all parcels on the cluster.
        module.exit_json(changed=has_changed, msg="Parcel informations gathered", meta=infos(cluster))
def setup(self, p_cm_host, p_cm_user, p_cm_pass, p_cm_version, p_cluster,
          p_cm_port=None, p_use_tls=False):
    """Connect to CM, cache all hosts, and (optionally) select a cluster.

    Populates self.cluster, self.services, self.name and self.topology
    (hostId -> hostname) for the cluster whose displayName is p_cluster.
    """
    self.cm_api = ApiResource(p_cm_host, server_port=p_cm_port,
                              version=p_cm_version, username=p_cm_user,
                              password=p_cm_pass, use_tls=p_use_tls)
    handler_cm_api.cluster_hosts = self.cm_api.get_all_hosts()
    if p_cluster:
        # BUG FIX: the original indexed filter(...)[0] before checking for
        # a match, so an unknown cluster raised IndexError and the intended
        # "not valid" error branch was unreachable.
        matches = [c for c in self.cm_api.get_all_clusters()
                   if c.displayName == p_cluster]
        if not matches:
            print("Error: That cluster is not valid.")
            return
        self.cluster = matches[0]
        self.services = self.cluster.get_all_services()
        self.name = self.cluster.displayName
        # Map each cluster host's id to its hostname using the cached
        # full host list.
        self.topology = {}
        for cluster_host in self.cluster.list_hosts():
            full_host = [x for x in handler_cm_api.cluster_hosts
                         if x.hostId == cluster_host.hostId][0]
            self.topology[cluster_host.hostId] = full_host.hostname
def __init__(self, service, role, host, list):
    """Normalize the query parameters and open the CM API connection.

    Note: the CM host address and credentials are hard-coded placeholders.
    """
    self.service = service.lower()
    self.role = role.lower()
    self.host = host.lower()
    self.list = list.lower()
    self.api = ApiResource('10.7.177.234',
                           username="******", password="******")
def main(): """ Kerberizes a cluster. @rtype: number @returns: A number representing the status of success. """ settings = retrieve_args() api = ApiResource(settings.host, settings.port, settings.username, settings.password, settings.use_tls, 8) cloudera_manager = api.get_cloudera_manager() cluster = api.get_cluster(settings.cluster) mgmt_service = cloudera_manager.get_service() if verify_cloudera_manager_has_kerberos_principal(cloudera_manager): wait_for_command('Stopping the cluster', cluster.stop()) wait_for_command('Stopping MGMT services', mgmt_service.stop()) configure_services(cluster) wait_for_generate_credentials(cloudera_manager) wait_for_command('Deploying client configs.', cluster.deploy_client_config()) wait_for_command('Deploying cluster client configs', cluster.deploy_cluster_client_config()) wait_for_command('Starting MGMT services', mgmt_service.start()) wait_for_command('Starting the cluster', cluster.start()) else: print "Cluster does not have Kerberos admin credentials. Exiting!" return 0
def get_api_handle(conf):
    """Build an ApiResource from the 'cm' section of a config mapping."""
    cm = conf['cm']
    return ApiResource(cm['host'],
                       username=cm['user'],
                       password=cm['password'],
                       use_tls=cm['ssl'])
def get_api():
    """Return a CM ApiResource built from module-level connection globals."""
    return ApiResource(server_host=CM_HOST,
                       server_port=CM_PORT,
                       username=ADMIN_USER,
                       password=ADMIN_PASS,
                       version=VERSION)
def main():
    """Ansible module: check and report Cloudera configuration settings."""
    fields = {
        "cm_host": {"required": True, "type": "str"},
        "cm_port": {"default": 7180, "type": "int"},
        "cm_user": {"default": "admin", "type": "str"},
        "cm_pass": {"required": True, "type": "str", "no_log": True},
        "cm_api_version": {"default": 13, "type": "int"},
        "service_type": {"required": True, "type": "str"},
        "role_type": {"type": "str"},
        "parameter": {"required": True, "type": "str"},
        "value": {"required": False}
    }
    global module
    module = AnsibleModule(argument_spec=fields)
    p = module.params
    api = ApiResource(p["cm_host"], p["cm_port"], p["cm_user"],
                      p["cm_pass"], version=p["cm_api_version"])
    # The module's own parameters double as the single settings entry.
    updates = check_cloudera_settings(api, [p])
    module.exit_json(changed=bool(updates), meta=updates)
def __init__(self, cm_host, cm_user, cm_pass, cluster_name=None):
    """Connect to CM and index the selected cluster's services by type.

    A cluster_name of None selects the first cluster CM returns.

    Raises ValueError when no matching cluster exists (previously this
    surfaced as an obscure AttributeError on NoneType).
    """
    self.SERVICE_HIVE = 'HIVE'
    self.SERVICE_HUE = 'HUE'
    self.SERVICE_IMPALA = 'IMPALA'
    self.SERVICE_SOLR = 'SOLR'
    self.SERVICE_YARN = 'YARN'
    self.SERVICE_HDFS = 'HDFS'
    self.SERVICE_HBASE = 'HBASE'
    self.SERVICE_ZK = 'ZOOKEEPER'
    self.SERVICE_SENTRY = 'SENTRY'
    self.api = ApiResource(
        cm_host,
        username=cm_user,
        password=cm_pass,
    )
    self.cluster = None
    self.services = {}
    for c in self.api.get_all_clusters():
        if cluster_name is None or cluster_name == c.name:
            self.cluster = c
            break
    # BUG FIX: guard against no matching cluster; the original crashed
    # with AttributeError on self.cluster.get_all_services().
    if self.cluster is None:
        raise ValueError("No cluster found matching %r" % (cluster_name,))
    # Index services by type for quick lookup.
    for service in self.cluster.get_all_services():
        self.services[service.type] = service
def main():
    """Bootstrap a CDH cluster through Cloudera Manager end to end."""
    # Connect to the CM API.
    api = ApiResource(CM_HOST, 7180, username=CM_USERNAME, password=CM_PASSWORD)
    manager = api.get_cloudera_manager()
    # No need to update cm config.
    #manager.update_config(cm_host)
    print("[INFO] Connected to CM host on " + CM_HOST)
    # Fetch the cluster object, creating it on first run.
    try:
        cluster = api.get_cluster(name=CLUSTER_NAME)
    except Exception:  # FIX: narrowed bare 'except:' (no longer traps SystemExit/KeyboardInterrupt)
        cluster = init_cluster(api, CLUSTER_NAME, CLUSTER_VERSION, CLUSTER_NODE_COUNT)
    print("[INFO] Initialized cluster " + CLUSTER_NAME +
          " which uses CDH version " + CLUSTER_VERSION)
    # CM management service role names.
    mgmt_servicename = "MGMT"
    amon_role_name = "ACTIVITYMONITOR"
    apub_role_name = "ALERTPUBLISHER"
    eserv_role_name = "EVENTSERVER"
    hmon_role_name = "HOSTMONITOR"
    smon_role_name = "SERVICEMONITOR"
    nav_role_name = "NAVIGATOR"
    navms_role_name = "NAVIGATORMETADATASERVER"
    rman_role_name = "REPORTMANAGER"
    deploy_management(manager, mgmt_servicename, amon_role_name, apub_role_name,
                      eserv_role_name, hmon_role_name, smon_role_name,
                      nav_role_name, navms_role_name, rman_role_name)
    print("[INFO] Deployed CM management service " + mgmt_servicename +
          " to run on " + CM_HOST)
    assign_roles(api, cluster)
    print("[INFO] all roles have assigned.")
    # Custom role config groups cannot be automatically configured:
    # Gateway Group 1 (error 400) -- tolerate that failure deliberately.
    try:
        cluster.auto_configure()
    except Exception:  # FIX: narrowed bare 'except:' (still best-effort)
        pass
    update_custom_config(api, cluster)
    print("[INFO] all servies and roles have configured.")
    # Kick off the first-run command and poll until it finishes
    # (success stays None while the command is still running).
    cmd = cluster.first_run()
    while cmd.success is None:  # FIX: identity check instead of '== None'
        cmd = cmd.fetch()
    if not cmd.success:
        print("[ERROR] The first run command failed: " + cmd.resultMessage())
    else:
        print(
            "[INFO] First run successfully executed. Your cluster has been set up!"
        )
def api(self):
    """Lazily construct and cache the CM ApiResource handle."""
    if self._api is None:
        cm_cfg = self.config['cm']
        self._api = ApiResource(cm_cfg['host'],
                                username=cm_cfg['username'],
                                password=cm_cfg['password'],
                                use_tls=cm_cfg.get('tls', False))
    return self._api
def list_hosts(host, username, password, cafile): context = ssl.create_default_context(cafile=cafile) api = ApiResource(host, username=username, password=password, use_tls=True, ssl_context=context) for h in api.get_all_hosts(): print h.hostname
def main(): API = ApiResource(CM_HOST, version=5, username=ADMIN_USER, password=ADMIN_PASS) print "Connected to CM host on " + CM_HOST CLUSTER = API.get_cluster(CLUSTER_NAME) print "About to stop cluster." CLUSTER.stop().wait() print "Done stopping cluster."