def run():
    num_players = 0
    num_players, plane_ids, start_time, num_views = init()
    if num_players > 0:
        wrapper.schedule(timeSlice)
        # main loop: rebuild the window and GPU buffers, then hand control to
        # the wrapper until the proxy goes away
        while man.proxy.alive():
            setupWin(num_players, plane_ids, fs=fullscreen, num_views=num_views)
            glFinish()
            mesh.createVBOs(mesh.vbo_meshes)
            glFinish()
            ptrOn(mouse_cap)
            print 'run. before wrapper.run'
            wrapper.run()
            print 'run. after wrapper.run'
            glFinish()
        print 'run. done'

    # drain any pending outgoing messages, but give up after 3 seconds
    flush_start = time.time()
    while not man.proxy.attemptSendAll():
        if time.time() - flush_start > 3:
            break
        time.sleep(0)

    # shut down the proxy thread
    if man.proxy:
        try:
            man.proxy.join()
            assert not man.proxy.isAlive()
        except:
            print_exc()

    # shut down the server thread
    if man.server:
        if man.proxy:
            man.server.quit()
        try:
            while man.server.isAlive():
                man.server.join(2)
            assert not man.server.isAlive()
        except (KeyboardInterrupt, SystemExit):
            man.server.quit()
            man.server.join(4)
        except:
            print_exc()
def main():
    options = get_options()
    cfgs = defaultdict(str)

    if options.cfgfile:
        if not os.path.exists(options.cfgfile):
            err_m('Cannot find config file \'%s\'' % options.cfgfile)
        config_file = options.cfgfile
    else:
        config_file = DBCFG_FILE

    if options.pwd:
        pwd = getpass.getpass('Input remote host SSH Password: ')
    else:
        pwd = ''

    if os.path.exists(config_file):
        cfgs = ParseInI(config_file).load()
    else:
        node_lists = expNumRe(raw_input('Enter list of Nodes separated by comma, support numeric RE, i.e. n[01-12]: '))

        # check if node list is expanded successfully
        if len([1 for node in node_lists if '[' in node]):
            err('Failed to expand node list, please check your input.')

        cfgs['node_list'] = ','.join(node_lists)

    results = wrapper.run(cfgs, options, mode='discover', pwd=pwd)

    format_output('Discover results')

    if len(results) > 4:
        output = output_row(results)
    else:
        output = output_column(results)

    print output
    with open('discover_result', 'w') as f:
        f.write('Discover Date: %s\n' % time.strftime('%Y-%m-%d %H:%M'))
        f.write(output)
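# The discover flow above accepts node names in a compact numeric-range form
# such as 'n[01-12]' and expands them before use (expNumRe), then checks that
# no unexpanded '[' is left behind. The helper below is only a minimal,
# self-contained sketch of that kind of expansion; expand_num_range is a
# hypothetical name and not the project's expNumRe implementation.
import re

def expand_num_range(node_expr):
    """Expand 'n[01-03]' into ['n01', 'n02', 'n03']; pass other strings through."""
    match = re.match(r'^(.*)\[(\d+)-(\d+)\](.*)$', node_expr)
    if not match:
        return [node_expr]
    prefix, start, end, suffix = match.groups()
    width = len(start)  # keep the zero padding of the lower bound, e.g. '01' -> 2 digits
    return ['%s%s%s' % (prefix, str(num).zfill(width), suffix)
            for num in range(int(start), int(end) + 1)]

# Example: expand_num_range('n[01-03]') -> ['n01', 'n02', 'n03']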
def run(image1_path=None, image2_path=None):
    return wrapper.run(image1_path, image2_path, iters=20)
def main(): """ db_installer main loop """ global cfgs format_output('Trafodion Installation ToolKit') # handle parser option options = get_options() if options.build and options.cfgfile: log_err('Wrong parameter, cannot specify both --build and --config-file') if options.build and options.offline: log_err('Wrong parameter, cannot specify both --build and --offline') if options.cfgfile: if not os.path.exists(options.cfgfile): log_err('Cannot find config file \'%s\'' % options.cfgfile) config_file = options.cfgfile else: config_file = DBCFG_FILE if options.pwd: pwd = getpass.getpass('Input remote host SSH Password: '******'' # not specified config file and default config file doesn't exist either p = ParseInI(config_file) if options.build or (not os.path.exists(config_file)): if options.build: format_output('DryRun Start') user_input(options, prompt_mode=True, pwd=pwd) # save config file as json format print '\n** Generating config file to save configs ... \n' p.save(cfgs) # config file exists else: print '\n** Loading configs from config file ... \n' cfgs = p.load() if options.offline and cfgs['offline_mode'] != 'Y': log_err('To enable offline mode, must set "offline_mode = Y" in config file') user_input(options, prompt_mode=False, pwd=pwd) if options.upgrade: cfgs['upgrade'] = 'Y' if options.offline: http_start(cfgs['local_repo_dir'], cfgs['repo_port']) else: cfgs['offline_mode'] = 'N' if not options.build: format_output('Installation Start') ### perform actual installation ### wrapper.run(cfgs, options, pwd=pwd) format_output('Installation Complete') if options.offline: http_stop() # rename default config file when successfully installed # so next time user can input new variables for a new install # or specify the backup config file to install again try: # only rename default config file ts = time.strftime('%y%m%d_%H%M') if config_file == DBCFG_FILE and os.path.exists(config_file): os.rename(config_file, config_file + '.bak' + ts) except OSError: log_err('Cannot rename config file') else: format_output('DryRun Complete') # remove temp config file if os.path.exists(DBCFG_TMP_FILE): os.remove(DBCFG_TMP_FILE)
def user_input(options, prompt_mode=True, pwd=''):
    """ get user's input and check input value """
    global cfgs

    apache = True if hasattr(options, 'apache') and options.apache else False
    offline = True if hasattr(options, 'offline') and options.offline else False
    silent = True if hasattr(options, 'silent') and options.silent else False

    # load from temp config file if in prompt mode
    if os.path.exists(DBCFG_TMP_FILE) and prompt_mode == True:
        tp = ParseInI(DBCFG_TMP_FILE)
        cfgs = tp.load()

    u = UserInput(options, pwd)
    g = lambda n: u.get_input(n, cfgs[n], prompt_mode=prompt_mode)

    ### begin user input ###
    if apache:
        g('node_list')
        node_lists = expNumRe(cfgs['node_list'])

        # check if node list is expanded successfully
        if len([1 for node in node_lists if '[' in node]):
            log_err('Failed to expand node list, please check your input.')

        cfgs['node_list'] = ','.join(node_lists)
        g('hadoop_home')
        g('hbase_home')
        g('hive_home')
        g('hdfs_user')
        g('hbase_user')
        g('first_rsnode')
        cfgs['distro'] = 'APACHE'
    else:
        g('mgr_url')
        if not ('http:' in cfgs['mgr_url'] or 'https:' in cfgs['mgr_url']):
            cfgs['mgr_url'] = 'http://' + cfgs['mgr_url']

        # set cloudera default port 7180 if not provided by user
        if not re.search(r':\d+', cfgs['mgr_url']):
            cfgs['mgr_url'] += ':7180'

        g('mgr_user')
        g('mgr_pwd')

        validate_url_v1 = '%s/api/v1/clusters' % cfgs['mgr_url']
        content = ParseHttp(cfgs['mgr_user'], cfgs['mgr_pwd']).get(validate_url_v1)

        # currently only CDH supports multiple clusters,
        # so if this condition is true, it must be a CDH cluster
        if len(content['items']) > 1:
            cluster_names = []
            # loop over all managed clusters
            for cluster in content['items']:
                cluster_names.append(cluster['name'])

            for index, name in enumerate(cluster_names):
                print str(index + 1) + '. ' + name
            g('cluster_no')
            c_index = int(cfgs['cluster_no']) - 1
            if c_index < 0 or c_index >= len(cluster_names):
                log_err('Incorrect number')
            cluster_name = cluster_names[int(c_index)]
        else:
            try:
                cluster_name = content['items'][0]['name']
            except (IndexError, KeyError):
                cluster_name = content['items'][0]['Clusters']['cluster_name']

        discover = HadoopDiscover(cfgs['mgr_user'], cfgs['mgr_pwd'], cfgs['mgr_url'], cluster_name)
        rsnodes = discover.get_rsnodes()
        hadoop_users = discover.get_hadoop_users()

        cfgs['distro'] = discover.distro
        cfgs['hbase_service_name'] = discover.get_hbase_srvname()
        cfgs['hdfs_service_name'] = discover.get_hdfs_srvname()
        cfgs['zookeeper_service_name'] = discover.get_zookeeper_srvname()

        cfgs['cluster_name'] = cluster_name.replace(' ', '%20')
        cfgs['hdfs_user'] = hadoop_users['hdfs_user']
        cfgs['hbase_user'] = hadoop_users['hbase_user']
        cfgs['node_list'] = ','.join(rsnodes)
        cfgs['first_rsnode'] = rsnodes[0]  # first regionserver node

    # check node connection
    for node in cfgs['node_list'].split(','):
        rc = os.system('ping -c 1 %s >/dev/null 2>&1' % node)
        if rc:
            log_err('Cannot ping %s, please check network connection and /etc/hosts' % node)

    ### discover system settings, return a dict
    discover_results = wrapper.run(cfgs, options, mode='discover', pwd=pwd)

    # check discover results, return error if it fails on any single node
    need_java_home = 0
    for result in discover_results:
        host, content = result.items()[0]
        content_dict = json.loads(content)

        java_home = content_dict['default_java']
        if java_home == 'N/A':
            need_java_home += 1
        if content_dict['linux'] == 'N/A':
            log_err('Unsupported Linux version')
        if content_dict['firewall_status'] == 'Running':
            log_err('Firewall should be stopped')
        if content_dict['traf_status'] == 'Running':
            log_err('Trafodion process is found, please stop it first')
        if content_dict['hbase'] == 'N/A':
            log_err('HBase is not found')
        if content_dict['hbase'] == 'N/S':
            log_err('HBase version is not supported')
        if content_dict['secure_hadoop'] == 'kerberos':
            cfgs['secure_hadoop'] = 'Y'
        else:
            cfgs['secure_hadoop'] = 'N'

    if offline:
        g('local_repo_dir')
        if not glob('%s/repodata' % cfgs['local_repo_dir']):
            log_err('repodata directory not found, this is not a valid repository directory')
        cfgs['offline_mode'] = 'Y'
        cfgs['repo_ip'] = socket.gethostbyname(socket.gethostname())
        cfgs['repo_port'] = '9900'

    pkg_list = ['apache-trafodion']
    # find tar in installer folder, if more than one found, use the first one
    for pkg in pkg_list:
        tar_loc = glob('%s/*%s*.tar.gz' % (INSTALLER_LOC, pkg))
        if tar_loc:
            cfgs['traf_package'] = tar_loc[0]
            break

    g('traf_package')

    # get basename and version from tar filename
    try:
        pattern = '|'.join(pkg_list)
        cfgs['traf_basename'], cfgs['traf_version'] = re.search(r'.*(%s).*-(\d\.\d\.\d).*' % pattern, cfgs['traf_package']).groups()
    except:
        log_err('Invalid package tar file')

    #if float(cfgs['traf_version'][:3]) >= 2.2:
    #    cfgs['req_java8'] = 'Y'
    #else:
    #    cfgs['req_java8'] = 'N'

    g('traf_pwd')
    g('dcs_cnt_per_node')
    g('scratch_locs')
    g('traf_start')

    # kerberos
    if cfgs['secure_hadoop'].upper() == 'Y':
        g('kdc_server')
        g('admin_principal')
        g('kdcadmin_pwd')

    # ldap security
    g('ldap_security')
    if cfgs['ldap_security'].upper() == 'Y':
        g('db_root_user')
        g('db_admin_user')
        g('db_admin_pwd')
        g('ldap_hosts')
        g('ldap_port')
        g('ldap_identifiers')
        g('ldap_encrypt')
        if cfgs['ldap_encrypt'] == '1' or cfgs['ldap_encrypt'] == '2':
            g('ldap_certpath')
        elif cfgs['ldap_encrypt'] == '0':
            cfgs['ldap_certpath'] = ''
        else:
            log_err('Invalid ldap encryption level')

        g('ldap_userinfo')
        if cfgs['ldap_userinfo'] == 'Y':
            g('ldap_user')
            g('ldap_pwd')
        else:
            cfgs['ldap_user'] = ''
            cfgs['ldap_pwd'] = ''

    # DCS HA
    g('dcs_ha')
    cfgs['enable_ha'] = 'false'
    if cfgs['dcs_ha'].upper() == 'Y':
        g('dcs_floating_ip')
        g('dcs_interface')
        g('dcs_backup_nodes')
        # check that dcs backup nodes exist in node list
        if sorted(list(set((cfgs['dcs_backup_nodes'] + ',' + cfgs['node_list']).split(',')))) != sorted(cfgs['node_list'].split(',')):
            log_err('Invalid DCS backup nodes, please pick up from node list')
        cfgs['enable_ha'] = 'true'

    if need_java_home:
        g('java_home')
    else:
        # don't overwrite user-input java home
        if not cfgs['java_home']:
            cfgs['java_home'] = java_home

    # set other configs to cfgs
    if apache:
        cfgs['hbase_xml_file'] = cfgs['hbase_home'] + '/conf/hbase-site.xml'
        cfgs['hdfs_xml_file'] = cfgs['hadoop_home'] + '/etc/hadoop/hdfs-site.xml'
    else:
        cfgs['hbase_xml_file'] = '/etc/hbase/conf/hbase-site.xml'

    cfgs['req_java8'] = 'N'
    cfgs['traf_user'] = '******'
    cfgs['config_created_date'] = time.strftime('%Y/%m/%d %H:%M %Z')

    if not silent:
        u.notify_user()
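# The DCS backup-node check in user_input() above works by merging the backup
# list into node_list and comparing the deduplicated result with node_list
# itself: if the two differ, the backup list introduced a node that is not in
# node_list. Assuming node_list has no duplicate entries, that is equivalent
# to a plain subset test; backup_nodes_valid below is a hypothetical helper
# shown only to illustrate the equivalence.
def backup_nodes_valid(backup_nodes, node_list):
    """Return True if every backup node already appears in node_list."""
    return set(backup_nodes.split(',')).issubset(set(node_list.split(',')))

# Example: backup_nodes_valid('n02,n03', 'n01,n02,n03') -> True
#          backup_nodes_valid('n09',     'n01,n02,n03') -> False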
def main(): """ db_installer main loop """ global cfgs format_output('Trafodion Installation ToolKit') # handle parser option options = get_options() if options.build and options.cfgfile: log_err( 'Wrong parameter, cannot specify both --build and --config-file') if options.build and options.offline: log_err('Wrong parameter, cannot specify both --build and --offline') if options.cfgfile: if not os.path.exists(options.cfgfile): log_err('Cannot find config file \'%s\'' % options.cfgfile) config_file = options.cfgfile else: config_file = DBCFG_FILE if options.pwd: pwd = getpass.getpass('Input remote host SSH Password: '******'' # not specified config file and default config file doesn't exist either p = ParseInI(config_file) if options.build or (not os.path.exists(config_file)): if options.build: format_output('DryRun Start') user_input(options, prompt_mode=True, pwd=pwd) # save config file as json format print '\n** Generating config file to save configs ... \n' p.save(cfgs) # config file exists else: print '\n** Loading configs from config file ... \n' cfgs = p.load() if options.offline and cfgs['offline_mode'] != 'Y': log_err( 'To enable offline mode, must set "offline_mode = Y" in config file' ) user_input(options, prompt_mode=False, pwd=pwd) if options.upgrade: cfgs['upgrade'] = 'Y' if options.offline: http_start(cfgs['local_repo_dir'], cfgs['repo_port']) else: cfgs['offline_mode'] = 'N' if not options.build: format_output('Installation Start') ### perform actual installation ### wrapper.run(cfgs, options, pwd=pwd) format_output('Installation Complete') if options.offline: http_stop() # rename default config file when successfully installed # so next time user can input new variables for a new install # or specify the backup config file to install again try: # only rename default config file ts = time.strftime('%y%m%d_%H%M') if config_file == DBCFG_FILE and os.path.exists(config_file): os.rename(config_file, config_file + '.bak' + ts) except OSError: log_err('Cannot rename config file') else: format_output('DryRun Complete') # remove temp config file if os.path.exists(DBCFG_TMP_FILE): os.remove(DBCFG_TMP_FILE)