def cleanRPMmachine(self,ip):
    """Log in to *ip* and remove all reflex components.

    Kills any running Java processes, erases the reflex-tm RPM via yum and
    wipes /opt/reflex.  Relies on the module-level ``ssh`` session helper
    and the Robot ``logger``.
    """
    self.LoginTo(ip)
    logger.info("Clean reflex related stuff on %s"%ip, also_console=True)
    # Stop any Java processes before removing the package.
    ssh.write("pkill -9 java")
    line = ssh.read_until_prompt(loglevel="DEBUG")
    # Record which user the session is running as.
    ssh.write("id")
    line = ssh.read_until_prompt(loglevel="DEBUG")
    logger.console(line)
    # Dead code from an earlier revision: a branch that handled the
    # dedicated reflex user (uid 402) separately.  Kept as-is.
    '''
    if line.find("402") != -1:
        logger.console("With reflex user")
        ssh.set_client_configuration(prompt="#")
        ssh.write("exit")
        logger.info("Exiting reflex-user",also_console=True)
        ssh.write("yum erase -y reflex-tm")
        line=ssh.read_until_prompt()
        logger.info(line,also_console=True)
        logger.console("Successfully erase reflex-tm")
        ssh.write("rm -rf /opt/reflex/*")
        line = ssh.read_until_prompt()
        logger.info(line, html=False, also_console=True)
    else:
    '''
    ssh.write("yum erase -y reflex-tm")
    line = ssh.read_until_prompt()
    logger.info("Successfully erase reflex-tm",also_console=True)
    ssh.write("rm -rf /opt/reflex/*")
    line = ssh.read_until_prompt()
    logger.info("Successfully erase reflex directory",also_console=True)
def stop_django(self):
    """Terminate the running Django server process by PID."""
    pid = self.django_pid
    os.kill(pid, signal.SIGKILL)
    logger.console("Django stopped (PID: %s)" % pid)
    logger.console(78 * "-")
def adbCmd(cmd,Device_ID ,timeout=300):
    """Run an adb command against device *Device_ID* and return its output.

    :param cmd: adb sub-command and arguments, as a single string.
    :param Device_ID: serial of the target device (passed to ``adb -s``).
    :param timeout: accepted but currently unused -- TODO confirm intent.
    :return: combined stdout/stderr of the adb process as a string.
    """
    #print "*WARN* Danger Will Robinson"
    logger.console("Execute command: "+"adb -s " + Device_ID + " " + cmd)
    print "Execute command: "+"adb -s " + Device_ID + " " + cmd
    # NOTE(review): shell=True with string concatenation is a command
    # injection risk if cmd/Device_ID can contain untrusted input.
    proc=subprocess.Popen("adb -s " + Device_ID + " " + cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    return_string=getStrFromExctCMD(proc)
    return return_string
def set_configuraciones_navegador(self, tipo_nav):
    """Build and return the DesiredCapabilities for the requested browser.

    :param tipo_nav: browser name ("FIREFOX", "CHROME" or "PHANTOMJS").
    :return: the capabilities dict stored on ``self._capabilites`` (only
        populated for Firefox; the other browsers are still TODO).
    """
    if tipo_nav.upper() == "FIREFOX":
        # Copy the browser's default capabilities.
        self._capabilites = DesiredCapabilities.FIREFOX.copy()
        # Operating system.
        self._capabilites["platform"] = "ANY"
        # Browser name.
        self._capabilites["browsername"] = "firefox"
        # Browser version.
        self._capabilites["version"] = "41.0.2"
        # Whether JavaScript is enabled.
        self._capabilites["javascriptEnabled"] = True
        # Whether the Marionette driver is enabled.
        self._capabilites["marionette"] = False
    elif tipo_nav.upper() == "CHROME":
        logger.console("#TODO Using broowser Chrome")
    elif tipo_nav.upper() == "PHANTOMJS":
        logger.console("#TODO Using browser Phantomjs")
    return self._capabilites
def ConfigureCollector(self):
    """Install the reflex-collector RPM on the collector host and verify it.

    Logs in to ``self.collector_ip``, runs ``yum install`` and scans the
    output for already-installed / failure / dependency-error markers,
    then switches to the reflex user and checks the collector process
    from the device CLI.
    """
    self.LoginTo(self.collector_ip)
    logger.info("Now installing rpms and configuring Collector on %s"%self.collector_ip, also_console=True)
    ssh.write("id")
    line = ssh.read_until_prompt()
    logger.console(line)
    ssh.write("yum install -y reflex-collector")
    output = ssh.read_until_prompt(loglevel="INFO")
    for line in output.splitlines():
        line = line.strip(" ")
        if line.find("already installed and latest version") != -1:
            logger.console("reflex-collector already installed", newline="yes", stream="stdout")
        elif line.find("failed") != -1:
            logger.console("reflex-collector failed during installation", newline="yes", stream="stdout")
            logger.error("failure", html=True)
        elif line.find("Error: Package:") != -1:
            # BUG FIX: re.search() was called with only the pattern and no
            # string to search, which raises TypeError at runtime.
            m1 = re.search(r'Error: Package:\s.*\s\(', line)
            if m1:
                package = m1.group()
                logger.console(line, newline="yes", stream="stdout")
                logger.error(package, html=True)
    logger.info("Reflex-collector installed successfully",also_console=True)
    #check reflex rpm also. Not remembered why ??
    self.SwitchToReflex()
    self.ReflexKeysGeneration()
    self.cliPrompt()
    ssh.write("show pm process collector")
    output = ssh.read_until_prompt(loglevel="INFO")
    logger.console(output)
def cliPrompt(self,cluster="yes"):
    """Switch the SSH session into the device CLI config prompt.

    Runs ``cli -m config``, greps the resulting prompt (cluster or
    standalone form depending on *cluster*) and configures SSHLibrary to
    expect it.  Falls back to the reflex user when run from root.

    :param cluster: "yes" to match the cluster-style prompt, "no" for the
        standalone form.
    """
    logger.info("Switching to cli Prompt",also_console=True)
    ssh.set_client_configuration(prompt="#")
    ssh.write("cli -m config",loglevel="DEBUG")
    output = ssh.read_until_prompt(loglevel="DEBUG")
    logger.info("Showing Prompt %s"%output,also_console=True)
    if output.find("% Unrecognized command") != -1:
        # Root user has no `cli` on PATH -- retry as the reflex user.
        logger.warn("You are logging from Root user", html=True)
        self.SwitchToReflex()
        ssh.set_client_configuration(prompt="#")
        ssh.write("cli -m config",loglevel="DEBUG")
        output = ssh.read_until_prompt(loglevel="DEBUG")
    m1 = None
    if cluster == "no":
        m1 = re.search(r"^(?P<cliPrompt>\S+\s\(\S+\)\s*\#)\s*", output.strip(), re.MULTILINE)
    else:
        for lines in output.splitlines():
            # Keep the last line that matches the cluster prompt form.
            found = re.search(r"^(?P<cliPrompt>\S+\s\[\S+:\s\S+\]\s\(\S+\)\s*\#)\s*", lines.strip(), re.MULTILINE)
            if found:
                m1 = found
    if m1:
        cliPrompt = m1.group("cliPrompt")
        logger.console(cliPrompt)
        ssh.set_client_configuration(prompt=cliPrompt)
    else:
        # BUG FIX: previously `m1`/`cliPrompt` could be unbound here, so the
        # trailing set_client_configuration raised UnboundLocalError instead
        # of surfacing this warning.
        logger.warn("Not able to grep cliPrompt")
def hadoopNamespace_Updation():
    """Touch a file in the hadoop namespace to force a namespace update.

    Logs in to the namenode, restores the shell prompt from the Robot
    variable ``${shellPrompt}``, logs the journal node list and creates
    /data/abcabc via ``hadoop fs -touchz``.
    """
    Login_To(NN_IP,un="admin",pw="admin@123")
    var = robo.get_variable_value("${shellPrompt}")
    ssh.set_client_configuration(prompt=var)
    conf = HAdoopConf()
    #logger.console(conf)
    logger.console(conf["journal"])
    ssh.write("hadoop fs -touchz /data/abcabc")
def input_random_password(self, locator, name, errorLocator, expectedErrorMsg):
    """Generate a random password, type it into *locator* and, when it is
    shorter than 6 characters, validate the expected error message.

    The password is stored in the module-level ``randomPassword`` global so
    sibling keywords can reuse it.

    :return: the generated password string.
    """
    global randomPassword
    randomPassword = self.methodObj.getRandomPassword(name)
    logger.console('Random Generated Password is : %s' %randomPassword)
    self.sl.element_should_be_visible(locator)
    self.sl.input_text(locator, randomPassword)
    if(len(randomPassword) < 6):
        # Passwords under 6 chars are expected to be rejected by the UI.
        self._validate_message(errorLocator, expectedErrorMsg)
    return randomPassword
def project_config(self):
    """Return the CumulusCI project config, creating it on first access."""
    if self._project_config is not None:
        return self._project_config
    stack = CURRENT_TASK.stack
    if stack and isinstance(stack[0], Robot):
        # CumulusCI is already running a task -- reuse its config.
        return stack[0].project_config
    logger.console("Initializing CumulusCI config\n")
    self._project_config = CliRuntime().project_config
    return self._project_config
def _close(self):
    """Release locks and value sets; tolerate an already-closed server."""
    try:
        self.release_locks()
        self.release_value_set()
    except RuntimeError as err:
        # Last line of defence: the PabotLib server may already be gone,
        # so a connection failure here is logged rather than raised.
        logger.console("pabot.PabotLib#_close: threw an exception: is --pabotlib flag used? ErrorDetails: {0}".format(repr(err)), stream='stderr')
def __on_response_diagnosis(self, client, userdata, msgtop):
    """MQTT callback for a remote-diagnosis response.

    Ignores responses whose message id does not match the request that was
    sent; otherwise records the ack status and wakes the waiting thread.
    """
    logger.console(self._tag + "===> on_response_diagnosis")
    if self._msg_id != msgtop.message_head.message_id:
        logger.warn(self._tag + "on_response_diagnosis: Not expected msg_id")
        return
    if msgtop.HasField("remote_diagnosis_response"):
        # BUG FIX: the field read must match the field checked with
        # HasField(); previously this read `msgtop.diagnosis_response`,
        # which is inconsistent with the guard (cf. the remote_config
        # handler, which uses one field name throughout).
        self._result = msgtop.remote_diagnosis_response.ack.status
        self._event.set()
def open_browser_and_login_then_navigate_to_manage_users_page(
        self, browser, ip, username, password):
    """Open a logged-in browser session and go to the Manage Users page."""
    self.driver = driver_factory.getDriver(browser, ip, username, password)
    sleep(5)
    # Navigate to Manage Users page
    target_url = self.driver.current_url + "MVC/User/ManageUsers"
    logger.console("| Navigating to: " + target_url)
    self.driver.get(target_url)
def project_config(self):
    """Return the CumulusCI project config, creating it on first access."""
    if self._project_config is not None:
        return self._project_config
    if CURRENT_TASK and isinstance(CURRENT_TASK, Robot):
        # CumulusCI is already running a task -- reuse its config.
        return CURRENT_TASK.project_config
    logger.console('Initializing CumulusCI config\n')
    self._project_config = CliConfig().project_config
    return self._project_config
def say_hi(self, name):
    """Say hi with name

    Examples:
    | Say hi | name 1 |
    | Say hi | name 2 |
    """
    self._hello.set_name(name)
    message = 'Say hi with %s' %(name)
    logger.console(message)
def run(command, msg=u"", check=True, log=False, console=False):
    """Execute *command*, tolerating nonzero return codes when asked.

    Stderr is merged into stdout.  When *check* is true a nonzero return
    code raises RuntimeError.  When *log* is true, rc and output are
    logged via robot logger -- `.debug()` by default, `.console()` when
    *console* is set (useful from non-main threads).  *msg*, if given, is
    prepended to the log message separated by ": ".

    Plain strings are rejected for safety; pass a list of arguments
    (invoke bash explicitly if you need glob expansion).

    :param command: List of command and arguments.
    :param msg: Message prefix.
    :param check: Raise on nonzero return code.
    :param log: Log rc and output.
    :param console: Use .console() instead of .debug().
    :type command: Iterable or OptionString
    :type msg: str
    :type check: bool
    :type log: bool
    :type console: bool
    :returns: rc and output
    :rtype: 2-tuple of int and str
    :raises RuntimeError: If check is true and return code non-zero.
    :raises TypeError: If command is not an iterable.
    """
    if isinstance(command, OptionString):
        command = command.parts
    if not hasattr(command, u"__iter__"):
        # Strings are indexable but deliberately unsupported here.
        raise TypeError(f"Command {command!r} is not an iterable.")
    ret_code, output = 0, u""
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as err:
        ret_code, output = err.returncode, err.output
        if check:
            raise RuntimeError(
                MESSAGE_TEMPLATE.format(com=err.cmd, ret=ret_code, out=output))
    if log:
        message = MESSAGE_TEMPLATE.format(com=command, ret=ret_code, out=output)
        if msg:
            message = f"{msg}: {message}"
        if console:
            logger.console(message)
        else:
            logger.debug(message)
    return ret_code, output
def just_call(self):
    """Basic call keyword.

    Example:
    | Just Call |
    """
    # Emit the greeting to both the log file and the console.
    logger.info("Hello World")
    logger.console("Hello world")
def step1_check_componentstatus():
    """Fail unless every kubernetes component condition type is Healthy."""
    stdout = ex.execute_unix_command(
        "kubectl get componentstatus -o json | jq .items[].conditions[].type")
    logger.console('\n')
    for entry in stdout.split('\n'):
        if "Healthy" not in entry:
            raise Exception(entry)
        logger.console(entry)
def check_search_functionality(self, keyword):
    """
    Asserts if the search functionality works.  Raises AssertionError when
    the first result row does not match the searched keyword.
    :param keyword: Keyword to be searched for
    """
    if self.read_result_row_one().lower() != keyword.lower():
        raise AssertionError(f"Expected would be that the search keyword {keyword} to be equal with the search results {self.read_result_row_one()}")
    robologger.console(f"{keyword} is equal to {self.read_result_row_one()}")
def _toggle_bridge_mode(self, enable=None): #feng add
    """Toggle the LAN bridge-mode checkbox to match *enable*.

    Clicks the toggle only when the current checkbox state differs from
    *enable*.  NOTE(review): with the default ``enable=None`` the
    ``is not`` test is always true, so the toggle always fires -- confirm
    callers pass an explicit bool.
    """
    logger.console(
        "\n For Toggle Bridge mode--> use selenium PortalWebGUICore.py :line 494\n" )
    self._linkElement("LAN IP")
    time.sleep(2)
    if self._isCheckboxChecked("togglecomp") is not enable:
        self._btnElement("togglecomp", by_id=True)
def initiate_config():
    """
    utlility function to load configuration file
    :return: None
    """
    root_dir = get_project_root()
    logger.console(root_dir)
    load_config(os.path.join(root_dir, 'config/config.yaml'))
def log_and_raise(self, exc_msg):
    """Log *exc_msg* to console; raise RuntimeError if fail_on_mismatch.

    :param exc_msg: The message to include in log or exception.
    :type exc_msg: str
    :raises RuntimeError: With the message, if fail_on_mismatch.
    """
    logger.console("RuntimeError:\n{m}".format(m=exc_msg))
    if not self.fail_on_mismatch:
        return
    raise RuntimeError(exc_msg)
def fetch_the_filename_and_return_version(dict):
    """Append the component version to the module-global ``version_list``
    when the component is a Synergy SAS / D3940 firmware component.

    :param dict: component record with 'name' and 'componentVersion' keys
        (parameter name shadows the builtin -- kept for caller
        compatibility).
    :return: the shared version_list.
    """
    # Trim the two oldest entries once the list grows past two items.
    if len(version_list) > 2:
        del version_list[0:2]
        logger.console("Initialized the version list")
    if re.match(
            "Smart Component for HPE Synergy 12Gb SAS Connection Module Firmware|Smart Component for HPE Synergy D3940 Storage Module firmware",
            dict['name']):
        logger.console(dict['name'])
        version_list.append(dict['componentVersion'])
    return version_list
def adbCmd(cmd, Device_ID, timeout=300):
    """Run an adb command against device *Device_ID* and return its output.

    NOTE(review): duplicate of the other adbCmd definition in this file --
    consider removing one of them.

    :param cmd: adb sub-command and arguments, as a single string.
    :param Device_ID: serial of the target device (passed to ``adb -s``).
    :param timeout: accepted but currently unused -- TODO confirm intent.
    """
    #print "*WARN* Danger Will Robinson"
    logger.console("Execute command: " + "adb -s " + Device_ID + " " + cmd)
    print "Execute command: " + "adb -s " + Device_ID + " " + cmd
    # NOTE(review): shell=True with string concatenation is a command
    # injection risk if cmd/Device_ID can contain untrusted input.
    proc = subprocess.Popen("adb -s " + Device_ID + " " + cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    return_string = getStrFromExctCMD(proc)
    return return_string
def error(self):
    """Log any error banners present on the current page.

    Asserts that the hidden error <div> marker is present in the page
    source; when it is not (i.e. errors are visible), each error article
    under #content is written to the console.
    """
    error = ' '
    try:
        assert '<div class="alert alert-danger hidden"' in self.driver.page_source
    except AssertionError:
        logger.error('Error')
        errors = self.driver.find_elements_by_xpath(
            'id("content")/div/article/div')
        # `error` is rebound from the placeholder string to each element.
        for error in errors:
            logger.console(error.text)
def _toggle_dynamic_wan_ip(self, enable): #feng add 2017/1/5
    """Toggle the WAN 'Connection Accessibility' checkbox to *enable*.

    :param enable: True to enable, False to disable.
    """
    logger.console("\nTrue = Enable / False = Disable\n")
    logger.console(enable)
    self._linkElement("WAN IP")
    time.sleep(2)
    self._linkElement("Connection Accessibility")
    time.sleep(2)
    # Click only when the current state differs from the requested one.
    if self._isCheckboxChecked("togglecomp") is not enable:
        self._btnElement("togglecomp", by_id=True)
def on_heartbeat(self):
    """ callback function for when a heartbeat is received """
    logger.console("=> Hearbeat")  # (sic) typo preserved -- runtime log text
    # Forward the payload to the test only when it is awaiting heartbeats.
    if "Heartbeat" in self.__awaited_msgs:
        self.__msgq.put(call.HeartbeatPayload())
    # Answer with the current UTC time in ISO-8601 'Z' form.
    return call_result.HeartbeatPayload(
        current_time=datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S') + "Z"
    )
def read_songs_from_target_playlist(self):
    """
    Reads the songs from a playlist.  Raises AssertionError when the
    playlist contains no songs.
    :return:
    """
    if len(self.SABL.songs_in_playlist()) == 0:
        raise AssertionError("the playlist should have contained songs, instead it did not.")
    robologger.console(self.SABL.read_songs_from_playlist())
def new_playlist_creation_validation(self, playlist_name):
    """
    Validates that the new playlist was indeed created
    :param playlist_name: String containing the name of the newly created playlist
    :raises AssertionError: when the playlist is not present in the list.
    """
    try:
        present = self.SABL.playlist(playlist_name) == playlist_name
    except Exception:
        # Lookup failed -- treat as "playlist not present".
        present = False
    if present:
        robologger.console("The song was present in the playlist list. Test has passed.")
    else:
        # BUG FIX: previously a name mismatch fell through silently (only a
        # lookup exception raised), so the validation could never fail on a
        # wrong/missing playlist name.
        raise AssertionError(f"The newly created playlist called: {playlist_name} should have been present in the list. It was not.")
def _parse_arg(self, arg):
    """
    Highly simple parser to parse out action ('of', 'from', ... )
    from noun (<element> or <screenarea>)
    """
    noun = arg
    # Repeatedly split off the first word until the remainder is empty;
    # each step logs the (action, separator, rest) triple.
    while noun:
        action, sep, noun = noun.partition(' ')
        print action, sep, noun
        logger.console("\n%s :: %s :: %s" % (action, sep, noun))
def _set_data_file(self, config_file, run_name, data_file):
    """Point run *run_name* in *config_file* at *data_file* and rewrite it."""
    config_path = path.join(self.saw_home, config_file)
    doc = et.parse(config_path)
    # Locate the setup used by the specified run.
    run = doc.find('runs/run[@name="{0}"]'.format(run_name))
    if run.attrib['datafile'] != data_file:
        logger.console(
            'Setting data file in {0} for run {1} to {2}'.format(
                config_file, run_name, data_file))
        run.attrib['datafile'] = data_file
    # The document is written back even when nothing changed.
    doc.write(config_path)
def _toggle_dhcp_server(self, enable=None): #feng add
    """Toggle the LAN DHCP-server checkbox to match *enable*.

    Uses the third 'togglecomp' checkbox on the LAN IP page, located by
    xpath because the element id is not unique on the page.
    """
    logger.console(
        "toggile dhcp server , in the PortalWebGUICore.py - lin 542\n")
    self._linkElement("LAN IP")
    time.sleep(2)
    #feng ----modify
    if self._isCheckboxChecked('(//input[@id=\'togglecomp\'])[3]', is_xpath=True) is not enable:
        self._xpathElement('(//input[@id=\'togglecomp\'])[3]', None)
def __init__(self):
    """Log in to the Vertex sanity site, open the ad-hoc query page, run a
    query against the open_yesu_test/open_dan table and print the result
    cells.
    """
    Driver.__init__(self)
    self.driver = self.driver()
    self.driver.get(
        'https://vebd2.vertexinc.com/vertex/sanity-sso/auth/login.html?originalUrl=https://vebd2.vertexinc.com/vertex/sanity/ve/#/login'
    )
    self.driver.maximize_window()
    userName = self.driver.find_element(locators['login.username'][0],
                                        locators['login.username'][1])
    userName.send_keys('sri')
    passWord = self.driver.find_element(locators['login.password'][0],
                                        locators['login.password'][1])
    passWord.send_keys('iL0veTax')
    signInButton = self.driver.find_element(
        locators['login.signin.button'][0],
        locators['login.signin.button'][1])
    signInButton.click()
    signInButton.click()
    adhocButton = self.driver.find_element(locators['adhoc.button'][0],
                                           locators['adhoc.button'][1])
    highlight(adhocButton)
    adhocButton.click()
    xPathToDBName = "//*[@id='select-database']/select/option[text()='%s']" % 'open_yesu_test'
    # BUG FIX: the original used try/finally, which logged "Element not
    # found!" and quit the driver even when the wait SUCCEEDED; only a
    # failed wait should abort.
    try:
        element = WebDriverWait(self.driver, 10).until(
            EC.presence_of_element_located((By.ID, "select-database")))
    except Exception:
        logger.console("Element not found!")
        self.driver.quit()
        raise
    selectDB = self.driver.find_element(By.XPATH, xPathToDBName)
    highlight(selectDB)
    selectDB.click()
    xPathToTable = "//*[@id='select-table']/select/option[text()='%s']" % 'open_dan'
    selectTable = self.driver.find_element(By.XPATH, xPathToTable)
    selectTable.click()
    time.sleep(5)
    runAdhocQuery = self.driver.find_element(
        locators['adhoc.runAdhocQuery'][0],
        locators['adhoc.runAdhocQuery'][1])
    highlight(runAdhocQuery)
    runAdhocQuery.click()
    inputs = self.driver.find_elements_by_xpath(
        'body.desktop-detected.ng-scope:nth-child(2) div.ng-scope:nth-child(1) div.ng-scope:nth-child(2) div.animated.fadeIn.ng-scope section.ng-scope:nth-child(5) div.row:nth-child(1) article.col-xs-12.col-sm-12.col-md-12.col-lg-12.sortable-grid.ui-sortable div.jarviswidget.jarviswidget-sortable div.widget-body.no-padding div.smart-form.ng-valid.ng-valid-pattern.ng-valid-maxlength.ng-dirty.ng-valid-parse div.row:nth-child(3) section.col.col-12 div.fixed-table-container table.table.table-bordered.table-striped tbody.ng-scope:nth-child(2) tr.vs-repeat-repeated-element.ng-scope:nth-child(4) > td:nth-child(7)'
    )
    # BUG FIX: the loop printed `inputs.text` (the list, which has no
    # .text) instead of each element's text; also renamed the loop
    # variable to stop shadowing the builtin `input`.
    for cell in inputs:
        print(cell.text)
def kafka_CPU_validation(self, *args, **kwargs):
    """Validate kafka CPU-topic traffic for this host.

    kwargs: hostip (plus host_name/username/password consumed by
    _load_kwargs).  When the tunnel interface is UP the host is expected
    to appear in the cpu topic; when DOWN it must not.  Returns True when
    the expectation holds, False otherwise (including unknown interface
    state); logs and returns None on unexpected errors.
    """
    banner("Kafka CPU validation")
    self._load_kwargs(kwargs)
    logger.console("self.tun_interface_state: {}".format(
        self.tun_interface_state))
    try:
        # The two original branches were byte-identical except for the final
        # comparison; run the consumer once and branch on the count.
        if self.tun_interface_state not in ("UP", "DOWN"):
            return False
        cmd = "docker exec kafka kafka-avro-console-consumer --topic cpu --bootstrap-server localhost:9092|head -20|grep {}|wc -l".format(
            self.host_name)
        kafka_CPU_validation = cli_run(cmd=cmd,
                                       host_ip=self.hostip,
                                       linux_user=self.username,
                                       linux_password=self.password)
        serialised_kafka_CPU_validation = self._serialize_response(
            time.time(), kafka_CPU_validation)
        print("serialised_kafka_CPU_validation is:{}".format(
            serialised_kafka_CPU_validation))
        self.cmd_output = str(serialised_kafka_CPU_validation['Result']
                              ['stdout']).replace('\n', '').strip()
        matches = int(self.cmd_output)
        if self.tun_interface_state == "UP":
            # Host must be producing cpu metrics.
            return matches > 0
        # DOWN: host must NOT be producing cpu metrics.
        return matches == 0
    except Exception as e:
        logger.console(
            "Error in kafka_CPU_validation status: {}".format(e))
def HAdoopShellCmds(cmd):
    """Run a hadoop_yarn pmx subshell command and verify journalnode state.

    For a "stop" command: parses the stopped journalnode hosts from the
    output and returns False if any of them still has a journalnode
    process running.  For a "start" command: parses the started hosts and
    returns True once a journalnode process is seen running.  Returns
    None otherwise (implicit).
    """
    #logger.warn(robo.get_library_instance('SSHLibrary'))
    #ssh = robo.get_library_instance('SSHLibrary')
    Login_To(NN_IP,un="admin",pw="admin@123")
    journalnodes = []
    host_mapping = IpHostName()
    var = robo.get_variable_value("${shellPrompt}")
    ssh.set_client_configuration(prompt=var)
    #logger.info(cmd,also_console=True)
    pmxcmd = "pmx subshell hadoop_yarn %s"%cmd
    logger.info(pmxcmd,also_console=True)
    ssh.write(pmxcmd)
    output = ssh.read_until_prompt()
    if pmxcmd.find("stop") != -1:
        # Collect the hostnames reported as stopped.
        for line in output.splitlines():
            if line.find("Stopping JournalNode on Host:") != -1:
                match = re.search('.*:\s(.*)$',line)
                if match:
                    nodes = str(match.group(1))
                    journalnodes.append(nodes)
                else:
                    logger.error("Journal nodes cannt be stopped from Subshell commands")
        #logger.console(host_mapping)
        #logger.console(host_mapping["EIGHTY-DN82"])
        #logger.info(host_mapping[journalnodes[0]], also_console = True)
        logger.console(journalnodes,stream='stdout')
        # Verify no journalnode JVM survives on any stopped host.
        for i in journalnodes:
            output = executeRemote("ps -aef | grep -i journalnode | grep -v grep | awk '{print $2 \":\" $9 \":\" $12 \":\" $22}'", host_mapping[i])
            #logger.info(output,also_console=True)
            for line in output.splitlines():
                if line.find("Dproc_journalnode") != -1:
                    logger.warn("Journal Nodes are still UP", html=True)
                    return False
    elif pmxcmd.find("start") != -1:
        # Collect the hostnames reported as started.
        for line in output.splitlines():
            if line.find("Starting JournalNode on Host:") != -1:
                match = re.search('.*:\s(.*)$',line)
                if match:
                    nodes = str(match.group(1))
                    journalnodes.append(nodes)
                else:
                    logger.error("Journal nodes cannot be stopped from Subshell commands")
        logger.console(journalnodes,stream='stdout')
        # Verify a journalnode JVM is running on each started host.
        for i in journalnodes:
            #logger.info(i,also_console=True)
            #logger.info(host_mapping[i],also_console=True)
            output = executeRemote("ps -aef | grep -i journalnode | grep -v grep | awk '{print $2 \":\" $9 \":\" $12 \":\" $22}'", host_mapping[i])
            #logger.info(output,also_console=True)
            for line in output.splitlines():
                if line.find("Dproc_journalnode") != -1:
                    logger.warn("Journal Nodes are Running", html=True)
                    return True
def add_policy(self, *args, **kwargs):
    """Create a PCC policy from kwargs and POST it via the pcc client.

    Required kwargs: appId (str or int).  Optional: inputs (literal list
    of {'name','value'} dicts merged over the app's defaults), scopeIds.
    Returns the pcc.add_policy response, or an error string when appId
    is missing/invalid.
    """
    banner("PCC.Create Policy")
    self._load_kwargs(kwargs)
    conn = BuiltIn().get_variable_value("${PCC_CONN}")
    logger.console("Kwargs: {}".format(kwargs))
    print("Kwargs in create policy are: {}".format(kwargs))
    if 'appId' not in kwargs:
        return "Provide a valid appName/appId to create policy"
    if 'appId' in kwargs:
        # Normalise appId: numeric strings are parsed, ints are kept.
        if type(kwargs['appId']) == str:
            print("I am here")
            if kwargs['appId'].isnumeric():
                self.appId = ast.literal_eval(kwargs['appId'])
            else:
                print("I am in else1")
                self.appId = kwargs['appId']
        else:
            print("I am in else2")
            self.appId = int(kwargs['appId'])
        print("app id is: {}".format(self.appId))
    if 'inputs' in kwargs:
        # Merge caller-supplied input values over the app defaults by name.
        user_inputs = ast.literal_eval(kwargs['inputs'])
        default_inputs = self.get_policy_inputs_from_apps(**kwargs)
        if default_inputs == "Provide a valid appName/appId to create policy":
            return "Provide a valid appName/appId to create policy"
        for user_input in user_inputs:
            for default_input in default_inputs:
                if user_input['name'] == default_input['name']:
                    default_input['value'] = user_input['value']
        self.inputs = default_inputs
    else:
        self.inputs = self.get_policy_inputs_from_apps(**kwargs)
        if self.inputs == "Provide a valid appName/appId to create policy":
            return "Provide a valid appName/appId to create policy"
    if 'scopeIds' in kwargs:
        # NOTE(review): parses self.scopeIds (set by _load_kwargs), not
        # kwargs['scopeIds'] directly -- confirm _load_kwargs always sets it.
        self.scopeIds = ast.literal_eval(self.scopeIds)
    print("Kwargs in create policy are:{}".format(kwargs))
    print(
        "inputs is :{} and appId is {} and scopeID is {} and description is {}"
        .format(self.inputs, self.appId, self.scopeIds, self.description))
    payload = {
        "appId": self.appId,
        "scopeIDs": self.scopeIds,
        "description": self.description,
        "inputs": self.inputs,
        "owner": self.owner
    }
    print("Payload is :{}".format(payload))
    logger.console("payload:-" + str(payload))
    return pcc.add_policy(conn, payload)
def __on_response_remote_config(self, client, userdata, msgtop):
    """ on_response_remote_config """
    # MQTT callback: ignores responses whose message id does not match the
    # request we sent; otherwise records the config result and wakes the
    # waiting thread.
    logger.console(self._tag + "on_response_remote_config called")
    if self._msg_id != msgtop.message_head.message_id:
        logger.warn(self._tag + "on_response_remote_config: Not expected msg_id")
        return
    if msgtop.HasField("remote_config_result"):
        # TODO: Handle receive mulit-config_items
        # NOTE(review): only the LAST item's result survives this loop.
        for config in msgtop.remote_config_result.config_results:
            self._result = config.result
        self._event.set()
def write_msg(self, basic):
    """Encode *basic* into a TPCANMsg and transmit it on the PCAN channel,
    logging any non-OK write status."""
    frame = TPCANMsg()
    frame.ID = basic.get_id()
    frame.LEN = basic.get_length()
    frame.MSGTYPE = PCAN_MESSAGE_STANDARD
    payload = basic.encode()
    # Payload bytes arrive as hex strings; convert each into the frame.
    for idx in range(frame.LEN):
        frame.DATA[idx] = int(payload[idx], 16)
    status = self._pcanbasic.Write(self._channel, frame)
    if status != PCAN_ERROR_OK:
        logger.console("write_msg: " + self.__get_formated_error(status))
def execute_command(self, sandbox_id, resource_name, command_name, params):
    """Run *command_name* on a sandbox resource and return its output."""
    logger.console(resource_name)
    headers = self._get_headers(self.auth_token)
    component_url = self._get_component_url(
        self.api_v2, headers, sandbox_id, resource_name)
    execution_url = self._start_execution(
        self.api_v2, headers, component_url, command_name, params)
    result = self._get_execution_result(self.api_v2, headers, execution_url)
    return result['output']
def get_variables(environment, on_demand, browser):
    """Robot Framework variable-file entry point (Python 2).

    Builds the arcade URL for *environment*, merges it with the COMMON
    variable table selected by *on_demand* (and *browser* when on-demand
    is enabled) and returns the combined dict.
    """
    variables = {"Environment": environment, "On Demand": on_demand, "Browser": browser }
    passedENV = str(environment).upper()
    print passedENV
    ondem = str(on_demand).upper()
    # browser= str(browser).upper()
    print ondem
    print browser
    # Map the environment name to its base URL.
    if (passedENV.upper() == 'LOCAL'):
        url = 'localhost:10088'
        adminurl = 'localhost:9090'
    elif(passedENV == 'PROD'):
        url = 'https://arcade.rally.com'
    else:
        url = 'https://' + passedENV.lower() + '-arcade.werally.in'
    print "url is" +url
    ENVS = { 'RALLY_TEST_URL': url }
    print url
    # Checks to see if the Environment and OnDemand variables are used
    if re.match('\\$\\{.*\\}', ondem) != None:
        # NOTE(review): `env` is assigned but never used afterwards.
        env = COMMON.keys()[0]
        logger.info(msg_prefix + 'Unresolved variable passed in, defaulting to ' + ondem)
    if not COMMON.has_key(ondem):
        raise Exception(
            'Undefined on demand: ' + str(on_demand) + '. Try one of these: ' + ', '.join(
                sorted(COMMON.keys())))
    logger.info(msg_prefix + 'Defining arcade web variables for environment ' + passedENV)
    logger.console(msg_prefix + 'Defining arcade web variables for environment ' + passedENV)
    if ondem == 'FALSE':
        print "ondem" +ondem
        print dict(list(COMMON[ondem].items()))
        print list(ENVS.items())
        return dict(list(COMMON[ondem].items()) +list(ENVS.items()))
    else:
        print "ondem" +ondem
        print dict(list(COMMON[ondem][browser].items()) +list(ENVS.items()))
        return dict(list(COMMON[ondem][browser].items()) +list(ENVS.items()))
def init_serialrobot(self,port,baudrate=115200,timeout=0.5):
    '''open serialcom and init serialrobot'''
    # ret is 0 on success, -1 when the port could not be opened.
    # NOTE(review): ret is computed but never returned -- confirm callers.
    ret = 0
    try:
        port = str(port)
        baudrate = int(baudrate)
        super(serialrobot,self).__init__(port, baudrate,timeout=timeout)
    except (serial.SerialException,serial.portNotOpenError),e:
        # Python 2 except syntax; logs diagnostics when opening fails.
        logger.console("%s cant't be opened" % port)
        print "port %s" % port,type(port)
        print "baudrate %s" % baudrate, type(baudrate)
        ret = -1
def setup_framework(self, nodes):
    """Pack the whole directory and extract in temp on each node."""
    tarball = self.__pack_framework_dir()
    logger.console('Framework packed to {0}'.format(tarball))
    remote_path = "/tmp/{0}".format(basename(tarball))
    # Copy + extract on every topology node before cleaning up locally.
    for node in nodes.values():
        self.__copy_tarball_to_node(tarball, node)
        self.__extract_tarball_at_node(remote_path, node)
    logger.trace('Test framework copied to all topology nodes')
    self.__delete_local_tarball(tarball)
def __extract_tarball_at_node(self, tarball, node):
    """Unpack *tarball* into the remote framework dir on *node* over SSH."""
    logger.console('Extracting tarball to {0} on {1}'.format(
        con.REMOTE_FW_DIR, node['host']))
    conn = SSH()
    conn.connect(node)
    cmd = 'rm -rf {1}; mkdir {1} ; sudo -Sn tar -zxf {0} -C {1};'.format(
        tarball, con.REMOTE_FW_DIR)
    ret_code, stdout, stderr = conn.exec_command(cmd, timeout=30)
    if ret_code != 0:
        logger.error('Unpack error: {0}'.format(stderr))
        raise Exception('Failed to unpack {0} at node {1}'.format(
            tarball, node['host']))
def send_command(self,command,pass_args=[],fail_args=[],timeout=0,wait_idle=1):
    """
    send command
    cmd: command will send to serialcom. It represents key maps if starting with '@'
    exec_env: command running environment. LINUX or UBOOT
    pass_arg: serilrobot will return index+1,successful,when detect these strings.Multiple strings separated by ';'. "SKIP" means skip this field.
    fail_arg: serilrobot will return -(index+1),fail,when detect these strings. Multiple strings separated by ';'."SKIP" means skip this field.
    timeout_arg: serialrobot will interrupt command after timeout
    """
    # NOTE(review): mutable default args ([]) -- only read here, but worth
    # replacing with None defaults.
    if not self.port_valid:
        logger.console("port isn't available,skip command: %s" % command)
        raise RuntimeError
    cmd = command.encode('ascii','ignore')
    pass_args = pass_args.encode('ascii','ignore')
    fail_args = fail_args.encode('ascii','ignore')
    # Robot passes scalars as strings; normalise to int.
    timeout = timeout if isinstance(timeout,int) else int(timeout)
    wait_idle = wait_idle if isinstance(wait_idle,int) else int(wait_idle)
    # Build the pass/fail marker sets; "SKIP" or empty disables a set.
    if pass_args.upper() == "SKIP" or pass_args == "":
        pset = []
    else:
        pset = pass_args.split(';')
    if fail_args.upper() == "SKIP" or fail_args == "":
        fset = []
    else:
        fset = fail_args.split(';')
    print "-" * 20
    print "[%s]" % cmd
    print pset
    print fset
    print "timeout = %d" % timeout
    print "wait = %d" % wait_idle
    if wait_idle:
        self._wait_port_idle()
    ret = self._send_cmd(cmd, pset, fset, timeout)
    if ret == 0:
        # 0 means the command timed out -- interrupt it with Ctrl-C.
        logger.info("command:%s timeout;send ctrl + c to stop!!" % cmd)
        self.send_ctrl_c()
    return ret
def run_task(self, task_name, **options):
    """Run the named CumulusCI task for the current project, optionally
    overriding task options via kwargs.

    Examples:
    | =Keyword= | =task_name= | =task_options=              | =comment=                        |
    | Run Task  | deploy      |                             | Run deploy with standard options |
    | Run Task  | deploy      | path=path/to/some/metadata  | Run deploy with custom path      |
    """
    task_config = self.project_config.get_task(task_name)
    class_path = task_config.class_path
    logger.console("\n")
    task_class, task_config = self._init_task(class_path, options, task_config)
    return self._run_task(task_class, task_config)
def ColOutputDirect(): Login_To(NN_IP,un="admin",pw="admin@123") var = robo.get_variable_value("${shellPrompt}") ssh.set_client_configuration(prompt=var) adaptors = [] ssh.write("echo 'show running-config'|/opt/tms/bin/cli -m config") output = ssh.read_until_prompt() for line in output.splitlines(): line = line.strip() if line.find("collector modify-instance 1 add-adaptor ipfix") != -1: logger.console("Got ipfix as adaptors") items = line.split(" ") if items[4] == "ipfix": logger.console(adaptors) if "ipfix" in adaptors: logger.console("Ipfix configured") if line.find("collector modify-instance 1 modify-adaptor ipfix output-directory") != -1: items = line.split(" ") output_directory = items[6] else: adaptors.append(items[4]) logger.console("Inside else part") if "ipfix" in adaptors == -1: #ConfigureCollector() output_directory = "/data/collector" print output_directory return output_directory
def AppStart(self,appName):
    """Restart the given application; currently only 'Acume' (the rubix
    process) is handled, other names are silently ignored."""
    if appName == "Acume":
        ssh.write("pm process rubix restart")
        ssh.write("rubix status")
        output = ssh.read_until_prompt()
        logger.console(output)
        #print ssh.close_connection()

# Commented-out manual test driver, kept from an earlier revision.
#if __name__ == "__main__":
#    a = RPMSetupConfigure()
#    a.cleanRPMmachine()
def get_port_pid(self, port):
    """Return the PID of the process LISTENING on *port*, or None when no
    such process exists.  Windows-only (uses ``netstat``/``findstr``).
    """
    get_pid_cmd = "netstat -ano | findstr LISTENING | findstr " + str(port)
    appium_pid_str = os.popen(get_pid_cmd).read()
    if appium_pid_str:
        # The PID is the last whitespace-separated field of the netstat
        # line.  NOTE(review): split(' ') keeps empty fields, so this
        # assumes no trailing spaces/newline after the PID -- verify on the
        # target OS.
        appium_g = appium_pid_str.split(' ')
        logger.console("Process about port " + str(port) + " have existed, is " + appium_g[-1] + ".", True, 'stdout')
        return appium_g[-1]
    else:
        logger.console("No process about port " + str(port) + "!", True, 'stdout')
        return None
def stop_tookit(self, toolniki):
    """Stop a test tool, e.g. the local Appium server.

    Returns the pid of the kill subprocess for appium, None otherwise.
    """
    if "appium" != toolniki:
        logger.console("No other tools to use.")
        return
    pid = self.get_port_pid(config.APPIUMPORT)
    logger.info("Going to stop local appium, pid: ."+str(pid), also_console=True)
    child = subprocess.Popen("tskill " + str(pid), shell=True)
    child.wait()
    return child.pid
def connect_to_target(self, host, username, password): target = _Target(host, username, password) i = 0 while not target.conn_open: try: target.conn.open_connection(target.host, "cla", target.properties['port']) target.conn_open = True except Exception, error: i += 1 msg = 'Opening connection to %s failed: ' % HOST + str(error) if error[0] == self.CONNECTION_REFUSED and i < target.properties['max_conn_attempts']: logger.console("Trying to reconnect in 5 seconds") time.sleep(5) else: raise AssertionError(msg)
def hadoopEpochValue():
    """Collect the journalnode last-promised-epoch value from every journal
    host and log the set of distinct values.

    NOTE(review): the regex captures only the FIRST digit of the epoch and
    `match` is not None-checked -- verify against the real file contents.
    """
    Login_To(NN_IP,un="admin",pw="admin@123")
    var = robo.get_variable_value("${shellPrompt}")
    ssh.set_client_configuration(prompt=var)
    conf = HAdoopConf()
    host_mapping = IpHostName()
    EpochValues = {}
    for i in conf["journal"]:
        output = executeRemote("cat /data/yarn/journalnode/%s/current/last-promised-epoch"%conf["nameservice"], host_mapping[i])
        match = re.search(r'^(\d).*',output)
        lastPromisedEpoch = match.group(1)
        logger.console(i)
        EpochValues[i] = lastPromisedEpoch
    # Distinct epoch values across all journal nodes (they should agree).
    epochValue_nodes = set(tuple(str(k)) for k in EpochValues.values())
    logger.console(epochValue_nodes)
def get_webdriver_remote(self):
    """Return a python snippet that re-attaches to the current Selenium
    remote session (by URL and session id)."""
    remote_url = self.get_remote_url()
    session_id = self.get_session_id()
    snippet = (
        'from selenium import webdriver;'
        'd=webdriver.Remote(command_executor="%s",'
        'desired_capabilities={});'
        'd.session_id="%s"' % (remote_url, session_id)
    )
    logger.console("\nDEBUG FROM CONSOLE\n%s\n" % (snippet))
    logger.info(snippet)
    return snippet
def run_task_class(self, class_path, **options):
    """Run a CumulusCI task class directly, with task options via kwargs.

    Useful for task logic that has not been configured in the project's
    cumulusci.yml -- typically logic unique to a single test that is not
    worth promoting to a named project task.

    Examples:
    | =Keyword=      | =task_class=                         | =task_options=                             |
    | Run Task Class | cumulusci.task.utils.DownloadZip     | url=http://test.com/test.zip dir=test_zip  |
    """
    logger.console("\n")
    task_class, task_config = self._init_task(
        class_path, options, TaskConfig())
    return self._run_task(task_class, task_config)
def ConfigureOozie(self):
    """Install the reflex-workflowmanager rpm if missing, then register
    oozie via pmx on the configured oozie host."""
    self.LoginTo(self.oozie_ip)
    print(self.Namenode_ip[0])
    # Only install the workflowmanager rpm when it is not already present.
    ssh.write("rpm -qa | grep reflex-workflowmanager")
    output = ssh.read_until_prompt(loglevel="DEBUG")
    if "reflex-workflowmanager" not in output:
        ssh.write("yum install -y reflex-workflowmanager")
        output = ssh.read_until_prompt(loglevel="DEBUG")
        logger.console(output)
    self.SwitchToReflex()
    logger.info("Configuring oozie on %s" % self.oozie_ip, also_console=True)
    ssh.write("pmx register oozie")
    output = ssh.read_until_prompt()
    logger.info(output, html=True, also_console=True)
def conf():
    """Read the hadoop_yarn configuration from the TMS CLI over ssh.

    Builds a dict with scalar keys (``config_ha``, ``namenode1``,
    ``namenode2``, ``nameservice``, ``state``) and list keys
    (``client``, ``journal``, ``slaves``), logs it to the console,
    and returns it.

    :return: the hadoop configuration dict.
    """
    prompt = robo.get_variable_value("${shellPrompt}")
    ssh.set_client_configuration(prompt=prompt)

    # hostname -> ip from 'show hosts' (kept for parity with the original
    # implementation; the mapping is not consumed below).
    ssh.write("echo 'show hosts'|/opt/tms/bin/cli -m config ")
    output = ssh.read_until_prompt()
    ip_mapping = {}
    for line in output.splitlines():
        match = re.search(
            r'(?P<ipAddress>\d{0,3}\.\d{0,3}\.\d{0,3}\.\d{0,3})\s+-->(?P<hostname>.*)$',
            line)
        if match:
            ip_mapping[match.group("hostname")] = match.group("ipAddress")

    ssh.write("echo 'internal query iterate subtree /tps/process/hadoop_yarn/attribute'|/opt/tms/bin/cli -m config")
    output = ssh.read_until_prompt()

    base = "/tps/process/hadoop_yarn/attribute/"
    # attribute node path -> key in the result dict (single-valued entries)
    scalar_keys = {
        base + "config_ha/value": "config_ha",
        base + "namenode1/value": "namenode1",
        base + "namenode2/value": "namenode2",
        base + "nameservice/value": "nameservice",
        base + "state/value": "state",
    }
    hadoop_conf = {}
    clients = []
    journalnodes = []
    slaves = []
    for line in output.splitlines():
        # BUG FIX: the original used .strip("(string)"), which strips ANY
        # of the characters ( ) s t r i n g from both ends and could eat
        # legitimate leading/trailing characters of the value (e.g. a
        # hostname ending in "ing"). Remove the literal "(string)" type
        # annotation instead. split("=", 1) also keeps values that
        # themselves contain '=' intact.
        value = line.split("=", 1)[-1].replace("(string)", "").strip()
        if line.find(base + "client/values") != -1:
            clients.append(value)
        elif line.find(base + "journalnodes/values") != -1:
            journalnodes.append(value)
        elif line.find(base + "slave/values") != -1:
            slaves.append(value)
        else:
            for path, key in scalar_keys.items():
                if line.find(path) != -1:
                    hadoop_conf[key] = value
                    break
    hadoop_conf["client"] = clients
    hadoop_conf["journal"] = journalnodes
    hadoop_conf["slaves"] = slaves
    logger.console(hadoop_conf)
    # Original logged the dict but never returned it; returning is
    # backward compatible and makes the keyword usable from tests.
    return hadoop_conf
def send_command_get_output(self, command, timeout=2, wait_idle=0):
    """Send *command* over the port connection and return its output.

    :param command: command string; non-ascii characters are dropped
        when encoding to ascii.
    :param timeout: seconds to wait for output; coerced to int.
    :param wait_idle: when truthy (after int coercion), wait for the
        port to become idle before sending.
    :raises RuntimeError: if the port is not available.
    """
    if not self.port_valid:
        logger.console("port isn't available,skip command: %s" % command)
        raise RuntimeError
    if not isinstance(timeout, int):
        timeout = int(timeout)
    if not isinstance(wait_idle, int):
        wait_idle = int(wait_idle)
    if wait_idle:
        self._wait_port_idle()
    return self._send_cmd_get_output(command.encode('ascii', 'ignore'), timeout)
def ConfigureCluster(self):
    """Create the reflex user and configure/enable the 2-node cluster
    on every node in self.clusterNodes."""
    logger.console(self.clusterNodes)
    for node in self.clusterNodes:
        self.LoginTo(node)
        logger.info("Configuring Cluster on %s" % node, also_console=True)
        ssh.write("id")
        logger.console(ssh.read_until_prompt())
        self.CreateReflexUser(node)
        self.SwitchToReflex()
        self.cliPrompt(cluster="no")
        # Issue the cluster CLI commands in order.
        for cli_cmd in (
            "cluster expected-nodes 2",
            "cluster interface eth0",
            "cluster id %s" % self.clusterid,
            "cluster name %s" % self.clusterid,
            "cluster master address vip %s /24" % self.cluster_vip,
            "cluster master interface eth0",
            "cluster enable",
        ):
            ssh.write(cli_cmd)
def ConfigureNN(self):
    """Install the reflex namenode rpms and configure hadoop_yarn on
    every namenode listed in self.configs["Namenode_IP"]."""
    logger.console("Configuring Namenode on %s" % self.configs["Namenode_IP"])
    for node in self.configs["Namenode_IP"]:
        self.LoginTo(node)
        logger.info("Now installing rpms and configuring Namenode Machine on %s" % node,
                    also_console=True)
        ssh.write("id")
        line = ssh.read_until_prompt()
        logger.console(line)
        # BUG FIX: the original called self.CreateReflexUser(self.gms_ip)
        # on every iteration; the sibling ConfigureCluster keyword creates
        # the reflex user on the node being configured, which is what is
        # done here.
        self.CreateReflexUser(node)
        logger.info("Installing reflex-etc-tmo on %s" % node, also_console=True)
        ssh.write("yum install -y reflex-etc-tmo")
        output = ssh.read_until_prompt()
        logger.info("Installing reflex-hadoop-namenode on %s" % node, also_console=True)
        ssh.write("yum install -y reflex-hadoop-namenode")
        output = ssh.read_until_prompt()
        self.SwitchToReflex()
        self.ReflexKeysGeneration()
        logger.info("Configuring Yarn on %s" % node, also_console=True)
        ssh.write("pmx register hadoop_yarn")
        ssh.write("pmx set hadoop_yarn config_ha False")
        ssh.write("pmx set hadoop_yarn monitor ON")
        # TODO(review): namenode1 is hard-coded while the other hosts come
        # from self attributes — confirm whether this should be
        # configurable as well.
        ssh.write("pmx set hadoop_yarn namenode1 rpm-nn-102")
        ssh.write("pmx set hadoop_yarn state UNINIT")
        ssh.write("pmx set hadoop_yarn client %s" % self.collector_ip)
        ssh.write("pmx set hadoop_yarn slave %s" % self.datanodes_ip[0])
        ssh.write("pmx set hadoop_yarn slave %s" % self.datanodes_ip[1])
        ssh.write("pmx set hadoop_yarn dnprofile compute_RPM_NN")
        output = ssh.read_until_prompt()
        logger.console(output)
        ssh.write("pmx set hadoop_yarn queue.default.capacity 50")
        output = ssh.read_until_prompt()
        logger.console(output)
        ssh.write("pmx set hadoop_yarn queue.default.maximum_capacity 100")
        ssh.write("pmx set hadoop_yarn queue.query.capacity 20")
        ssh.write("pmx set hadoop_yarn queue.query.maximum_capacity 100")
        ssh.write("pmx set hadoop_yarn queue.rge.capacity 30")
        ssh.write("pmx set hadoop_yarn queue.rge.maximum_capacity 100")
        ssh.write("pmx set hadoop_yarn yarn.scheduler.maximum-allocation-mb 81920")
        # Re-joined: this statement was broken across two physical lines
        # in the original source, splitting the string literal.
        ssh.write("pmx subshell hadoop_yarn set config dnprofile compute_RPM_NN attribute property yarn.nodemanager.resource.cpu-vcores 40")
        ssh.write("pmx subshell hadoop_yarn set config dnprofile compute_RPM_NN attribute property yarn.nodemanager.vmem-pmem-ratio 4")
        ssh.write("pmx subshell hadoop_yarn set config dnprofile compute_RPM_NN attribute property yarn.nodemanager.resource.memory-mb 81920")
        output = ssh.read_until_prompt()
        logger.info(output)
        ssh.write("exit")
def load_fixture(self, fixture):
    """Load a Django fixture into the database.

    Runs ``manage.py loaddata`` in a subprocess and logs its output to
    the console (stderr when present, stdout otherwise).
    """
    command = [
        'python',
        self.manage,
        'loaddata',
        '%s' % fixture,
        '--settings=%s' % self.settings,
    ]
    proc = subprocess.Popen(command,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    logger.console(stderr if stderr else stdout)