class SessionKeywords(LibraryComponent):
    """Keywords for creating, caching and deleting AWS boto3 sessions."""

    def __init__(self, state):
        LibraryComponent.__init__(self, state)
        # Cache of registered boto3 sessions, keyed by region alias.
        self._cache = ConnectionCache('No sessions.')

    @keyword('Create Session With Keys')
    def create_session_with_keys(self, region, access_key, secret_key):
        """Takes Region as an argument and creates a session with your
        access key and secret key stored at ~/.aws/credentials.
        Will throw error if not configured.

        Examples:
        | Create Session With Keys | us-west-1 | access key | secret key |
        """
        self.rb_logger.info("Creating Session: %s" % region)
        session = boto3.Session(
            aws_access_key_id=access_key,
            aws_secret_access_key=secret_key,
            region_name=region)
        # NOTE: removed stray debug `print(session)` — the session is
        # already reported via rb_logger above.
        self._cache.register(session, alias=region)
        self.state.session = session
        return session

    @keyword('Create Session With Profile')
    def create_session_with_profile(self, region, profile):
        """Takes Region as an argument and creates a session with your
        profile stored at ~/.aws/config. Will throw error if not configured.

        Examples:
        | Create Session With Profile | us-west-1 | profile name |
        """
        self.rb_logger.info(f"Creating Session: {region}, {profile}")
        session = boto3.Session(profile_name=profile, region_name=region)
        self._cache.register(session, alias=region)
        self.state.session = session
        return session

    @keyword('Delete Session')
    def delete_session(self, region, profile=None):
        """Removes session.

        Arguments:
        - ``region``: A case and space insensitive string to identify the session.
                      (Default ``region``)

        Examples:
        | Delete Session | REGION |
        """
        self._cache.switch(region)
        index = self._cache.current_index
        self._cache.current = self._cache._no_current
        self._cache._connections[index - 1] = None
        try:
            self._cache._aliases.pop(region)
        except KeyError:
            # ConnectionCache stores alias keys normalized (lowercased,
            # spaces removed), so honor the documented case/space
            # insensitivity when the raw key is not found.
            self._cache._aliases.pop(region.replace(' ', '').lower())

    @keyword('Delete All Sessions')
    def delete_all_sessions(self):
        """ Delete All Sessions """
        self._cache.empty_cache()
class TestConnnectionCache(unittest.TestCase):
    # NOTE(review): the class name carries a historical typo ("Connnection");
    # it is kept unchanged so externally visible test ids stay stable.

    def setUp(self):
        self.cache = ConnectionCache()

    def test_initial(self):
        self._verify_initial_state()

    def test_no_connection(self):
        assert_raises_with_msg(RuntimeError, 'No open connection',
                               getattr, ConnectionCache().current, 'whatever')
        assert_raises_with_msg(RuntimeError, 'Custom msg',
                               getattr, ConnectionCache('Custom msg').current, 'xxx')

    def test_register_one(self):
        conn = ConnectionMock()
        index = self.cache.register(conn)
        assert_equals(index, 1)
        assert_equals(self.cache.current, conn)
        assert_equals(self.cache._connections, [conn])
        assert_equals(self.cache._aliases, {})

    def test_register_multiple(self):
        conns = [ConnectionMock(), ConnectionMock(), ConnectionMock()]
        for i, conn in enumerate(conns):
            index = self.cache.register(conn)
            assert_equals(index, i + 1)
            assert_equals(self.cache.current, conn)
        assert_equals(self.cache._connections, conns)

    def test_switch_with_index(self):
        self._register('a', 'b', 'c')
        self._assert_current('c', 3)
        self.cache.switch(1)
        self._assert_current('a', 1)
        self.cache.switch('2')
        self._assert_current('b', 2)

    def _assert_current(self, conn_id, index):
        # `conn_id` avoids shadowing the builtin `id`.
        assert_equals(self.cache.current.id, conn_id)
        assert_equals(self.cache.current_index, index)

    def test_switch_with_non_existing_index(self):
        self._register('a', 'b')
        assert_raises_with_msg(RuntimeError, "Non-existing index or alias '3'",
                               self.cache.switch, 3)
        assert_raises_with_msg(RuntimeError, "Non-existing index or alias '42'",
                               self.cache.switch, 42)

    def test_register_with_alias(self):
        conn = ConnectionMock()
        index = self.cache.register(conn, 'My Connection')
        assert_equals(index, 1)
        assert_equals(self.cache.current, conn)
        assert_equals(self.cache._connections, [conn])
        assert_equals(self.cache._aliases, {'myconnection': 1})

    def test_register_multiple_with_alias(self):
        # Renamed from the misspelled `test_register_multiple_with_alis`.
        c1, c2, c3 = ConnectionMock(), ConnectionMock(), ConnectionMock()
        for i, conn in enumerate([c1, c2, c3]):
            index = self.cache.register(conn, 'c%d' % (i + 1))
            assert_equals(index, i + 1)
            assert_equals(self.cache.current, conn)
        assert_equals(self.cache._connections, [c1, c2, c3])
        assert_equals(self.cache._aliases, {'c1': 1, 'c2': 2, 'c3': 3})

    def test_switch_with_alias(self):
        self._register('a', 'b', 'c', 'd', 'e')
        assert_equals(self.cache.current.id, 'e')
        self.cache.switch('a')
        assert_equals(self.cache.current.id, 'a')
        self.cache.switch('C')
        assert_equals(self.cache.current.id, 'c')
        self.cache.switch(' B ')
        assert_equals(self.cache.current.id, 'b')

    def test_switch_with_non_existing_alias(self):
        self._register('a', 'b')
        assert_raises_with_msg(RuntimeError,
                               "Non-existing index or alias 'whatever'",
                               self.cache.switch, 'whatever')

    def test_switch_with_alias_overriding_index(self):
        self._register('2', '1')
        self.cache.switch(1)
        assert_equals(self.cache.current.id, '2')
        self.cache.switch('1')
        assert_equals(self.cache.current.id, '1')

    def test_close_all(self):
        connections = self._register('a', 'b', 'c', 'd')
        self.cache.close_all()
        self._verify_initial_state()
        for conn in connections:
            assert_true(conn.closed_by_close)

    def test_close_all_with_given_method(self):
        connections = self._register('a', 'b', 'c', 'd')
        self.cache.close_all('exit')
        self._verify_initial_state()
        for conn in connections:
            assert_true(conn.closed_by_exit)

    def test_empty_cache(self):
        connections = self._register('a', 'b', 'c', 'd')
        self.cache.empty_cache()
        self._verify_initial_state()
        for conn in connections:
            assert_false(conn.closed_by_close)
            assert_false(conn.closed_by_exit)

    def _verify_initial_state(self):
        assert_equals(self.cache.current, self.cache._no_current)
        assert_none(self.cache.current_index)
        assert_equals(self.cache._connections, [])
        assert_equals(self.cache._aliases, {})

    def _register(self, *ids):
        connections = []
        for conn_id in ids:
            conn = ConnectionMock(conn_id)
            self.cache.register(conn, conn_id)
            connections.append(conn)
        return connections
class ElasticsearchLibrary(object):
    """
    Library for working with Elasticsearch.

    Based on:
    | Python client for Elasticsearch | https://pypi.python.org/pypi/elasticsearch |

    == Dependencies ==
    | Python client for Elasticsearch | https://pypi.python.org/pypi/elasticsearch |
    | robot framework | http://robotframework.org |
    """

    ROBOT_LIBRARY_SCOPE = 'GLOBAL'

    def __init__(self) -> None:
        """
        Initialization.
        """
        self._connection: Optional[Elasticsearch] = None
        self._cache = ConnectionCache()

    @property
    def connection(self) -> Elasticsearch:
        """Check and return connection to Elasticsearch.

        *Raises:*\n
            RuntimeError: if connection to Elasticsearch hasn't been created yet.

        *Returns:*\n
            Current connection to Elasticsearch.
        """
        if self._connection is None:
            raise RuntimeError('There is no open connection to Elasticsearch.')
        return self._connection

    def connect_to_elasticsearch(self, host: str, port: Union[int, str],
                                 alias: str = 'default') -> int:
        """
        Open connection to Elasticsearch.

        *Args:*\n
            _host_ - server host name;\n
            _port_ - port number;\n
            _alias_ - http-connection alias;\n

        *Returns:*\n
            Connection index.

        *Example:*\n
        | Connect To Elasticsearch | 192.168.1.108 | 9200 | alias=cluster1 |
        """
        port = int(port)
        try:
            self._connection = Elasticsearch([{'host': host, 'port': port}])
            # Remember the endpoint on the client for later log messages.
            self._connection.host = host
            self._connection.port = port
            return self._cache.register(self._connection, alias=alias)
        except Exception as e:
            # Chain the original exception so the traceback keeps the cause.
            raise Exception(f'Connect to Elasticsearch error: {e}') from e

    def disconnect_from_elasticsearch(self) -> None:
        """
        Close connection to Elasticsearch.

        *Example:*\n
        | Connect To Elasticsearch | 192.168.1.108 | alias=cluster1 |
        | Disconnect From Elasticsearch |
        """
        self._connection = None

    def close_all_elasticsearch_connections(self) -> None:
        """
        Close all connections to ElasticSearch.

        This keyword is used to close all connections in case if there are
        several open connections.
        After execution of this keyword connection index returned by
        [#Connect To Elasticsearch|Connect To Elasticsearch] starts from 1.

        *Example:*\n
        | Connect To Elasticsearch | 192.168.1.108 | alias=cluster1 |
        | Connect To Elasticsearch | 192.168.1.208 | alias=cluster2 |
        | Close All Elasticsearch Connections |
        """
        self._connection = None
        self._cache.empty_cache()

    def switch_elasticsearch_connection(
            self, index_or_alias: Union[int, str]) -> int:
        """
        Switch between active connections with several clusters using their
        index or alias.

        Alias is set in keyword
        [#Connect To Elasticsearch|Connect To Elasticsearch]
        which also returns connection index.

        *Args:*\n
            _index_or_alias_ - connection index or alias;

        *Returns:*\n
            Previous connection index.

        *Example:* (switch by alias)\n
        | Connect To Elasticsearch | 192.168.1.108 | 9200 | alias=cluster1 |
        | Connect To Elasticsearch | 192.168.1.208 | 9200 | alias=cluster2 |
        | Switch Elasticsearch Connection | cluster1 |

        *Example:* (switch by index)\n
        | ${cluster1}= | Connect To Elasticsearch | 192.168.1.108 | 9200 |
        | ${cluster2}= | Connect To Elasticsearch | 192.168.1.208 | 9200 |
        | ${previous_index}= | Switch Elasticsearch Connection | ${cluster1} |
        | Switch Elasticsearch Connection | ${previous_index} |
        =>\n
        ${cluster1}= 1\n
        ${cluster2}= 2\n
        ${previous_index}= 2\n
        """
        old_index = self._cache.current_index
        self._connection = self._cache.switch(index_or_alias)
        return old_index

    def is_alive(self) -> bool:
        """
        Check availability of Elasticsearch.

        Sending GET-request of the following format:
        'http://<host>:<port>/'

        *Returns:*\n
            bool True, if Elasticsearch is available.\n
            bool False in other cases.

        *Raises:*\n
            Exception if sending GET-request is impossible.

        *Example:*\n
        | ${live}= | Is Alive |
        =>\n
        True
        """
        try:
            info = self.connection.info()
            return info["cluster_name"] == "elasticsearch"
        except Exception as e:
            logger.debug(f"Exception {e} raised working with Elasticsearch on "
                         f"{self.connection.host} and {self.connection.port}"
                         )  # type: ignore
            raise

    def es_save_data(self, es_string: str) -> None:
        """
        Add data to Elasticsearch.

        *Args:*\n
            _es_string_ - string with data for Elasticsearch;\n

        *Example:*\n
        | Connect To Elasticsearch | 192.168.1.108 | 9200 |
        | Es Save Data | some_string1 |
        | Close All Elasticsearch Connections |
        """
        # A plain dict is already a valid request body; the former
        # json.dumps/json.loads round-trip produced an identical object.
        body = {"key": es_string}
        self.connection.index(index='es', doc_type='es', id=1, body=body)

    def es_retrieve_data(self) -> Dict:
        """
        Get data from Elasticsearch.

        *Returns:*\n
            Data from Elasticsearch.

        *Example:*\n
        | Connect To Elasticsearch | 192.168.1.108 | 9200 |
        | ${data}= | Wait Until Keyword Succeeds | 5x | 10s | Es Retrieve Data |
        | Close All Elasticsearch Connections |
        """
        try:
            data = self.connection.get(index='es', doc_type='es', id=1)
            return data
        except Exception as e:
            logger.debug(f"Exception {e} raised working with Elasticsearch on "
                         f"{self.connection.host} and {self.connection.port}"
                         )  # type: ignore
            raise

    def es_search(self, es_string: str) -> Dict:
        """
        Search for data in Elasticsearch.

        *Args:*\n
            _es_string_ - string for searching in Elasticsearch;\n

        *Returns:*\n
            Search results from Elasticsearch.

        *Example:*\n
        | Connect To Elasticsearch | 192.168.1.108 | 9200 |
        | ${search_result}= | Es Search | some_string2 |
        | Close All Elasticsearch Connections |
        """
        try:
            search_result = self.connection.search(
                index="es",
                body={"query": {"match": {'key': es_string}}})
            return search_result
        except Exception as e:
            logger.debug(f"Exception {e} raised working with Elasticsearch on "
                         f"{self.connection.host} and {self.connection.port}"
                         )  # type: ignore
            raise
class OracleDB(object):
    """
    Robot Framework library for working with Oracle DB.

    == Dependencies ==
    | cx_Oracle | http://cx-oracle.sourceforge.net | version >= 5.3 |
    | robot framework | http://robotframework.org |
    """

    # The default timeout for executing an SQL query is 15 minutes.
    DEFAULT_TIMEOUT = 900.0
    ROBOT_LIBRARY_SCOPE = 'GLOBAL'
    # Diagnostics about the most recently executed statement.
    last_executed_statement: Optional[str] = None
    last_executed_statement_params: Optional[Dict[str, Any]] = None
    last_used_connection_index: Optional[int] = None

    def __init__(self) -> None:
        """Library initialization.
        Robot Framework ConnectionCache() class is prepared for working
        with concurrent connections."""
        self._connection: Optional[cx_Oracle.Connection] = None
        self._cache = ConnectionCache()

    @property
    def connection(self) -> cx_Oracle.Connection:
        """Get current connection to Oracle database.

        *Raises:*\n
            RuntimeError: if there isn't any open connection.

        *Returns:*\n
            Current connection to the database.
        """
        if self._connection is None:
            raise RuntimeError(
                'There is no open connection to Oracle database.')
        return self._connection

    def make_dsn(self, host: str, port: str, sid: str,
                 service_name: str = '') -> str:
        """
        Build dsn string for use in connection.

        *Args:*\n
            host - database host;\n
            port - database port;\n
            sid - database sid;\n
            service_name - database service name;\n

        *Returns:*\n
            Returns dsn string.
        """
        return cx_Oracle.makedsn(host=host, port=port, sid=sid,
                                 service_name=service_name)

    def connect_to_oracle(self, dbname: str, dbusername: str,
                          dbpassword: Optional[str] = None,
                          alias: Optional[str] = None) -> int:
        """
        Connection to Oracle DB.

        *Args:*\n
            _dbname_ - database name;\n
            _dbusername_ - username for db connection;\n
            _dbpassword_ - password for db connection;\n
            _alias_ - connection alias, used for switching between open connections;\n

        *Returns:*\n
            Returns ID of the new connection. The connection is set as active.

        *Example:*\n
        | Connect To Oracle | rb60db | bis | password |
        """
        try:
            # Do not log the password: credentials must not leak into the log.
            logger.debug(
                f'Connecting using : dbname={dbname}, dbusername={dbusername}, dbpassword=***'
            )
            connection_string = f'{dbusername}/{dbpassword}@{dbname}'
            self._connection = cx_Oracle.connect(connection_string)
            return self._cache.register(self.connection, alias)
        except cx_Oracle.DatabaseError as err:
            raise Exception("Logon to oracle Error:", str(err)) from err

    def disconnect_from_oracle(self) -> None:
        """
        Close active Oracle connection.

        *Example:*\n
        | Connect To Oracle | rb60db | bis | password |
        | Disconnect From Oracle |
        """
        self.connection.close()
        # Drop the stale reference so the `connection` property cannot hand
        # out an already-closed connection afterwards.
        self._connection = None
        self._cache.empty_cache()

    def close_all_oracle_connections(self) -> None:
        """
        Close all Oracle connections that were opened.

        You should not use [#Disconnect From Oracle|Disconnect From Oracle]
        and [#Close All Oracle Connections|Close All Oracle Connections]
        together.

        After calling this keyword connection IDs returned by opening new
        connections [#Connect To Oracle|Connect To Oracle], will start from 1.

        *Example:*\n
        | Connect To Oracle | rb60db | bis | password | alias=bis |
        | Connect To Oracle | rb60db | bis_dcs | password | alias=bis_dsc |
        | Switch Oracle Connection | bis |
        | @{sql_out_bis}= | Execute Sql String | select SYS_CONTEXT('USERENV', 'CURRENT_SCHEMA') from dual |
        | Switch Oracle Connection | bis_dsc |
        | @{sql_out_bis_dsc}= | Execute Sql String | select SYS_CONTEXT('USERENV', 'CURRENT_SCHEMA') from dual |
        | Close All Oracle Connections |
        """
        self._connection = self._cache.close_all()

    def switch_oracle_connection(self, index_or_alias: Union[int, str]) -> int:
        """
        Switch between existing Oracle connections using their connection IDs
        or aliases.

        The connection ID is obtained on creating connection.
        Connection alias is optional and can be set at connecting to DB
        [#Connect To Oracle|Connect To Oracle].

        *Args:*\n
            _index_or_alias_ - connection ID or alias assigned to connection;

        *Returns:*\n
            ID of the previous connection.

        *Example:* (switch by alias)\n
        | Connect To Oracle | rb60db | bis | password | alias=bis |
        | Connect To Oracle | rb60db | bis_dcs | password | alias=bis_dsc |
        | Switch Oracle Connection | bis |
        | @{sql_out_bis}= | Execute Sql String | select SYS_CONTEXT('USERENV', 'CURRENT_SCHEMA') from dual |
        | Switch Oracle Connection | bis_dsc |
        | @{sql_out_bis_dsc}= | Execute Sql String | select SYS_CONTEXT('USERENV', 'CURRENT_SCHEMA') from dual |
        | Close All Oracle Connections |
        =>\n
        @{sql_out_bis} = BIS\n
        @{sql_out_bis_dcs}= BIS_DCS

        *Example:* (switch by index)\n
        | ${bis_index}= | Connect To Oracle | rb60db | bis | password |
        | ${bis_dcs_index}= | Connect To Oracle | rb60db | bis_dcs | password |
        | @{sql_out_bis_dcs_1}= | Execute Sql String | select SYS_CONTEXT('USERENV', 'CURRENT_SCHEMA') from dual |
        | ${previous_index}= | Switch Oracle Connection | ${bis_index} |
        | @{sql_out_bis}= | Execute Sql String | select SYS_CONTEXT('USERENV', 'CURRENT_SCHEMA') from dual |
        | Switch Oracle Connection | ${previous_index} |
        | @{sql_out_bis_dcs_2}= | Execute Sql String | select SYS_CONTEXT('USERENV', 'CURRENT_SCHEMA') from dual |
        | Close All Oracle Connections |
        =>\n
        ${bis_index}= 1\n
        ${bis_dcs_index}= 2\n
        @{sql_out_bis_dcs_1} = BIS_DCS\n
        ${previous_index}= 2\n
        @{sql_out_bis} = BIS\n
        @{sql_out_bis_dcs_2}= BIS_DCS
        """
        old_index = self._cache.current_index
        self._connection = self._cache.switch(index_or_alias)
        return old_index

    @staticmethod
    def wrap_into_html_details(statement: str, summary: str) -> str:
        """Format statement for html logging.

        *Args:*\n
            _statement_: statement to log.
            _summary_: summary for details tag.

        *Returns:*\n
            Formatted statement.
        """
        statement = sqlparse.format(statement, reindent=True, indent_width=4,
                                    keyword_case='upper')
        statement_html = escape(statement)
        data = f'<details><summary>{summary}</summary><p>{statement_html}</p></details>'
        return data

    def _execute_sql(self, cursor: cx_Oracle.Cursor, statement: str,
                     params: Dict[str, Any]) -> cx_Oracle.Cursor:
        """
        Execute SQL query on Oracle DB using active connection.

        *Args*:\n
            _cursor_: cursor object.\n
            _statement_: SQL query to be executed.\n
            _params_: SQL query parameters.\n

        *Returns:*\n
            The cursor, after executing the statement.
        """
        statement_with_params = self._replace_parameters_in_statement(
            statement, params)
        _connection_info = '@'.join(
            (cursor.connection.username, cursor.connection.dsn))
        data = self.wrap_into_html_details(
            statement=statement_with_params,
            summary=f'Executed PL/SQL statement on {_connection_info}')
        logger.info(data, html=True)
        cursor.prepare(statement)
        # Reuse the already-rendered statement instead of rendering it twice.
        self.last_executed_statement = statement_with_params
        self.last_used_connection_index = self._cache.current_index
        cursor.execute(None, params)
        # Return the cursor as promised by the annotation.
        return cursor

    @staticmethod
    def _get_timeout_from_execution_context() -> float:
        """Get timeout from Robot Framework execution context.

        Returns:
            The active keyword timeout if set; otherwise the active test
            timeout when it is shorter than DEFAULT_TIMEOUT; otherwise
            DEFAULT_TIMEOUT.
        """
        timeouts = {}
        default_timeout = OracleDB.DEFAULT_TIMEOUT
        for timeout in EXECUTION_CONTEXTS.current.timeouts:
            if timeout.active:
                timeouts[timeout.type] = timeout.time_left()
        if timeouts.get(KeywordTimeout.type, None):
            return timeouts[KeywordTimeout.type]
        test_timeout = timeouts.get(TestTimeout.type, None)
        return test_timeout if test_timeout and test_timeout < default_timeout else default_timeout

    def _replace_parameters_in_statement(self, statement: str,
                                         params: Dict[str, Any]) -> str:
        """Update SQL query parameters, if any exist, with their values for
        logging purposes.

        *Args*:\n
            _statement_: SQL query to be updated.\n
            _params_: SQL query parameters.\n

        *Returns:*\n
            SQL query with parameter names replaced with their values.
        """
        # Longest keys first, so ':var10' is not clobbered by ':var1'.
        params_keys = sorted(params.keys(), reverse=True)
        for key in params_keys:
            if isinstance(params[key], (int, float)):
                statement = statement.replace(f':{key}', str(params[key]))
            elif params[key] is None:
                statement = statement.replace(f':{key}', 'NULL')
            else:
                statement = statement.replace(f':{key}', f"'{params[key]}'")
        return statement

    def execute_plsql_block(self, plsqlstatement: str, **params: Any) -> None:
        """
        PL/SQL block execution.

        *Args:*\n
            _plsqlstatement_ - PL/SQL block;\n
            _params_ - PL/SQL block parameters;\n

        *Raises:*\n
            PLSQL Error: Error message encoded according to DB where the code was run

        *Returns:*\n
            PL/SQL block execution result.

        *Example:*\n
        | *Settings* | *Value* |
        | Library    | OracleDB |

        | *Variables* | *Value* |
        | ${var_failed}    | 3 |

        | *Test Cases* | *Action* | *Argument* | *Argument* | *Argument* |
        | Simple |
        |    | ${statement}= | catenate | SEPARATOR=\\r\\n | DECLARE |
        |    | ...           |          |                  | a NUMBER := ${var_failed}; |
        |    | ...           |          |                  | BEGIN |
        |    | ...           |          |                  | a := a + 1; |
        |    | ...           |          |                  | if a = 4 then |
        |    | ...           |          |                  | raise_application_error ( -20001, 'This is a custom error' ); |
        |    | ...           |          |                  | end if; |
        |    | ...           |          |                  | END; |
        |    | Execute Plsql Block  | plsqlstatement=${statement} |
        =>\n
        DatabaseError: ORA-20001: This is a custom error

        |    | ${statement}= | catenate | SEPARATOR=\\r\\n | DECLARE |
        |    | ...           |          |                  | a NUMBER := :var; |
        |    | ...           |          |                  | BEGIN |
        |    | ...           |          |                  | a := a + 1; |
        |    | ...           |          |                  | if a = 4 then |
        |    | ...           |          |                  | raise_application_error ( -20001, 'This is a custom error' ); |
        |    | ...           |          |                  | end if; |
        |    | ...           |          |                  | END; |
        |    | Execute Plsql Block  | plsqlstatement=${statement} | var=${var_failed} |
        =>\n
        DatabaseError: ORA-20001: This is a custom error
        """
        cursor = self.connection.cursor()
        with sql_timeout(timeout=self._get_timeout_from_execution_context(),
                         connection=cursor.connection):
            try:
                self._execute_sql(cursor, plsqlstatement, params)
                self.connection.commit()
            finally:
                # Rollback after a successful commit is a no-op; it only
                # undoes uncommitted work when an exception occurred.
                self.connection.rollback()

    def execute_plsql_block_with_dbms_output(self, plsqlstatement: str,
                                             **params: Any) -> List[str]:
        """
        Execute PL/SQL block with dbms_output().

        *Args:*\n
            _plsqlstatement_ - PL/SQL block;\n
            _params_ - PL/SQL block parameters;\n

        *Raises:*\n
            PLSQL Error: Error message encoded according to DB where the code was run.

        *Returns:*\n
            List of values returned by Oracle dbms_output.put_line().

        *Example:*\n
        | *Settings* | *Value* |
        | Library    | OracleDB |

        | *Variables* | *Value* |
        | ${var}    | 4 |

        | *Test Cases* | *Action* | *Argument* | *Argument* | *Argument* |
        | Simple |
        |    | ${statement}= | catenate | SEPARATOR=\\r\\n | DECLARE |
        |    | ...           |          |                  | a NUMBER := ${var}; |
        |    | ...           |          |                  | BEGIN |
        |    | ...           |          |                  | a := a + 1; |
        |    | ...           |          |                  | if a = 4 then |
        |    | ...           |          |                  | raise_application_error ( -20001, 'This is a custom error' ); |
        |    | ...           |          |                  | end if; |
        |    | ...           |          |                  | dbms_output.put_line ('text '||a||', e-mail text'); |
        |    | ...           |          |                  | dbms_output.put_line ('string 2 '); |
        |    | ...           |          |                  | END; |
        |    | @{dbms}= | Execute Plsql Block With Dbms Output | plsqlstatement=${statement} |
        =>\n
        | @{dbms} | text 5, e-mail text |
        |         | string 2 |

        |    | ${statement}= | catenate | SEPARATOR=\\r\\n | DECLARE |
        |    | ...           |          |                  | a NUMBER := :var; |
        |    | ...           |          |                  | BEGIN |
        |    | ...           |          |                  | a := a + 1; |
        |    | ...           |          |                  | if a = 4 then |
        |    | ...           |          |                  | raise_application_error ( -20001, 'This is a custom error' ); |
        |    | ...           |          |                  | end if; |
        |    | ...           |          |                  | dbms_output.put_line ('text '||a||', e-mail text'); |
        |    | ...           |          |                  | dbms_output.put_line ('string 2 '); |
        |    | ...           |          |                  | END; |
        |    | @{dbms}= | Execute Plsql Block With Dbms Output | plsqlstatement=${statement} | var=${var} |
        =>\n
        | @{dbms} | text 5, e-mail text |
        |         | string 2 |
        """
        dbms_output = []
        cursor = self.connection.cursor()
        with sql_timeout(timeout=self._get_timeout_from_execution_context(),
                         connection=cursor.connection):
            try:
                cursor.callproc("dbms_output.enable")
                self._execute_sql(cursor, plsqlstatement, params)
                self.connection.commit()
                statusvar = cursor.var(cx_Oracle.NUMBER)
                linevar = cursor.var(cx_Oracle.STRING)
                while True:
                    cursor.callproc("dbms_output.get_line",
                                    (linevar, statusvar))
                    # Non-zero status means no more lines are buffered.
                    if statusvar.getvalue() != 0:
                        break
                    dbms_output.append(linevar.getvalue())
                return dbms_output
            finally:
                self.connection.rollback()

    def execute_plsql_script(self, file_path: str, **params: Any) -> None:
        """
        Execution of PL/SQL code from file.

        *Args:*\n
            _file_path_ - path to PL/SQL script file;\n
            _params_ - PL/SQL code parameters;\n

        *Raises:*\n
            PLSQL Error: Error message encoded according to DB where the code was run.

        *Example:*\n
        | Execute Plsql Script | ${CURDIR}${/}plsql_script.sql |
        | Execute Plsql Script | ${CURDIR}${/}plsql_script.sql | first_param=1 | second_param=2 |
        """
        with open(file_path, "r") as script:
            data = script.read()
            self.execute_plsql_block(data, **params)

    def execute_sql_string(self, plsqlstatement: str,
                           **params: Any) -> List[Tuple[Any, ...]]:
        """
        Execute PL/SQL string.

        *Args:*\n
            _plsqlstatement_ - PL/SQL string;\n
            _params_ - PL/SQL string parameters;\n

        *Raises:*\n
            PLSQL Error: Error message encoded according to DB where the code was run.

        *Returns:*\n
            PL/SQL string execution result.

        *Example:*\n
        | @{query}= | Execute Sql String | select sysdate, sysdate+1 from dual |
        | Set Test Variable | ${sys_date} | ${query[0][0]} |
        | Set Test Variable | ${next_date} | ${query[0][1]} |

        | @{query}= | Execute Sql String | select sysdate, sysdate+:d from dual | d=1 |
        | Set Test Variable | ${sys_date} | ${query[0][0]} |
        | Set Test Variable | ${next_date} | ${query[0][1]} |
        """
        cursor = self.connection.cursor()
        with sql_timeout(timeout=self._get_timeout_from_execution_context(),
                         connection=cursor.connection):
            try:
                self._execute_sql(cursor, plsqlstatement, params)
                query_result = cursor.fetchall()
                self.result_logger(query_result)
                return query_result
            finally:
                self.connection.rollback()

    def execute_sql_string_mapped(self, sql_statement: str,
                                  **params: Any) -> List[Dict[str, Any]]:
        """SQL query execution where each result row is mapped as a dict
        with column names as keys.

        *Args:*\n
            _sql_statement_ - PL/SQL string;\n
            _params_ - PL/SQL string parameters;\n

        *Returns:*\n
            A list of dictionaries where column names are mapped as keys.

        *Example:*\n
        | @{query}= | Execute Sql String Mapped| select sysdate, sysdate+1 from dual |
        | Set Test Variable | ${sys_date} | ${query[0][sysdate]} |
        | Set Test Variable | ${next_date} | ${query[0][sysdate1]} |

        | @{query}= | Execute Sql String Mapped| select sysdate, sysdate+:d from dual | d=1 |
        | Set Test Variable | ${sys_date} | ${query[0][sysdate]} |
        | Set Test Variable | ${next_date} | ${query[0][sysdate1]} |
        """
        cursor = self.connection.cursor()
        with sql_timeout(timeout=self._get_timeout_from_execution_context(),
                         connection=cursor.connection):
            try:
                self._execute_sql(cursor, sql_statement, params)
                col_name = tuple(i[0] for i in cursor.description)
                query_result = [dict(zip(col_name, row)) for row in cursor]
                self.result_logger(query_result)
                return query_result
            finally:
                self.connection.rollback()

    def execute_sql_string_generator(
            self, sql_statement: str,
            **params: Any) -> Iterable[Dict[str, Any]]:
        """Generator that yields each result row mapped as a dict with column
        names as keys.\n
        Intended for use mainly in code for other keywords.
        *If used, the generator must be explicitly closed before closing DB
        connection*

        *Args:*\n
            _sql_statement_ - PL/SQL string;\n
            _params_ - PL/SQL string parameters;\n

        Yields:*\n
            results dict.
        """
        self.last_executed_statement = sql_statement
        self.last_executed_statement_params = params
        cursor = self.connection.cursor()
        with sql_timeout(timeout=self._get_timeout_from_execution_context(),
                         connection=cursor.connection):
            try:
                self._execute_sql(cursor, sql_statement, params)
                col_name = tuple(i[0] for i in cursor.description)
                for row in cursor:
                    yield dict(zip(col_name, row))
            finally:
                self.connection.rollback()

    def result_logger(self, query_result: List[Any],
                      result_amount: int = 10) -> None:
        """Log the first n rows from the query results.

        *Args:*\n
            _query_result_ - query result to log;\n
            _result_amount_ - maximum number of entries to display from the
            result (must be greater than 0 for truncation to apply).
        """
        if len(query_result) > result_amount > 0:
            query_result = query_result[:result_amount]
        logged_result = self.wrap_into_html_details(str(query_result),
                                                    "SQL Query Result")
        logger.info(logged_result, html=True)

    @contextmanager
    def use_connection(self, conn_index: Union[int, str]) -> Iterator[None]:
        """Context manager for switching connection.

        Args:
            conn_index: Connection index or alias to switch.

        Yields:
            generator.
        """
        _old_con_index = self.switch_oracle_connection(conn_index)
        try:
            yield
        finally:
            # Restore the previous connection even if the body raised.
            self.switch_oracle_connection(_old_con_index)
class RestKeywords(object):
    """Keywords for sending HTTP requests through cached requests sessions."""

    def __init__(self):
        self._cache = ConnectionCache()
        self._builtin = BuiltIn()

    @staticmethod
    def _normalize(d):
        # Recursively drop falsy values (None, "", 0, empty containers)
        # from dicts and lists; scalars pass through unchanged.
        # `.items()` replaces the Python-2-only `.iteritems()`.
        if isinstance(d, dict):
            return dict((k, RestKeywords._normalize(v))
                        for k, v in d.items()
                        if v and RestKeywords._normalize(v))
        elif isinstance(d, list):
            return [RestKeywords._normalize(v)
                    for v in d if v and RestKeywords._normalize(v)]
        else:
            return d

    @staticmethod
    def convert_to_json(strings, normalize="False"):
        # Parse one JSON string, or a list of JSON strings, into Python
        # objects; optionally strip falsy values via _normalize.
        # List comprehensions replace Py2 `map(...)` so a real list is
        # returned and the list branch of normalization actually runs.
        if isinstance(strings, list):
            json_ = [json.loads(s) for s in strings]
        else:
            json_ = json.loads(strings)
        if normalize.upper() == "TRUE":
            if isinstance(json_, list):
                json_ = [RestKeywords._normalize(x) for x in json_]
            else:
                json_ = RestKeywords._normalize(json_)
        return json_

    @staticmethod
    def convert_to_multipart_encoded_files(files):
        # Each entry of `files` is (form_field_name, file_path, mime_type).
        # NOTE(review): the file handles are intentionally left open —
        # requests reads them while sending the request.
        mpe_files = []
        for form_field_name, file_path, mime_type in files:
            file_name = path.basename(file_path)
            mpe_files.append((form_field_name,
                              (file_name, open(file_path, "rb"), mime_type)))
        return mpe_files

    def create_session(self, alias, headers=None, auth=None, verify="False",
                       cert=None):
        """Create a requests session and register it under `alias`."""
        session = Session()
        if headers:
            session.headers.update(headers)
        if auth:
            session.auth = tuple(auth)
        session.verify = self._builtin.convert_to_boolean(verify)
        session.cert = cert
        self._cache.register(session, alias)

    def head(self, alias, url, params=None, headers=None, cookies=None,
             timeout=10):
        logger.info("Sending HEAD request to: '" + url + "', session: '"
                    + alias + "'")
        session = self._cache.switch(alias)
        resp = session.head(url, params=params, headers=headers,
                            cookies=cookies, timeout=timeout)
        return {"status": resp.status_code, "headers": resp.headers}

    def get(self, alias, url, params=None, headers=None, cookies=None,
            timeout=10):
        logger.info("Sending GET request to: '" + url + "', session: '"
                    + alias + "'")
        session = self._cache.switch(alias)
        resp = session.get(url, params=params, headers=headers,
                           cookies=cookies, timeout=timeout)
        try:
            return {"status": resp.status_code, "headers": resp.headers,
                    "body": resp.json()}
        except ValueError:
            return {"status": resp.status_code, "headers": resp.headers,
                    "body": resp.content}

    def post(self, alias, url, headers=None, data=None, files=None,
             cookies=None, timeout=10):
        logger.info("Sending POST request to: '" + url + "', session: '"
                    + alias + "'")
        session = self._cache.switch(alias)
        # Encode only when a body was given; `None.encode` would crash.
        body = data.encode("utf-8") if data is not None else None
        resp = session.post(url, headers=headers, cookies=cookies,
                            data=body, files=files, timeout=timeout)
        try:
            return {"status": resp.status_code, "headers": resp.headers,
                    "body": resp.json()}
        except ValueError:
            return {"status": resp.status_code, "headers": resp.headers,
                    "body": resp.content}

    def put(self, alias, url, headers=None, data=None, cookies=None,
            timeout=10):
        logger.info("Sending PUT request to: '" + url + "', session: '"
                    + alias + "'")
        session = self._cache.switch(alias)
        body = data.encode("utf-8") if data is not None else None
        resp = session.put(url, headers=headers, cookies=cookies,
                           data=body, timeout=timeout)
        try:
            return {"status": resp.status_code, "headers": resp.headers,
                    "body": resp.json()}
        except ValueError:
            return {"status": resp.status_code, "headers": resp.headers,
                    "body": resp.content}

    def delete(self, alias, url, headers=None, data=None, cookies=None,
               timeout=10):
        logger.info("Sending DELETE request to: '" + url + "', session: '"
                    + alias + "'")
        session = self._cache.switch(alias)
        body = data.encode("utf-8") if data is not None else None
        resp = session.delete(url, headers=headers, cookies=cookies,
                              data=body, timeout=timeout)
        try:
            return {"status": resp.status_code, "headers": resp.headers,
                    "body": resp.json()}
        except ValueError:
            return {"status": resp.status_code, "headers": resp.headers,
                    "body": resp.content}

    def close_all_sessions(self):
        self._cache.empty_cache()
class WinRMLibrary(object):
    """ Robot Framework library for Windows Remote Management, based on pywinrm.

    == Enable Windows Remote Shell ==
    - [ http://support.microsoft.com/kb/555966 | KB-555966 ]
    - Execute on windows server:
    | winrm set winrm/config/client/auth @{Basic="true"}
    | winrm set winrm/config/service/auth @{Basic="true"}
    | winrm set winrm/config/service @{AllowUnencrypted="true"}

    == Dependence ==
    | pywinrm | https://pypi.python.org/pypi/pywinrm |
    | robot framework | http://robotframework.org |
    """

    ROBOT_LIBRARY_SCOPE = 'GLOBAL'

    def __init__(self):
        # Most recently created/switched session, plus the cache of all
        # sessions keyed by alias.
        self._session = None
        self._cache = ConnectionCache('No sessions created')

    def create_session(self, alias, hostname, login, password):
        """ Create session with windows host.
        Does not support domain authentication.

        *Args:*\n
        _alias_ - robot framework alias to identify the session\n
        _hostname_ - windows hostname (not IP)\n
        _login_ - windows local login\n
        _password_ - windows local password

        *Returns:*\n
        Session index

        *Example:*\n
        | Create Session | server | windows-host | Administrator | 1234567890 |
        """
        # Security fix: never write the plaintext password into the Robot
        # log; it is masked here.
        logger.debug('Connecting using : hostname=%s, login=%s, password=*** '
                     % (hostname, login))
        self._session = winrm.Session(hostname, (login, password))
        return self._cache.register(self._session, alias)

    def run_cmd(self, alias, command, params=None):
        """ Execute command on remote machine.

        *Args:*\n
        _alias_ - robot framework alias to identify the session\n
        _command_ - windows command\n
        _params_ - lists of command's parameters

        *Returns:*\n
        Result object with methods: status_code, std_out, std_err.

        *Example:*\n
        | ${params}= | Create List | "/all" |
        | ${result}= | Run cmd | server | ipconfig | ${params} |
        | Log | ${result.status_code} |
        | Log | ${result.std_out} |
        | Log | ${result.std_err} |
        """
        # Build the command line only for logging; the session call receives
        # command and params separately.
        if params is not None:
            log_cmd = command + ' ' + ' '.join(params)
        else:
            log_cmd = command
        logger.info('Run command on server with alias "%s": %s ' % (alias,
                                                                    log_cmd))
        self._session = self._cache.switch(alias)
        result = self._session.run_cmd(command, params)
        return result

    def run_ps(self, alias, script):
        """ Run power shell script on remote machine.

        *Args:*\n
        _alias_ - robot framework alias to identify the session\n
        _script_ - power shell script\n

        *Returns:*\n
        Result object with methods: status_code, std_out, std_err.

        *Example:*\n
        | ${result}= | Run ps | server | get-process iexplore|select -exp ws|measure-object -sum|select -exp Sum |
        | Log | ${result.status_code} |
        | Log | ${result.std_out} |
        | Log | ${result.std_err} |
        """
        logger.info('Run power shell script on server with alias "%s": %s '
                    % (alias, script))
        self._session = self._cache.switch(alias)
        result = self._session.run_ps(script)
        return result

    def delete_all_sessions(self):
        """ Removes all sessions with windows hosts"""
        self._cache.empty_cache()
class CouchbaseLibrary(object):
    """
    Robot Framework library to work with Couchbase.
    Based on:
    [ http://pythonhosted.org/couchbase | Couchbase Python Client Library]

    == Dependencies ==
    | robot framework | http://robotframework.org |
    | robotframework-jsonvalidator | https://pypi.python.org/pypi/robotframework-jsonvalidator |
    | Couchbase Python Client Library | http://pythonhosted.org/couchbase/ |
    """

    ROBOT_LIBRARY_SCOPE = 'GLOBAL'

    def __init__(self):
        """ Initialization. """
        # Currently selected bucket and the cache of all bucket connections.
        self._bucket = None
        self._cache = ConnectionCache()

    def connect_to_couchbase_bucket(self, host, port, bucket_name,
                                    password=None, alias=None):
        """ Connect to a Couchbase bucket.

        *Args:*\n
        _host_ - couchbase server host name;\n
        _port_ - couchbase server port number;\n
        _bucket_name_ - couchbase bucket name;\n
        _password_ - password;\n
        _alias_ - connection alias;\n

        *Returns:*\n
        Connection index.

        *Example:*\n
        | Connect To Couchbase Bucket | my_host_name | 8091 | bucket_name | password | alias=bucket |
        """
        # Security fix: do not write the plaintext password into the log.
        logger.debug(
            'Connecting using : host=%s, port=%s, bucketName=%s, '
            'password=*** ' % (host, port, bucket_name))
        connection_string = '{host}:{port}/{bucket}'.format(
            host=host, port=port, bucket=bucket_name)
        try:
            bucket = Bucket(connection_string, password=password)
            self._bucket = bucket
            return self._cache.register(self._bucket, alias)
        except CouchbaseError as info:
            # Chain the original client error so the root cause and its
            # traceback are preserved for the caller.
            raise Exception("Could not connect to Couchbase bucket. Error:",
                            str(info)) from info

    def disconnect_from_couchbase_bucket(self):
        """ Close the current connection with a Couchbase bucket.

        *Example:*\n
        | Connect To Couchbase Bucket | my_host_name | 8091 | bucket_name | password | alias=bucket |
        | Disconnect From Couchbase Bucket |
        """
        if self._bucket:
            # NOTE(review): Bucket._close() is a private client API; the
            # client exposes no public close method, so it is kept as-is.
            self._bucket._close()
        self._cache.empty_cache()

    def close_all_couchbase_bucket_connections(self):
        """ Close all connections with Couchbase buckets.

        This keyword is used to close all connections in case, there are
        several open connections.
        Do not use keywords
        [#Disconnect From Couchbase Bucket | Disconnect From Couchbase Bucket] and
        [#Close All Couchbase Bucket Connections | Close All Couchbase Bucket Connections]
        together.

        After execution of this keyword, index returned by
        [#Connect To Couchbase Bucket | Connect To Couchbase Bucket]
        starts at 1.

        *Example:*\n
        | Connect To Couchbase Bucket | my_host_name | 8091 | bucket_name | password | alias=bucket |
        | Close All Couchbase Bucket Connections |
        """
        # close_all() resets the cache and returns its "no current" marker,
        # which becomes the new (unusable) current bucket.
        self._bucket = self._cache.close_all()

    def switch_couchbase_bucket_connections(self, index_or_alias):
        """ Switch between active connections with Couchbase buckets using
        their index or alias.

        Connection alias is set in
        [#Connect To Couchbase Bucket | Connect To Couchbase Bucket],
        which also returns the connection index.

        *Args:*\n
        _index_or_alias_ - connection index or alias;

        *Returns:*\n
        Index of the previous connection.

        *Example:*\n
        | Connect To Couchbase Bucket | my_host_name | 8091 | bucket_name | password | alias=bucket1 |
        | Connect To Couchbase Bucket | my_host_name | 8091 | bucket_name | password | alias=bucket2 |
        | Switch Couchbase Bucket Connections | bucket1 |
        | View Document By Key | key=1C1#000 |
        | Switch Couchbase Bucket Connections | bucket2 |
        | View Document By Key | key=1C1#000 |
        | Close All Couchbase Bucket Connections |
        """
        old_index = self._cache.current_index
        self._bucket = self._cache.switch(index_or_alias)
        return old_index

    def view_document_by_key(self, key):
        """ Get information about the presence of a document in the
        Couchbase bucket by the given key.

        Depending on the value of the return code, the presence or absence
        of the document in the bucket is determined.

        *Args:*\n
        _key_ - document key;\n

        *Returns:*\n
        Value of return code.\n
        If rc=0, the document is available in the bucket.

        *Example:*\n
        | ${rc}= | View Document By Key | key=1C1#000 |
        """
        # quiet=True: a missing key yields a non-zero rc instead of raising.
        result = self._bucket.get(key, quiet=True).rc
        return result

    def bucket_contains_document_by_key(self, key):
        """ Check if the Couchbase bucket contains the document by the given
        key.

        The option `quiet` is used so a missing key does not raise.

        *Args:*\n
        _key_ - document key;\n

        *Returns:*\n
        True, if there is a document with this key.

        *Example:*\n
        | ${contain}= | Bucket Contains Document By Key | key=1C1#000 |
        | Should Be True | ${contain} |
        """
        result = self._bucket.get(key, quiet=True)
        logger.debug("{key} contains is {success} with code={code}".format(
            key=key, success=result.success, code=result.rc))
        return result.success is True

    def get_document_cas_by_key(self, key):
        """ Get CAS of the document in the Couchbase bucket by the given key.

        *Args:*\n
        _key_ - document key;\n

        *Returns:*\n
        CAS value of the document.\n

        *Example:*\n
        | ${cas}= | Get Document Cas By Key | key=1C1#000 |
        """
        result = self._bucket.get(key).cas
        return result

    def get_document_value_by_key(self, key):
        """ Get a document value in the Couchbase bucket by the given key.

        *Args:*\n
        _key_ - document key;\n

        *Returns:*\n
        Dictionary with the value of the document.\n

        *Example:*\n
        | ${value}= | Get Document Value By Key | key=1C1#000 |
        """
        result = self._bucket.get(key).value
        return result

    def validate_document_by_json(self, key, json_expr):
        """ Checking a document to match the json expression.

        The document is specified by the key in the bucket.

        *Args:*\n
        _key_ - document key in the bucket;\n
        _json_expr_ - JSONSelect expression.\n

        *Returns:*\n
        True if the document exists in the bucket and matches the json
        expression.\n

        *Example:*\n
        | ${valid}= | Validate Document By Json | key=dockey | json_expr=.somekey:val("value") |
        | Should Be True | ${valid} |
        """
        result = self._bucket.get(key, quiet=True)
        if result.success is not True:
            return False
        try:
            validator = JsonValidator()
            json_txt = validator.json_to_string(result.value)
            validator.element_should_exist(json_txt, json_expr)
        except JsonValidatorError as error:
            # Validation failure is an expected outcome, not an error:
            # log it and report False.
            logger.debug(
                "on json validation got exception {ex}".format(ex=error))
            return False
        return True

    def certainly_delete_document_by_key(self, key):
        """ Remove a document for a given key from Couchbase bucket.

        Doesn't raise NotFoundError if the key doesn't exist.

        *Args:*\n
        _key_ - document key;\n

        *Example:*\n
        | Certainly Delete Document By Key | key=1C1#000 |
        """
        self._bucket.remove(key, quiet=True)

    def upsert_document(self, key, value):
        """ Insert or update a document in the current Couchbase bucket.

        *Args:*\n
        _key_ - document key;\n
        _value_ - document body;\n

        *Example:*\n
        | Upsert Document | somekey | {'key': 'value'} |
        """
        self._bucket.upsert(key, value)
class TestConnectionCache(unittest.TestCase):
    """Unit tests for robot.utils.ConnectionCache.

    Covers registration (with and without aliases), switching by index or
    alias, current-index management, closing/emptying the cache, the
    container protocol (iter/len/truthiness) and alias/index resolution.
    Relies on ``ConnectionMock`` and the ``assert_*`` helpers defined
    elsewhere in this file.
    """

    def setUp(self):
        # Fresh cache per test, using the default "no connection" message.
        self.cache = ConnectionCache()

    def test_initial(self):
        self._verify_initial_state()

    def test_no_connection(self):
        # Accessing `.current` with nothing registered raises with either
        # the default or a custom error message.
        assert_raises_with_msg(RuntimeError, 'No open connection.',
                               getattr, ConnectionCache().current, 'whatever')
        assert_raises_with_msg(RuntimeError, 'Custom msg',
                               getattr, ConnectionCache('Custom msg').current,
                               'xxx')

    def test_register_one(self):
        conn = ConnectionMock()
        index = self.cache.register(conn)
        assert_equal(index, 1)
        assert_equal(self.cache.current, conn)
        assert_equal(self.cache.current_index, 1)
        assert_equal(self.cache._connections, [conn])
        assert_equal(self.cache._aliases, {})

    def test_register_multiple(self):
        conns = [ConnectionMock(1), ConnectionMock(2), ConnectionMock(3)]
        for i, conn in enumerate(conns):
            index = self.cache.register(conn)
            assert_equal(index, i+1)
            assert_equal(self.cache.current, conn)
            assert_equal(self.cache.current_index, i+1)
        assert_equal(self.cache._connections, conns)

    def test_register_multiple_equal_objects(self):
        # Distinct but equal (same id) mocks must still get distinct indices.
        conns = [ConnectionMock(1), ConnectionMock(1), ConnectionMock(1)]
        for i, conn in enumerate(conns):
            index = self.cache.register(conn)
            assert_equal(index, i+1)
            assert_equal(self.cache.current, conn)
            assert_equal(self.cache.current_index, i+1)
        assert_equal(self.cache._connections, conns)

    def test_register_multiple_same_object(self):
        # The very same object registered repeatedly: register() returns a
        # new index each time, but current_index resolves to the first slot.
        conns = [ConnectionMock()] * 3
        for i, conn in enumerate(conns):
            index = self.cache.register(conn)
            assert_equal(index, i+1)
            assert_equal(self.cache.current, conn)
            assert_equal(self.cache.current_index, 1)
        assert_equal(self.cache._connections, conns)

    def test_set_current_index(self):
        # current_index is writable; None deselects the current connection.
        self.cache.current_index = None
        assert_equal(self.cache.current_index, None)
        self._register('a', 'b')
        self.cache.current_index = 1
        assert_equal(self.cache.current_index, 1)
        assert_equal(self.cache.current.id, 'a')
        self.cache.current_index = None
        assert_equal(self.cache.current_index, None)
        assert_equal(self.cache.current, self.cache._no_current)
        self.cache.current_index = 2
        assert_equal(self.cache.current_index, 2)
        assert_equal(self.cache.current.id, 'b')

    def test_set_invalid_index(self):
        assert_raises(IndexError, setattr, self.cache, 'current_index', 1)

    def test_switch_with_index(self):
        # Both int and numeric-string indices are accepted.
        self._register('a', 'b', 'c')
        self._assert_current('c', 3)
        self.cache.switch(1)
        self._assert_current('a', 1)
        self.cache.switch('2')
        self._assert_current('b', 2)

    def _assert_current(self, id, index):
        # Helper: verify both the selected connection and its index.
        assert_equal(self.cache.current.id, id)
        assert_equal(self.cache.current_index, index)

    def test_switch_with_non_existing_index(self):
        self._register('a', 'b')
        assert_raises_with_msg(RuntimeError, "Non-existing index or alias '3'.",
                               self.cache.switch, 3)
        assert_raises_with_msg(RuntimeError, "Non-existing index or alias '42'.",
                               self.cache.switch, 42)

    def test_register_with_alias(self):
        # Aliases are normalized (case and spaces ignored) when stored.
        conn = ConnectionMock()
        index = self.cache.register(conn, 'My Connection')
        assert_equal(index, 1)
        assert_equal(self.cache.current, conn)
        assert_equal(self.cache._connections, [conn])
        assert_equal(self.cache._aliases, {'myconnection': 1})

    def test_register_multiple_with_alias(self):
        c1 = ConnectionMock(); c2 = ConnectionMock(); c3 = ConnectionMock()
        for i, conn in enumerate([c1,c2,c3]):
            index = self.cache.register(conn, 'c%d' % (i+1))
            assert_equal(index, i+1)
            assert_equal(self.cache.current, conn)
        assert_equal(self.cache._connections, [c1, c2, c3])
        assert_equal(self.cache._aliases, {'c1': 1, 'c2': 2, 'c3': 3})

    def test_switch_with_alias(self):
        # Alias lookup is case- and space-insensitive.
        self._register('a', 'b', 'c', 'd', 'e')
        assert_equal(self.cache.current.id, 'e')
        self.cache.switch('a')
        assert_equal(self.cache.current.id, 'a')
        self.cache.switch('C')
        assert_equal(self.cache.current.id, 'c')
        self.cache.switch(' B ')
        assert_equal(self.cache.current.id, 'b')

    def test_switch_with_non_existing_alias(self):
        self._register('a', 'b')
        assert_raises_with_msg(RuntimeError,
                               "Non-existing index or alias 'whatever'.",
                               self.cache.switch, 'whatever')

    def test_switch_with_alias_overriding_index(self):
        # An int argument is an index; a string that matches an alias is
        # resolved as the alias even if it looks like an index.
        self._register('2', '1')
        self.cache.switch(1)
        assert_equal(self.cache.current.id, '2')
        self.cache.switch('1')
        assert_equal(self.cache.current.id, '1')

    def test_get_connection_with_index(self):
        # get_connection() looks up without changing the current connection.
        self._register('a', 'b')
        assert_equal(self.cache.get_connection(1).id, 'a')
        assert_equal(self.cache.current.id, 'b')
        assert_equal(self.cache[2].id, 'b')

    def test_get_connection_with_alias(self):
        self._register('a', 'b')
        assert_equal(self.cache.get_connection('a').id, 'a')
        assert_equal(self.cache.current.id, 'b')
        assert_equal(self.cache['b'].id, 'b')

    def test_get_connection_with_none_returns_current(self):
        self._register('a', 'b')
        assert_equal(self.cache.get_connection().id, 'b')
        assert_equal(self.cache[None].id, 'b')

    def test_get_connection_with_none_fails_if_no_current(self):
        assert_raises_with_msg(RuntimeError, 'No open connection.',
                               self.cache.get_connection)

    def test_close_all(self):
        # close_all() calls close() on every connection and resets the cache.
        connections = self._register('a', 'b', 'c', 'd')
        self.cache.close_all()
        self._verify_initial_state()
        for conn in connections:
            assert_true(conn.closed_by_close)

    def test_close_all_with_given_method(self):
        # The closer-method name is configurable (here: 'exit').
        connections = self._register('a', 'b', 'c', 'd')
        self.cache.close_all('exit')
        self._verify_initial_state()
        for conn in connections:
            assert_true(conn.closed_by_exit)

    def test_empty_cache(self):
        # empty_cache() forgets connections without closing them.
        connections = self._register('a', 'b', 'c', 'd')
        self.cache.empty_cache()
        self._verify_initial_state()
        for conn in connections:
            assert_false(conn.closed_by_close)
            assert_false(conn.closed_by_exit)

    def test_iter(self):
        conns = ['a', object(), 1, None]
        for c in conns:
            self.cache.register(c)
        assert_equal(list(self.cache), conns)

    def test_len(self):
        assert_equal(len(self.cache), 0)
        self.cache.register(None)
        assert_equal(len(self.cache), 1)
        self.cache.register(None)
        assert_equal(len(self.cache), 2)
        self.cache.empty_cache()
        assert_equal(len(self.cache), 0)

    def test_truthy(self):
        # The cache is truthy exactly when a current connection is selected.
        assert_false(self.cache)
        self.cache.register(None)
        assert_true(self.cache)
        self.cache.current_index = None
        assert_false(self.cache)
        self.cache.current_index = 1
        assert_true(self.cache)
        self.cache.empty_cache()
        assert_false(self.cache)

    def test_resolve_alias_or_index(self):
        # Both the private and the public resolver accept alias, numeric
        # string, and int.
        self.cache.register(ConnectionMock(), 'alias')
        assert_equal(self.cache._resolve_alias_or_index('alias'), 1)
        assert_equal(self.cache.resolve_alias_or_index('1'), 1)
        assert_equal(self.cache.resolve_alias_or_index(1), 1)

    def test_resolve_invalid_alias_or_index(self):
        assert_raises_with_msg(ValueError,
                               "Non-existing index or alias 'nonex'.",
                               self.cache._resolve_alias_or_index, 'nonex')
        assert_raises_with_msg(ValueError,
                               "Non-existing index or alias '1'.",
                               self.cache.resolve_alias_or_index, '1')
        assert_raises_with_msg(ValueError,
                               "Non-existing index or alias '42'.",
                               self.cache.resolve_alias_or_index, 42)

    def _verify_initial_state(self):
        # Helper: the cache looks exactly like a freshly constructed one.
        assert_equal(self.cache.current, self.cache._no_current)
        assert_equal(self.cache.current_index, None)
        assert_equal(self.cache._connections, [])
        assert_equal(self.cache._aliases, {})

    def _register(self, *ids):
        # Helper: register one mock per id, aliased by that id; return them.
        connections = []
        for id in ids:
            conn = ConnectionMock(id)
            self.cache.register(conn, id)
            connections.append(conn)
        return connections
class TestConnnectionCache(unittest.TestCase):
    """Unit tests for robot.utils.ConnectionCache (PEP 8-spaced variant).

    NOTE(review): the class name has a typo (triple 'n'); kept because
    renaming would change the suite's public name. This class largely
    duplicates TestConnectionCache but omits the resolve_alias_or_index
    tests. Relies on ``ConnectionMock`` and the ``assert_*`` helpers defined
    elsewhere in this file.
    """

    def setUp(self):
        # Fresh cache per test, using the default "no connection" message.
        self.cache = ConnectionCache()

    def test_initial(self):
        self._verify_initial_state()

    def test_no_connection(self):
        # Accessing `.current` with nothing registered raises with either
        # the default or a custom error message.
        assert_raises_with_msg(RuntimeError, 'No open connection.',
                               getattr, ConnectionCache().current, 'whatever')
        assert_raises_with_msg(RuntimeError, 'Custom msg',
                               getattr, ConnectionCache('Custom msg').current,
                               'xxx')

    def test_register_one(self):
        conn = ConnectionMock()
        index = self.cache.register(conn)
        assert_equal(index, 1)
        assert_equal(self.cache.current, conn)
        assert_equal(self.cache.current_index, 1)
        assert_equal(self.cache._connections, [conn])
        assert_equal(self.cache._aliases, {})

    def test_register_multiple(self):
        conns = [ConnectionMock(1), ConnectionMock(2), ConnectionMock(3)]
        for i, conn in enumerate(conns):
            index = self.cache.register(conn)
            assert_equal(index, i + 1)
            assert_equal(self.cache.current, conn)
            assert_equal(self.cache.current_index, i + 1)
        assert_equal(self.cache._connections, conns)

    def test_register_multiple_equal_objects(self):
        # Distinct but equal (same id) mocks must still get distinct indices.
        conns = [ConnectionMock(1), ConnectionMock(1), ConnectionMock(1)]
        for i, conn in enumerate(conns):
            index = self.cache.register(conn)
            assert_equal(index, i + 1)
            assert_equal(self.cache.current, conn)
            assert_equal(self.cache.current_index, i + 1)
        assert_equal(self.cache._connections, conns)

    def test_register_multiple_same_object(self):
        # The very same object registered repeatedly: register() returns a
        # new index each time, but current_index resolves to the first slot.
        conns = [ConnectionMock()] * 3
        for i, conn in enumerate(conns):
            index = self.cache.register(conn)
            assert_equal(index, i + 1)
            assert_equal(self.cache.current, conn)
            assert_equal(self.cache.current_index, 1)
        assert_equal(self.cache._connections, conns)

    def test_set_current_index(self):
        # current_index is writable; None deselects the current connection.
        self.cache.current_index = None
        assert_equal(self.cache.current_index, None)
        self._register('a', 'b')
        self.cache.current_index = 1
        assert_equal(self.cache.current_index, 1)
        assert_equal(self.cache.current.id, 'a')
        self.cache.current_index = None
        assert_equal(self.cache.current_index, None)
        assert_equal(self.cache.current, self.cache._no_current)
        self.cache.current_index = 2
        assert_equal(self.cache.current_index, 2)
        assert_equal(self.cache.current.id, 'b')

    def test_set_invalid_index(self):
        assert_raises(IndexError, setattr, self.cache, 'current_index', 1)

    def test_switch_with_index(self):
        # Both int and numeric-string indices are accepted.
        self._register('a', 'b', 'c')
        self._assert_current('c', 3)
        self.cache.switch(1)
        self._assert_current('a', 1)
        self.cache.switch('2')
        self._assert_current('b', 2)

    def _assert_current(self, id, index):
        # Helper: verify both the selected connection and its index.
        assert_equal(self.cache.current.id, id)
        assert_equal(self.cache.current_index, index)

    def test_switch_with_non_existing_index(self):
        self._register('a', 'b')
        assert_raises_with_msg(RuntimeError, "Non-existing index or alias '3'.",
                               self.cache.switch, 3)
        assert_raises_with_msg(RuntimeError, "Non-existing index or alias '42'.",
                               self.cache.switch, 42)

    def test_register_with_alias(self):
        # Aliases are normalized (case and spaces ignored) when stored.
        conn = ConnectionMock()
        index = self.cache.register(conn, 'My Connection')
        assert_equal(index, 1)
        assert_equal(self.cache.current, conn)
        assert_equal(self.cache._connections, [conn])
        assert_equal(self.cache._aliases, {'myconnection': 1})

    def test_register_multiple_with_alias(self):
        c1 = ConnectionMock()
        c2 = ConnectionMock()
        c3 = ConnectionMock()
        for i, conn in enumerate([c1, c2, c3]):
            index = self.cache.register(conn, 'c%d' % (i + 1))
            assert_equal(index, i + 1)
            assert_equal(self.cache.current, conn)
        assert_equal(self.cache._connections, [c1, c2, c3])
        assert_equal(self.cache._aliases, {'c1': 1, 'c2': 2, 'c3': 3})

    def test_switch_with_alias(self):
        # Alias lookup is case- and space-insensitive.
        self._register('a', 'b', 'c', 'd', 'e')
        assert_equal(self.cache.current.id, 'e')
        self.cache.switch('a')
        assert_equal(self.cache.current.id, 'a')
        self.cache.switch('C')
        assert_equal(self.cache.current.id, 'c')
        self.cache.switch(' B ')
        assert_equal(self.cache.current.id, 'b')

    def test_switch_with_non_existing_alias(self):
        self._register('a', 'b')
        assert_raises_with_msg(RuntimeError,
                               "Non-existing index or alias 'whatever'.",
                               self.cache.switch, 'whatever')

    def test_switch_with_alias_overriding_index(self):
        # An int argument is an index; a string that matches an alias is
        # resolved as the alias even if it looks like an index.
        self._register('2', '1')
        self.cache.switch(1)
        assert_equal(self.cache.current.id, '2')
        self.cache.switch('1')
        assert_equal(self.cache.current.id, '1')

    def test_get_connection_with_index(self):
        # get_connection() looks up without changing the current connection.
        self._register('a', 'b')
        assert_equal(self.cache.get_connection(1).id, 'a')
        assert_equal(self.cache.current.id, 'b')
        assert_equal(self.cache[2].id, 'b')

    def test_get_connection_with_alias(self):
        self._register('a', 'b')
        assert_equal(self.cache.get_connection('a').id, 'a')
        assert_equal(self.cache.current.id, 'b')
        assert_equal(self.cache['b'].id, 'b')

    def test_get_connection_with_none_returns_current(self):
        self._register('a', 'b')
        assert_equal(self.cache.get_connection().id, 'b')
        assert_equal(self.cache[None].id, 'b')

    def test_get_connection_with_none_fails_if_no_current(self):
        assert_raises_with_msg(RuntimeError, 'No open connection.',
                               self.cache.get_connection)

    def test_close_all(self):
        # close_all() calls close() on every connection and resets the cache.
        connections = self._register('a', 'b', 'c', 'd')
        self.cache.close_all()
        self._verify_initial_state()
        for conn in connections:
            assert_true(conn.closed_by_close)

    def test_close_all_with_given_method(self):
        # The closer-method name is configurable (here: 'exit').
        connections = self._register('a', 'b', 'c', 'd')
        self.cache.close_all('exit')
        self._verify_initial_state()
        for conn in connections:
            assert_true(conn.closed_by_exit)

    def test_empty_cache(self):
        # empty_cache() forgets connections without closing them.
        connections = self._register('a', 'b', 'c', 'd')
        self.cache.empty_cache()
        self._verify_initial_state()
        for conn in connections:
            assert_false(conn.closed_by_close)
            assert_false(conn.closed_by_exit)

    def test_iter(self):
        conns = ['a', object(), 1, None]
        for c in conns:
            self.cache.register(c)
        assert_equal(list(self.cache), conns)

    def test_len(self):
        assert_equal(len(self.cache), 0)
        self.cache.register(None)
        assert_equal(len(self.cache), 1)
        self.cache.register(None)
        assert_equal(len(self.cache), 2)
        self.cache.empty_cache()
        assert_equal(len(self.cache), 0)

    def test_truthy(self):
        # The cache is truthy exactly when a current connection is selected.
        assert_false(self.cache)
        self.cache.register(None)
        assert_true(self.cache)
        self.cache.current_index = None
        assert_false(self.cache)
        self.cache.current_index = 1
        assert_true(self.cache)
        self.cache.empty_cache()
        assert_false(self.cache)

    def _verify_initial_state(self):
        # Helper: the cache looks exactly like a freshly constructed one.
        assert_equal(self.cache.current, self.cache._no_current)
        assert_equal(self.cache.current_index, None)
        assert_equal(self.cache._connections, [])
        assert_equal(self.cache._aliases, {})

    def _register(self, *ids):
        # Helper: register one mock per id, aliased by that id; return them.
        connections = []
        for id in ids:
            conn = ConnectionMock(id)
            self.cache.register(conn, id)
            connections.append(conn)
        return connections
class TestConnnectionCache(unittest.TestCase):
    """Unit tests for an older ConnectionCache API.

    This variant targets a version where cache errors were ``DataError``
    (not ``RuntimeError``), messages had no trailing period, and the
    ``assert_equals``/``assert_none`` helper spellings were used.

    NOTE(review): the class name has a typo (triple 'n') and
    ``test_register_multiple_with_alis`` misspells 'alias' — kept to stay
    faithful to the original suite; TODO fix upstream.
    """

    def setUp(self):
        # Fresh cache per test, using the default "no connection" message.
        self.cache = ConnectionCache()

    def test_initial(self):
        self._verify_initial_state()

    def test_no_connection(self):
        # Accessing `.current` with nothing registered raises with either
        # the default or a custom error message.
        assert_raises_with_msg(DataError, 'No open connection',
                               getattr, ConnectionCache().current, 'whatever')
        assert_raises_with_msg(DataError, 'Custom msg',
                               getattr, ConnectionCache('Custom msg').current,
                               'xxx')

    def test_register_one(self):
        conn = ConnectionMock()
        index = self.cache.register(conn)
        assert_equals(index, 1)
        assert_equals(self.cache.current, conn)
        assert_equals(self.cache._connections, [conn])
        assert_equals(self.cache._aliases, {})

    def test_register_multiple(self):
        conns = [ConnectionMock(), ConnectionMock(), ConnectionMock()]
        for i, conn in enumerate(conns):
            index = self.cache.register(conn)
            assert_equals(index, i + 1)
            assert_equals(self.cache.current, conn)
        assert_equals(self.cache._connections, conns)

    def test_switch_with_index(self):
        # Both int and numeric-string indices are accepted.
        self._register('a', 'b', 'c')
        self._assert_current('c', 3)
        self.cache.switch(1)
        self._assert_current('a', 1)
        self.cache.switch('2')
        self._assert_current('b', 2)

    def _assert_current(self, id, index):
        # Helper: verify both the selected connection and its index.
        assert_equals(self.cache.current.id, id)
        assert_equals(self.cache.current_index, index)

    def test_switch_with_non_existing_index(self):
        self._register('a', 'b')
        assert_raises_with_msg(DataError, "Non-existing index or alias '3'",
                               self.cache.switch, 3)
        assert_raises_with_msg(DataError, "Non-existing index or alias '42'",
                               self.cache.switch, 42)

    def test_register_with_alias(self):
        # Aliases are normalized (case and spaces ignored) when stored.
        conn = ConnectionMock()
        index = self.cache.register(conn, 'My Connection')
        assert_equals(index, 1)
        assert_equals(self.cache.current, conn)
        assert_equals(self.cache._connections, [conn])
        assert_equals(self.cache._aliases, {'myconnection': 1})

    def test_register_multiple_with_alis(self):
        c1 = ConnectionMock()
        c2 = ConnectionMock()
        c3 = ConnectionMock()
        for i, conn in enumerate([c1, c2, c3]):
            index = self.cache.register(conn, 'c%d' % (i + 1))
            assert_equals(index, i + 1)
            assert_equals(self.cache.current, conn)
        assert_equals(self.cache._connections, [c1, c2, c3])
        assert_equals(self.cache._aliases, {'c1': 1, 'c2': 2, 'c3': 3})

    def test_switch_with_alias(self):
        # Alias lookup is case- and space-insensitive.
        self._register('a', 'b', 'c', 'd', 'e')
        assert_equals(self.cache.current.id, 'e')
        self.cache.switch('a')
        assert_equals(self.cache.current.id, 'a')
        self.cache.switch('C')
        assert_equals(self.cache.current.id, 'c')
        self.cache.switch(' B ')
        assert_equals(self.cache.current.id, 'b')

    def test_switch_with_non_existing_alias(self):
        self._register('a', 'b')
        assert_raises_with_msg(DataError,
                               "Non-existing index or alias 'whatever'",
                               self.cache.switch, 'whatever')

    def test_switch_with_alias_overriding_index(self):
        # An int argument is an index; a string that matches an alias is
        # resolved as the alias even if it looks like an index.
        self._register('2', '1')
        self.cache.switch(1)
        assert_equals(self.cache.current.id, '2')
        self.cache.switch('1')
        assert_equals(self.cache.current.id, '1')

    def test_close_all(self):
        # close_all() calls close() on every connection and resets the cache.
        connections = self._register('a', 'b', 'c', 'd')
        self.cache.close_all()
        self._verify_initial_state()
        for conn in connections:
            assert_true(conn.closed_by_close)

    def test_close_all_with_given_method(self):
        # The closer-method name is configurable (here: 'exit').
        connections = self._register('a', 'b', 'c', 'd')
        self.cache.close_all('exit')
        self._verify_initial_state()
        for conn in connections:
            assert_true(conn.closed_by_exit)

    def test_empty_cache(self):
        # empty_cache() forgets connections without closing them.
        connections = self._register('a', 'b', 'c', 'd')
        self.cache.empty_cache()
        self._verify_initial_state()
        for conn in connections:
            assert_false(conn.closed_by_close)
            assert_false(conn.closed_by_exit)

    def _verify_initial_state(self):
        # Helper: the cache looks exactly like a freshly constructed one.
        assert_equals(self.cache.current, self.cache._no_current)
        assert_none(self.cache.current_index)
        assert_equals(self.cache._connections, [])
        assert_equals(self.cache._aliases, {})

    def _register(self, *ids):
        # Helper: register one mock per id, aliased by that id; return them.
        connections = []
        for id in ids:
            conn = ConnectionMock(id)
            self.cache.register(conn, id)
            connections.append(conn)
        return connections
class AristaLibrary(object):
    """AristaLibrary - A Robot Framework Library for testing Arista EOS
    Devices.

    The AristaLibrary has been designed to simplify the task of configuration
    validation and verification. If you are familiar with Command-API (eAPI),
    you know that it's already fairly easy to extract configuration data from
    your EOS nodes, but this library seeks to make configuration validation
    possible for those who have no programming experience. This library
    utilizes [https://github.com/arista-eosplus/pyeapi|pyeapi], which greatly
    simplifies the retrieval and analysis of EOS configuration.

    We encourage you to participate in the development of this library by
    visiting [https://github.com/aristanetworks/robotframework-aristalibrary|AristaLibrary],
    hosted on Github.

    Note: This library has been built for Python only.

    = Installing the library =

    You can get the AristaLibrary using PIP

    | pip install robotframework-aristalibrary

    or install from source

    | git clone https://github.com/aristanetworks/robotframework-aristalibrary.git
    | cd robotframework-aristalibrary/
    | python setup.py install
    """

    ROBOT_LIBRARY_SCOPE = 'GLOBAL'
    ROBOT_LIBRARY_VERSION = VERSION

    def __init__(self, transport="https", host='localhost',
                 username="******", password="******", port="443",
                 alias=None):
        """Store default connection parameters.

        Defaults may be changed by specifying them when importing the
        library:

        | *** Setting ***
        | Library AristaLibrary
        | Library AristaLibrary | username="myuser" | password="mypassword"
        """
        self.host = host
        self.transport = transport
        self.port = port
        self.username = username
        self.password = password
        # Fix: the alias argument was previously discarded
        # (self.alias was unconditionally set to None).
        self.alias = alias
        # Maps connection index -> dict of connection details (see connect_to).
        self.connections = dict()
        self._connection = ConnectionCache()

    # ---------------- Start Core Keywords ---------------- #

    def connect_to(self, host='localhost', transport='https', port='443',
                   username='******', password='******', alias=None,
                   enablepwd=None, autorefresh=True):
        """Set up an eAPI connection to an EOS node and register it in the
        connection cache.

        Example:
        | Connect To | host=192.0.2.50 | transport=http | port=80 | username=myUser | password=secret |
        | Connect To | host=192.0.2.51 | username=myUser | password=secret |

        Returns a connection index which can be used with `Change To Switch`
        to change connections during a test suite:
        | ${switch1}= | Connect To | host=192.0.2.51 | username=myUser | password=secret |

        You can confirm which interface eAPI is listening on by running
        `show management api http-commands` on the node, and verify
        connectivity by pointing a browser at
        https://<my_url>:<my_port>/command-api

        If you are new to eAPI see the Arista EOS Central article,
        [https://eos.arista.com/arista-eapi-101|Arista eAPI 101]
        """
        host = str(host)
        transport = str(transport)
        port = int(port)
        username = str(username)
        password = str(password)
        if alias:
            alias = str(alias)

        client = pyeapi.connect(
            host=host, transport=transport, username=username,
            password=password, port=port)
        client_node = pyeapi.client.Node(client)
        client_node.autorefresh = autorefresh
        client_node.enable_authentication(enablepwd)
        conn_indx = self._connection.register(client_node, alias)

        # Always try "show version" when connecting to a node so that if
        # there is a configuration error, we can fail quickly.
        ver = self._connection.current.enable(
            ['show version'])[0]['result']
        mesg = "Created connection to {}://{}:{}@{}:{}/command-api: "\
            "model: {}, serial: {}, systemMAC: {}, version: {}, "\
            "lastBootTime: {}".format(
                transport, username, '****', host, port,
                ver['modelName'], ver['serialNumber'],
                ver['systemMacAddress'], ver['version'],
                ver['bootupTimestamp'])
        logger.write(mesg, 'INFO', False)

        self.connections[conn_indx] = dict(conn=client,
                                           node=client_node,
                                           index=conn_indx,
                                           transport=transport,
                                           host=host,
                                           username=username,
                                           password=password,
                                           port=port,
                                           alias=alias,
                                           autorefresh=autorefresh)
        return conn_indx

    def change_to_switch(self, index_or_alias):
        """Change the active switch for all following keywords.

        Arguments:
        - `index_or_alias`: The connection index (integer) or the alias
        (string) of the desired connection.

        Returns the index of the previous connection which can be stored
        for reuse.

        Example:
        | ${uut1}= | Connect To | ... |
        | ${uut2}= | Connect To | alias=foo ... |
        | ${previous}= | Change To Switch | ${uut1} |
        | ${ver_info}= | Run Commands | show version |
        | Change To Switch | ${previous} |
        | Change To Switch | foo |
        """
        old_index = self._connection.current_index
        self._connection.switch(index_or_alias)
        return old_index

    def clear_all_connections(self):
        """Remove all connection objects from the cache and reset the list
        of indexes.
        """
        self.host = 'localhost'
        self.transport = 'https'
        self.port = '443'
        self.username = '******'
        self.password = '******'
        self.connections = dict()
        # Since we don't really have anything to close, just delete entries.
        self._connection.empty_cache()

    def get_switch(self, index_or_alias=None):
        """Return a dictionary of information about the active switch
        connection: host, username, password, transport and port.

        Example:
        | ${uut1} | Connect To | .... |
        | ${switch_info}= | Get Switch |
        | ${switch_info}= | Get Switch | index_or_alias=2 |
        | ${switch_info}= | Get Switch | index_or_alias=foo |
        | Log | Connected to port ${switch_info['port']} |
        """
        if not index_or_alias:
            index_or_alias = self._connection.current_index
        try:
            values = self.connections[
                self._connection._resolve_alias_or_index(index_or_alias)
            ]
        except (ValueError, KeyError):
            # Unknown index/alias: return a placeholder rather than raising.
            values = {
                'index': None,
                'alias': None
            }
        return values

    def get_switches(self):
        """Return a list with the details (host, username, password, port,
        transport) of every node in the cache.

        Example:
        | ${uut1} | Connect To | .... |
        | ${uut2} | Connect To | .... |
        | @{switch_info}= | Get Switches |
        | Log | First switch connected to port ${switch_info[0]['port']} |
        """
        return list(self.connections.values())

    # ---------------- End Core Keywords ---------------- #

    # ---------------- Start Analysis Keywords ---------- #

    def run_cmds(self, commands, encoding='json'):
        """Low-level access to run any eAPI command against your switch.
        The output can then be processed with Robot's builtin keywords.

        Arguments:
        - `commands`: the full eAPI command (not the short CLI form).
          Good: | show version
          Bad:  | sho ver
        - `encoding`: response format, 'json' (default) or 'text'. EOS does
          not support a JSON response for all commands; refer to your EOS
          Command API documentation for details.

        Examples:
        | ${json_dict}= | Run Cmds | show version | |
        | ${raw_text}= | Run Cmds | show interfaces description | encoding=text |
        """
        if isinstance(commands, six.string_types):
            commands = [str(commands)]
        elif isinstance(commands, list):
            # Handle Python2 unicode strings
            for idx, command in enumerate(commands):
                if isinstance(command, six.text_type):
                    commands[idx] = str(command)
        try:
            commands = make_iterable(commands)
            client = self.connections[self._connection.current_index]['conn']
            return client.execute(commands, encoding)
        except CommandError as e:
            error = ""
            raise AssertionError('eAPI CommandError: {}\n{}'.format(e, error))
        except Exception as e:
            raise AssertionError('eAPI execute command: {}'.format(e))

    def run_commands(self, commands, all_info=False, encoding='json'):
        """Run any eAPI command against your switch, automatically ensuring
        the CLI is in `enable` mode first. The output can then be processed
        with Robot's builtin keywords.

        Arguments:
        - `commands`: the full eAPI command (not the short CLI form).
          May be a single command or a list of commands (pass the list as a
          scalar).
        - `all_info`: when True, return the full response objects instead of
          just the first command's 'result'.
        - `encoding`: response format, 'json' (default) or 'text'. EOS does
          not support a JSON response for all commands.

        Examples:
        | ${json_dict}= | Run Commands | show version | |
        | ${raw_text}= | Run Commands | show interfaces description | all_info=True |
        | @{commands}= | show version | show interfaces Ethernet 1 | |
        | ${json_dict}= | Run Commands | ${commands} | |
        """
        if isinstance(commands, six.string_types):
            commands = [str(commands)]
        elif isinstance(commands, list):
            # Handle Python2 unicode strings
            for idx, command in enumerate(commands):
                if isinstance(command, six.text_type):
                    commands[idx] = str(command)
        try:
            # Fix: `commands` is already a list at this point; the previous
            # code wrapped it in another list ([commands]) and sent a nested
            # list to pyeapi, unlike the sibling `enable` keyword.
            if all_info:
                return self._connection.current.enable(commands, encoding)
            return self._connection.current.enable(
                commands, encoding)[0]['result']
        except CommandError as e:
            error = ""
            raise AssertionError('eAPI CommandError: {}\n{}'.format(e, error))
        except Exception as e:
            raise AssertionError('eAPI execute command: {}'.format(e))

    def enable(self, commands, encoding='json'):
        """Run a list of commands in enable mode via pyeapi Node.enable().

        Returns a list containing the commands, their output and the
        encoding used. If a command fails due to an encoding error, the
        command set will be re-issued individually with text encoding.

        Arguments:
        - `commands`: the full eAPI command(s) (not the short CLI form).
          May be a single command or a list of commands (pass the list as a
          scalar).

        Example:
        | @{list_v}= | Create List | show version | show hostname |
        | ${enable}= | Enable | ${list_v} |
        """
        if isinstance(commands, six.string_types):
            commands = [str(commands)]
        elif isinstance(commands, list):
            # Handle Python2 unicode strings
            for idx, command in enumerate(commands):
                if isinstance(command, six.text_type):
                    commands[idx] = str(command)
        try:
            return self._connection.current.enable(commands, encoding)
        except CommandError as e:
            raise AssertionError('eAPI enable CommandError:'
                                 ' {} {}'.format(e, commands))
        except Exception as e:
            raise AssertionError('eAPI enable execute command: {}'.format(e))

    def get_startup_config(self, section=None):
        """Retrieve the startup config from the node as a string.

        Arguments:
        - `section` (regex): If supplied, the section regex will be matched
        to return the indicated block of the startup config. If omitted,
        the entire startup config is returned.

        Example:
        | ${startup}= | Get Startup Config |
        | ${startup}= | Get Startup Config | section=^management api http-commands$ |
        | ${startup}= | Get Startup Config | section=^interface Ethernet1 |
        | ${startup}= | Get Startup Config | ^interface Ethernet2 |
        """
        if section:
            try:
                return self._connection.current.section(
                    section, config='startup_config')
            except CommandError as e:
                raise AssertionError('Pyeapi error getting startup-config: {}'
                                     .format(e))
            except Exception as e:
                raise AssertionError('eAPI execute command: {}'.format(e))
        else:
            try:
                return self._connection.current.startup_config
            except CommandError as e:
                raise AssertionError('Pyeapi error getting startup-config: {}'
                                     .format(e))
            except Exception as e:
                raise AssertionError('eAPI execute command: {}'.format(e))

    def get_running_config(self, section=None):
        """Retrieve the running config from the node as a string.

        Arguments:
        - `section` (regex): If supplied, the section regex will be matched
        to return the indicated block of the running config. If omitted,
        the entire running config is returned.

        Example:
        | ${running}= | Get Running Config |
        | ${running}= | Get Running Config | section=^management api http-commands$ |
        | ${running}= | Get Running Config | section=^interface Ethernet1 |
        | ${running}= | Get Running Config | ^interface Ethernet2 |
        """
        if section:
            try:
                return self._connection.current.section(section)
            except CommandError as e:
                raise AssertionError('Pyeapi error getting running-config: {}'
                                     .format(e))
            except Exception as e:
                raise AssertionError('eAPI execute command: {}'.format(e))
        else:
            try:
                return self._connection.current.running_config
            except CommandError as e:
                raise AssertionError('Pyeapi error getting running-config: {}'
                                     .format(e))
            except Exception as e:
                raise AssertionError('eAPI execute command: {}'.format(e))

    def config(self, commands):
        """Configure the node with the specified commands.

        Takes either a string or a list and prepends the necessary commands
        to put the session into config mode.

        Arguments:
        - `commands` (str, list): The commands to send to the node in config
        mode. A string is cast to a list.

        Example:
        | @{commands}= | Create List | show version | show hostname |
        | ${config}= | Config | ${commands} |
        """
        if isinstance(commands, six.string_types):
            commands = [commands]
        try:
            return self._connection.current.config(commands)
        except CommandError as e:
            raise AssertionError('eAPI config CommandError:'
                                 ' {} {}'.format(e, commands))
        except Exception as e:
            raise AssertionError('eAPI execute command: {}'.format(e))

    # Keyword alias: 'Configure' behaves exactly like 'Config'.
    configure = config

    def version_should_contain(self, version):
        """Compare the EOS version running on your node with the string
        provided. A substring match is sufficient - e.g. 4.14 matches
        4.14.0F.

        Example:
        | Version Should Contain | 4.14.0F |

        This keyword evaluates the 'version' value from 'show version'
        (the 'Software image version' in the CLI output).
        """
        out = self._connection.current.enable(['show version'])[0]['result']
        version_number = str(out['version'])
        # NOTE: `version` is used as a regex pattern against the version
        # string, so plain substrings work too.
        if not re.search(str(version), version_number):
            raise AssertionError('Searched for %s, Found %s'
                                 % (str(version), version_number))
        return True

    def list_extensions(self, available='any', installed='any'):
        """Return a list with the name of each extension present on the
        node.

        Arguments:
        - `available`: 'any' (default, no filtering), True (only
        'Available' extensions) or False (only 'Not Available' extensions).
        - `installed`: 'any' (default, no filtering), True (only
        'Installed'), False (only 'Not Installed') or "forced" (only
        extensions installed by force).

        Note: If you want all data pertaining to the extensions use the
        Get Extensions keyword.
        """
        # Confirm parameter values are acceptable
        if available not in [True, False, 'any']:
            raise AssertionError('Incorrect parameter value: %s. '
                                 'Choose from [True|False|any]' % available)
        if installed not in [True, False, 'forced', 'any']:
            raise AssertionError('Incorrect parameter value: %s. '
                                 'Choose from [True|False|forced|any]'
                                 % installed)
        out = self._connection.current.enable(['show extensions'])
        out = out[0]
        if out['encoding'] == 'json':
            extensions = out['result']['extensions']
            filtered = []
            for ext, data in extensions.items():
                # Fix: the previous filter treated the sentinel 'any'
                # (a truthy string) as a request to filter, and compared
                # strings with `is not` (identity) instead of `!=`.
                if available != 'any':
                    if available and data['presence'] != 'present':
                        continue
                    if not available and data['presence'] == 'present':
                        continue
                if installed != 'any':
                    # `is True` / `is False` distinguish the booleans from
                    # the (truthy) "forced" string.
                    if installed is True and data['status'] != 'installed':
                        continue
                    if installed == 'forced' and \
                            data['status'] != 'forceInstalled':
                        continue
                    if installed is False and \
                            data['status'] != 'notInstalled':
                        continue
                # All checks passed; keep this extension.
                filtered.append(ext)
            return filtered

    def refresh(self):
        """Refresh the instance config properties.

        Refreshes the public running_config and startup_config properties
        of the currently active switch. Since the properties are lazily
        loaded, this clears the current internal instance variables; on
        the next access they are repopulated with the current config.
        Mainly useful when the connection was made with autorefresh=False.

        Example:
        | Connect To | host=192.0.2.50 | autorefresh=False |
        | ${config}= | Config | hostname newhostname |
        | Refresh | |
        | ${after_refresh}= | Get Running Config |
        """
        self._connection.current.refresh()

    def ping_test(self, address, vrf='default', source_int=None):
        """Ping the provided IP address from the current device and return
        the packet loss percentage (as a string, e.g. '0').

        Arguments:
        - `address`: A text string identifying the IP address to ping from
        the current switch.
        - `vrf`: A text string identifying the VRF to execute the ping
        within.
        - `source_int`: Optional source interface for the ping.

        Example:
        | ${loss_percent}= | Ping Test | 10.0.0.10 |
        | ${loss_percent}= | Ping Test | 1.1.1.1 | mgmt |
        """
        source = ''
        if source_int:
            source = ' source %s' % source_int
        out = self._connection.current.enable(
            ['ping vrf %s %s%s' % (vrf, address, source)],
            encoding='text')
        out = out[0]['result']
        # Parse e.g. "5 packets transmitted, 5 received, 0% packet loss".
        pattern = r'(\d+)% packet loss'
        match = re.search(pattern, out['output'])
        if not match or not match.group(1):
            raise AssertionError('No packet loss percentage found'
                                 ' in output %s.' % out['output'])
        return match.group(1)

    def address_is_reachable(self, address, vrf='default', source_int=None):
        """Check if the provided IP address is reachable from the current
        device. The address is considered reachable if the ping result does
        not have 100% packet loss.

        Arguments:
        - `address`: A text string identifying the IP address to ping from
        the current switch.
        - `vrf`: A text string identifying the VRF to execute the ping
        within.

        Example:
        | ${reachable}= | Address Is Reachable | 1.1.1.1 |
        | ${reachable}= | Address Is Reachable | 1.1.1.1 | mgmt |
        """
        loss_percent = self.ping_test(address, vrf, source_int)
        return loss_percent != '100'

    def eapi_command(self, cmd, revision=None):
        """Return a properly formatted JSON object (a one-element list of
        dicts) for an eAPI command with an optionally-specified output
        revision.

        Arguments:
        - `cmd`: A text string containing the EOS command to execute
        - `revision`: (Optional) The desired output revision

        Example:
        | ${commands}= | eapi Command | show cvx | revision=2 |
        | ${output}= | Enable | ${commands} | |
        """
        command = {}
        command['cmd'] = str(cmd)
        if revision is not None:
            command['revision'] = int(revision)
        return [command]
class WinRMLibrary(object):
    """ Robot Framework library for Windows Remote Management, based on
    pywinrm.

    == Enable Windows Remote Shell ==
    - [ http://support.microsoft.com/kb/555966 | KB-555966 ]
    - Execute on windows server:
    | winrm set winrm/config/client/auth @{Basic="true"}
    | winrm set winrm/config/service/auth @{Basic="true"}
    | winrm set winrm/config/service @{AllowUnencrypted="true"}

    == Dependence ==
    | pywinrm | https://pypi.python.org/pypi/pywinrm |
    | robot framework | http://robotframework.org |
    """

    ROBOT_LIBRARY_SCOPE = 'GLOBAL'

    def __init__(self):
        # Most recently created/switched winrm.Session (None until the
        # first Create Session call).
        self._session = None
        # Robot Framework cache mapping aliases/indexes to sessions.
        self._cache = ConnectionCache('No sessions created')

    def create_session(self, alias, hostname, login, password):
        """ Create session with windows host.
        Does not support domain authentification.

        *Args:*\n
        _alias_ - robot framework alias to identify the session\n
        _hostname_ - windows hostname (not IP)\n
        _login_ - windows local login\n
        _password_ - windows local password

        *Returns:*\n
        Session index

        *Example:*\n
        | Create Session  |  server  |  windows-host |  Administrator  |  1234567890 |
        """
        # Security fix: do not write the password to the log in clear text.
        logger.debug('Connecting using : hostname=%s, login=%s, password=%s '
                     % (hostname, login, '***'))
        self._session = winrm.Session(hostname, (login, password))
        return self._cache.register(self._session, alias)

    def run_cmd(self, alias, command, params=None):
        """ Execute command on remote mashine.

        *Args:*\n
        _alias_ - robot framework alias to identify the session\n
        _command_ - windows command\n
        _params_ - lists of command's parameters

        *Returns:*\n
        Result object with methods: status_code, std_out, std_err.

        *Example:*\n
        | ${params}=  | Create List  |  "/all" |
        | ${result}=  |  Run cmd  |  server  |  ipconfig  |  ${params} |
        | Log  |  ${result.status_code} |
        | Log  |  ${result.std_out} |
        | Log  |  ${result.std_err} |
        """
        # Build a human-readable command line for the log only.
        if params is not None:
            log_cmd = command + ' ' + ' '.join(params)
        else:
            log_cmd = command
        logger.info('Run command on server with alias "%s": %s '
                    % (alias, log_cmd))
        # switch() returns the session registered under the alias.
        self._session = self._cache.switch(alias)
        result = self._session.run_cmd(command, params)
        return result

    def run_ps(self, alias, script):
        """ Run power shell script on remote mashine.

        *Args:*\n
        _alias_ - robot framework alias to identify the session\n
        _script_ - power shell script\n

        *Returns:*\n
        Result object with methods: status_code, std_out, std_err.

        *Example:*\n
        | ${result}=  |  Run ps  |  server  |  get-process iexplore|select -exp ws|measure-object -sum|select -exp Sum |
        | Log  |  ${result.status_code} |
        | Log  |  ${result.std_out} |
        | Log  |  ${result.std_err} |
        """
        logger.info('Run power shell script on server with alias "%s": %s '
                    % (alias, script))
        self._session = self._cache.switch(alias)
        result = self._session.run_ps(script)
        return result

    def delete_all_sessions(self):
        """ Removes all sessions with windows hosts"""
        self._cache.empty_cache()
class OracleDB(object): """ Robot Framework library for working with Oracle DB. == Dependencies == | cx_Oracle | http://cx-oracle.sourceforge.net | version > 3.0 | | robot framework | http://robotframework.org | """ ROBOT_LIBRARY_SCOPE = 'GLOBAL' last_executed_statement = None last_executed_statement_params = None def __init__(self): """Library initialization. Robot Framework ConnectionCache() class is prepared for working with concurrent connections.""" self._connection = None self._cache = ConnectionCache() def connect_to_oracle(self, dbname, dbusername, dbpassword, alias=None): """ Connection to Oracle DB. *Args:*\n _dbname_ - database name;\n _dbusername_ - username for db connection;\n _dbpassword_ - password for db connection;\n _alias_ - connection alias, used for switching between open connections;\n *Returns:*\n Returns ID of the new connection. The connection is set as active. *Example:*\n | Connect To Oracle | rb60db | bis | password | """ try: logger.debug( 'Connecting using : dbname=%s, dbusername=%s, dbpassword=%s ' % (dbname, dbusername, dbpassword)) connection_string = '%s/%s@%s' % (dbusername, dbpassword, dbname) self._connection = cx_Oracle.connect(connection_string) return self._cache.register(self._connection, alias) except cx_Oracle.DatabaseError as err: raise Exception("Logon to oracle Error:", str(err)) def disconnect_from_oracle(self): """ Close active Oracle connection. *Example:*\n | Connect To Oracle | rb60db | bis | password | | Disconnect From Oracle | """ self._connection.close() self._cache.empty_cache() def close_all_oracle_connections(self): """ Close all Oracle connections that were opened. You should not use [#Disconnect From Oracle|Disconnect From Oracle] and [#Close All Oracle Connections|Close All Oracle Connections] together. After calling this keyword connection IDs returned by opening new connections [#Connect To Oracle|Connect To Oracle], will start from 1. 
*Example:*\n | Connect To Oracle | rb60db | bis | password | alias=bis | | Connect To Oracle | rb60db | bis_dcs | password | alias=bis_dsc | | Switch Oracle Connection | bis | | @{sql_out_bis}= | Execute Sql String | select SYS_CONTEXT('USERENV', 'CURRENT_SCHEMA') from dual | | Switch Oracle Connection | bis_dsc | | @{sql_out_bis_dsc}= | Execute Sql String | select SYS_CONTEXT('USERENV', 'CURRENT_SCHEMA') from dual | | Close All Oracle Connections | """ self._connection = self._cache.close_all() def switch_oracle_connection(self, index_or_alias): """ Switch between existing Oracle connections using their connection IDs or aliases. The connection ID is obtained on creating connection. Connection alias is optional and can be set at connecting to DB [#Connect To Oracle|Connect To Oracle]. *Args:*\n _index_or_alias_ - connection ID or alias assigned to connection; *Returns:*\n ID of the previous connection. *Example:* (switch by alias)\n | Connect To Oracle | rb60db | bis | password | alias=bis | | Connect To Oracle | rb60db | bis_dcs | password | alias=bis_dsc | | Switch Oracle Connection | bis | | @{sql_out_bis}= | Execute Sql String | select SYS_CONTEXT('USERENV', 'CURRENT_SCHEMA') from dual | | Switch Oracle Connection | bis_dsc | | @{sql_out_bis_dsc}= | Execute Sql String | select SYS_CONTEXT('USERENV', 'CURRENT_SCHEMA') from dual | | Close All Oracle Connections | =>\n @{sql_out_bis} = BIS\n @{sql_out_bis_dcs}= BIS_DCS *Example:* (switch by index)\n | ${bis_index}= | Connect To Oracle | rb60db | bis | password | | ${bis_dcs_index}= | Connect To Oracle | rb60db | bis_dcs | password | | @{sql_out_bis_dcs_1}= | Execute Sql String | select SYS_CONTEXT('USERENV', 'CURRENT_SCHEMA') from dual | | ${previous_index}= | Switch Oracle Connection | ${bis_index} | | @{sql_out_bis}= | Execute Sql String | select SYS_CONTEXT('USERENV', 'CURRENT_SCHEMA') from dual | | Switch Oracle Connection | ${previous_index} | | @{sql_out_bis_dcs_2}= | Execute Sql String | select 
SYS_CONTEXT('USERENV', 'CURRENT_SCHEMA') from dual | | Close All Oracle Connections | =>\n ${bis_index}= 1\n ${bis_dcs_index}= 2\n @{sql_out_bis_dcs_1} = BIS_DCS\n ${previous_index}= 2\n @{sql_out_bis} = BIS\n @{sql_out_bis_dcs_2}= BIS_DCS """ old_index = self._cache.current_index self._connection = self._cache.switch(index_or_alias) return old_index def _execute_sql(self, cursor, statement, params): """ Execute SQL query on Oracle DB using active connection. *Args*:\n _cursor_: cursor object.\n _statement_: SQL query to be executed.\n _params_: SQL query parameters.\n *Returns:*\n Query results. """ statement_with_params = self._replace_parameters_in_statement( statement, params) logger.info(statement_with_params, html=True) cursor.prepare(statement) self.last_executed_statement = self._replace_parameters_in_statement( statement, params) return cursor.execute(None, params) def _replace_parameters_in_statement(self, statement, params): """Update SQL query parameters, if any exist, with their values for logging purposes. *Args*:\n _statement_: SQL query to be updated.\n _params_: SQL query parameters.\n *Returns:*\n SQL query with parameter names replaced with their values. """ params_keys = sorted(params.keys(), reverse=True) for key in params_keys: if isinstance(params[key], (int, float)): statement = statement.replace(':{}'.format(key), str(params[key])) else: statement = statement.replace(':{}'.format(key), "'{}'".format(params[key])) return statement def execute_plsql_block(self, plsqlstatement, **params): """ PL\SQL block execution. *Args:*\n _plsqlstatement_ - PL\SQL block;\n _params_ - PL\SQL block parameters;\n *Raises:*\n PLSQL Error: Error message encoded according to DB where the code was run *Returns:*\n PL\SQL block execution result. 
*Example:*\n | *Settings* | *Value* | | Library | OracleDB | | *Variables* | *Value* | | ${var_failed} | 3 | | *Test Cases* | *Action* | *Argument* | *Argument* | *Argument* | | Simple | | | ${statement}= | catenate | SEPARATOR=\\r\\n | DECLARE | | | ... | | | a NUMBER := ${var_failed}; | | | ... | | | BEGIN | | | ... | | | a := a + 1; | | | ... | | | if a = 4 then | | | ... | | | raise_application_error ( -20001, 'This is a custom error' ); | | | ... | | | end if; | | | ... | | | END; | | | Execute Plsql Block | plsqlstatement=${statement} | =>\n DatabaseError: ORA-20001: This is a custom error | | ${statement}= | catenate | SEPARATOR=\\r\\n | DECLARE | | | ... | | | a NUMBER := :var; | | | ... | | | BEGIN | | | ... | | | a := a + 1; | | | ... | | | if a = 4 then | | | ... | | | raise_application_error ( -20001, 'This is a custom error' ); | | | ... | | | end if; | | | ... | | | END; | | | Execute Plsql Block | plsqlstatement=${statement} | var=${var_failed} | =>\n DatabaseError: ORA-20001: This is a custom error """ cursor = None try: cursor = self._connection.cursor() self._execute_sql(cursor, plsqlstatement, params) self._connection.commit() finally: if cursor: self._connection.rollback() def execute_plsql_block_with_dbms_output(self, plsqlstatement, **params): """ Execute PL\SQL block with dbms_output(). *Args:*\n _plsqlstatement_ - PL\SQL block;\n _params_ - PL\SQL block parameters;\n *Raises:*\n PLSQL Error: Error message encoded according to DB where the code was run. *Returns:*\n List of values returned by Oracle dbms_output.put_line(). *Example:*\n | *Settings* | *Value* | | Library | OracleDB | | *Variables* | *Value* | | ${var} | 4 | | *Test Cases* | *Action* | *Argument* | *Argument* | *Argument* | | Simple | | | ${statement}= | catenate | SEPARATOR=\\r\\n | DECLARE | | | ... | | | a NUMBER := ${var}; | | | ... | | | BEGIN | | | ... | | | a := a + 1; | | | ... | | | if a = 4 then | | | ... 
| | | raise_application_error ( -20001, 'This is a custom error' ); | | | ... | | | end if; | | | ... | | | dbms_output.put_line ('text '||a||', e-mail text'); | | | ... | | | dbms_output.put_line ('string 2 '); | | | ... | | | END; | | | @{dbms}= | Execute Plsql Block With Dbms Output | plsqlstatement=${statement} | =>\n | @{dbms} | text 5, e-mail text | | | string 2 | | | ${statement}= | catenate | SEPARATOR=\\r\\n | DECLARE | | | ... | | | a NUMBER := :var; | | | ... | | | BEGIN | | | ... | | | a := a + 1; | | | ... | | | if a = 4 then | | | ... | | | raise_application_error ( -20001, 'This is a custom error' ); | | | ... | | | end if; | | | ... | | | dbms_output.put_line ('text '||a||', e-mail text'); | | | ... | | | dbms_output.put_line ('string 2 '); | | | ... | | | END; | | | @{dbms}= | Execute Plsql Block With Dbms Output | plsqlstatement=${statement} | var=${var} | =>\n | @{dbms} | text 5, e-mail text | | | string 2 | """ cursor = None dbms_output = [] try: cursor = self._connection.cursor() cursor.callproc("dbms_output.enable") self._execute_sql(cursor, plsqlstatement, params) self._connection.commit() statusvar = cursor.var(cx_Oracle.NUMBER) linevar = cursor.var(cx_Oracle.STRING) while True: cursor.callproc("dbms_output.get_line", (linevar, statusvar)) if statusvar.getvalue() != 0: break dbms_output.append(linevar.getvalue()) return dbms_output finally: if cursor: self._connection.rollback() def execute_plsql_script(self, file_path, **params): """ Execution of PL\SQL code from file. *Args:*\n _file_path_ - path to PL\SQL script file;\n _params_ - PL\SQL code parameters;\n *Raises:*\n PLSQL Error: Error message encoded according to DB where the code was run. 
*Example:*\n | Execute Plsql Script | ${CURDIR}${/}plsql_script.sql | | Execute Plsql Script | ${CURDIR}${/}plsql_script.sql | first_param=1 | second_param=2 | """ with open(file_path, "r") as script: data = script.read() self.execute_plsql_block(data, **params) def execute_sql_string(self, plsqlstatement, **params): """ Execute PL\SQL string. *Args:*\n _plsqlstatement_ - PL\SQL string;\n _params_ - PL\SQL string parameters;\n *Raises:*\n PLSQL Error: Error message encoded according to DB where the code was run. *Returns:*\n PL\SQL string execution result. *Example:*\n | @{query}= | Execute Sql String | select sysdate, sysdate+1 from dual | | Set Test Variable | ${sys_date} | ${query[0][0]} | | Set Test Variable | ${next_date} | ${query[0][1]} | | @{query}= | Execute Sql String | select sysdate, sysdate+:d from dual | d=1 | | Set Test Variable | ${sys_date} | ${query[0][0]} | | Set Test Variable | ${next_date} | ${query[0][1]} | """ cursor = None try: cursor = self._connection.cursor() self._execute_sql(cursor, plsqlstatement, params) query_result = cursor.fetchall() self.result_logger(query_result) return query_result finally: if cursor: self._connection.rollback() def execute_sql_string_mapped(self, sql_statement, **params): """SQL query execution where each result row is mapped as a dict with column names as keys. *Args:*\n _sql_statement_ - PL\SQL string;\n _params_ - PL\SQL string parameters;\n *Returns:*\n A list of dictionaries where column names are mapped as keys. 
*Example:*\n | @{query}= | Execute Sql String Mapped| select sysdate, sysdate+1 from dual | | Set Test Variable | ${sys_date} | ${query[0][sysdate]} | | Set Test Variable | ${next_date} | ${query[0][sysdate1]} | | @{query}= | Execute Sql String Mapped| select sysdate, sysdate+:d from dual | d=1 | | Set Test Variable | ${sys_date} | ${query[0][sysdate]} | | Set Test Variable | ${next_date} | ${query[0][sysdate1]} | """ cursor = None try: cursor = self._connection.cursor() self._execute_sql(cursor, sql_statement, params) col_name = tuple(i[0] for i in cursor.description) query_result = [dict(zip(col_name, row)) for row in cursor] self.result_logger(query_result) return query_result finally: if cursor: self._connection.rollback() def execute_sql_string_generator(self, sql_statement, **params): """Generator that yields each result row mapped as a dict with column names as keys.\n Intended for use mainly in code for other keywords. *If used, the generator must be explicitly closed before closing DB connection* *Args:*\n _sql_statement_ - PL\SQL string;\n _params_ - PL\SQL string parameters;\n Yields:*\n results dict. """ cursor = None self.last_executed_statement = sql_statement self.last_executed_statement_params = params try: cursor = self._connection.cursor() self._execute_sql(cursor, sql_statement, params) col_name = tuple(i[0] for i in cursor.description) for row in cursor: yield dict(zip(col_name, row)) finally: if cursor: self._connection.rollback() def result_logger(self, query_result, result_amount=10): """Log first n rows from the query results *Args:*\n _query_result_ - query result to log, must be greater than 0 _result_amount_ - amount of entries to display from result """ if len(query_result) > result_amount > 0: query_result = query_result[:result_amount] logger.info(query_result, html=True)
class SessionManager(object):
    """Robot Framework keywords that create, cache and delete DynamoDB sessions."""

    def __init__(self):
        self._builtin = BuiltIn()
        self._cache = ConnectionCache('No sessions.')

    def create_dynamodb_session(self, *args, **kwargs):
        # pylint: disable=line-too-long
        """Create DynamoDB session object.

        Arguments:
        - ``region``: The name of AWS region.
        - ``session``: The session object to AWS connection. (Optional)
        - ``profile``: The profile name to be use to create the session. (Optional)
        - ``access_key``: If ``session`` is None, use this access key to create the session. (Optional)
        - ``secret_key``: If ``session`` is None, use this secret key to create the session. (Optional)
        - ``session_token``: If ``session`` is None, use this session token to create the session. (Optional)
        - ``host``: The address of the host. Use this to connect to a local instance. (Optional)
        - ``port``: Connect to the host on this port. (Default 80)
        - ``is_secure``: Enforce https connection. (Default True)
        - ``label``: A case and space insensitive string to identify the DynamoDB session.
          (Default ``region``)

        Examples:
        | Create DynamoDB Session |           |                   |                   |             | # Use default config |
        | Create DynamoDB Session | us-west-1 |                   |                   |             | # Use default profile |
        | Create DynamoDB Session | us-west-1 | profile=profile1  |                   |             | # Use profile1 |
        | Create DynamoDB Session | us-west-1 | access_key=KEY    | secret_key=SECRET |             | # Label is us-west-1 |
        | Create DynamoDB Session | us-west-1 | access_key=KEY    | secret_key=SECRET | label=LABEL | # Label is LABEL |
        """
        # pylint: disable=line-too-long
        # ``region`` may arrive positionally or as a keyword argument.
        positional = dict(enumerate(args))
        region = positional.get(0, kwargs.pop('region', None))
        label = kwargs.pop('label', region)
        session = Engine()
        # pylint: disable=protected-access
        session._session = kwargs.pop('session', None)
        if session._session is None:
            # No pre-built session supplied: build one from the credentials.
            session._session = self._get_session(region=region, **kwargs)
        client = self._get_client(session._session, region=region, **kwargs)
        # The helpers above received *copies* of kwargs, so the credential /
        # endpoint keys they consumed must still be dropped here before the
        # remainder is forwarded to the connection object.
        for consumed in ('access_key', 'host', 'is_secure', 'port',
                         'profile', 'secret_key', 'session_token'):
            kwargs.pop(consumed, None)
        session.connection = DynamoDBConnection(client, **kwargs)
        if label is None:
            label = session.connection.region
        # pylint: disable=protected-access
        self._builtin.log('Creating DynamoDB session: %s' % label, 'DEBUG')
        self._cache.register(session, alias=label)
        return label

    def delete_all_dynamodb_sessions(self):
        """Removes all DynamoDB sessions."""
        self._cache.empty_cache()

    def delete_dynamodb_session(self, label):
        """Removes DynamoDB session.

        Arguments:
        - ``label``: A case and space insensitive string to identify the DynamoDB session.
          (Default ``region``)

        Examples:
        | Delete DynamoDB Session | LABEL |
        """
        self._cache.switch(label)
        position = self._cache.current_index
        # pylint: disable=protected-access
        self._cache.current = self._cache._no_current
        # pylint: disable=protected-access
        self._cache._connections[position - 1] = None
        # Retire the alias by renaming it so the label cannot be switched to
        # again, while keeping the cache index bookkeeping intact.
        # pylint: disable=protected-access
        self._cache._aliases['x-%s-x' % label] = self._cache._aliases.pop(label)

    def _get_client(self, session, **kwargs):
        """Returns boto3 client session object."""
        host = kwargs.pop('host', None)
        is_secure = kwargs.pop('is_secure', True)
        port = kwargs.pop('port', None)
        region = kwargs.pop('region', None)
        endpoint = self._get_url(host, port, is_secure)
        client_kwargs = {}
        if region is not None:
            client_kwargs['region_name'] = region
        if endpoint is not None:
            client_kwargs['endpoint_url'] = endpoint
        if not is_secure:
            client_kwargs['use_ssl'] = is_secure
        return session.client('dynamodb', **client_kwargs)

    @staticmethod
    def _get_session(**kwargs):
        """Returns boto3 session object."""
        access_key = kwargs.pop('access_key', None)
        secret_key = kwargs.pop('secret_key', None)
        # Optional settings that map 1:1 onto boto3 Session keywords.
        optional = {
            'profile_name': kwargs.pop('profile', None),
            'region_name': kwargs.pop('region', None),
            'aws_session_token': kwargs.pop('session_token', None),
        }
        session_kwargs = {key: value for key, value in optional.items()
                          if value is not None}
        if access_key is not None:
            session_kwargs['aws_access_key_id'] = access_key
            session_kwargs['aws_secret_access_key'] = secret_key
        return Session(**session_kwargs)

    @staticmethod
    def _get_url(host, port, is_secure=True):
        """Returns pre-format host endpoint URL."""
        if host is None:
            return None
        scheme = 'https' if is_secure else 'http'
        endpoint = '%s://%s' % (scheme, host)
        if port is not None:
            endpoint += ':%d' % int(port)
        return endpoint
class REST(object):
    """Robot Framework keywords for calling REST services via cached ``requests`` sessions."""

    def __init__(self):
        self._builtin = BuiltIn()
        self._cache = ConnectionCache()

    @staticmethod
    def _encode_body(data):
        """UTF-8-encode ``data`` when it is a text string.

        ``None`` (the keyword default), bytes, dicts and file-like objects are
        returned untouched so ``requests`` can handle them natively.  The
        previous code called ``data.encode("utf-8")`` unconditionally, which
        raised AttributeError whenever ``data`` was omitted or was not a str.
        """
        if isinstance(data, str):
            return data.encode("utf-8")
        return data

    def create_rest_session(self, alias, headers=None, auth=None, verify=False, cert=None):
        """ Creates REST session with specified alias.

        Arguments:
        | alias | session alias |
        | headers | custom headers for all requests |
        | auth | basic auth |
        | verify | SSL verification |
        | cert | path to SSL certificate file |

        Example usage:
        | ${headers} | Create Dictionary | Content-Type | application/json |
        | @{service_basic_auth} | Set Variable | username | password |
        | Create Rest Session | session_alias | headers=${headers} | auth=${service_basic_auth} | verify=False |
        """
        session = Session()
        if headers:
            session.headers.update(headers)
        if auth:
            session.auth = tuple(auth)
        session.verify = self._builtin.convert_to_boolean(verify)
        session.cert = cert
        self._cache.register(session, alias)

    def head(self, alias, url, params=None, headers=None, cookies=None, timeout=10):
        """ Sends HEAD request.

        Arguments:
        | alias | session alias |
        | url | service url |
        | params | request parameters |
        | headers | custom headers for request, rewrites session headers |
        | cookies | custom request cookies |
        | timeout | response timeout in seconds, raise exception on request timeout |

        Example usage:
        | ${payload} | Create Dictionary | param1 | value1 | param2 | value2 |
        | ${cookies} | Create Dictionary | sessionid | session12345 |
        | ${response} | Head | session_alias | http://localhost/service | params=${payload} | cookies=${cookies} | timeout=5 |
        """
        logger.info("Sending HEAD request to: '%s', session: '%s'" % (url, alias))
        session = self._cache.switch(alias)
        response = session.head(url, params=params, headers=headers, cookies=cookies,
                                timeout=int(timeout))
        # HEAD responses carry no body by definition.
        return {"status": response.status_code, "headers": response.headers}

    def get(self, alias, url, params=None, headers=None, cookies=None, timeout=10):
        """ Sends GET request.

        See arguments description in `Head` keyword.
        """
        logger.info("Sending GET request to: '%s', session: '%s'" % (url, alias))
        session = self._cache.switch(alias)
        response = session.get(url, params=params, headers=headers, cookies=cookies,
                               timeout=int(timeout))
        # Prefer the parsed JSON body; fall back to raw bytes when the
        # response is not valid JSON.
        try:
            return {"status": response.status_code, "headers": response.headers,
                    "body": response.json()}
        except ValueError:
            return {"status": response.status_code, "headers": response.headers,
                    "body": response.content}

    def post(self, alias, url, headers=None, cookies=None, data=None, files=None, timeout=10):
        """ Sends POST request.

        Arguments:
        | alias | session alias |
        | url | service url |
        | headers | custom headers for request, rewrites session headers |
        | cookies | custom request cookies |
        | data | dictionary, bytes, or file-like object to send in the body of the request |
        | files | dictionary of 'name': file-like-objects (or {'name': ('filename', fileobj)}) for multipart encoding upload |
        | timeout | response timeout in seconds, raise exception on request timeout |

        Example usage:
        | @{files} | Set Variable | path_to_file_1 | path_to_file_2 |
        | ${mpe_files} | Convert To Multipart Encoded Files | ${files} |
        | ${payload} | Set Variable | {"id": "34","doc_type": "history"} |
        | ${response} | Post | service_alias | http://localhost/service | data=${payload} |
        | ${response} | Post | service_alias | http://localhost/service | files=${mpe_files} |
        """
        logger.info("Sending POST request to: '%s', session: '%s'" % (url, alias))
        session = self._cache.switch(alias)
        response = session.post(url, headers=headers, cookies=cookies,
                                data=self._encode_body(data), files=files,
                                timeout=int(timeout))
        try:
            return {"status": response.status_code, "headers": response.headers,
                    "body": response.json()}
        except ValueError:
            return {"status": response.status_code, "headers": response.headers,
                    "body": response.content}

    def put(self, alias, url, headers=None, data=None, cookies=None, timeout=10):
        """ Sends PUT request.

        See arguments description in `Post` keyword.
        """
        logger.info("Sending PUT request to: '%s', session: '%s'" % (url, alias))
        session = self._cache.switch(alias)
        response = session.put(url, headers=headers, cookies=cookies,
                               data=self._encode_body(data),
                               timeout=int(timeout))
        try:
            return {"status": response.status_code, "headers": response.headers,
                    "body": response.json()}
        except ValueError:
            return {"status": response.status_code, "headers": response.headers,
                    "body": response.content}

    def delete(self, alias, url, headers=None, data=None, cookies=None, timeout=10):
        """ Sends DELETE request.

        See arguments description in `Post` keyword.
        """
        logger.info("Sending DELETE request to: '%s', session: '%s'" % (url, alias))
        session = self._cache.switch(alias)
        response = session.delete(url, headers=headers, cookies=cookies,
                                  data=self._encode_body(data),
                                  timeout=int(timeout))
        try:
            return {"status": response.status_code, "headers": response.headers,
                    "body": response.json()}
        except ValueError:
            return {"status": response.status_code, "headers": response.headers,
                    "body": response.content}

    def close_all_sessions(self):
        """ Closes all created sessions. """
        self._cache.empty_cache()

    @staticmethod
    def convert_to_multipart_encoded_files(files):
        """ Converts list of files to multipart encoded files.

        Each entry of ``files`` is a (form_field_name, file_path, mime_type)
        triple; the result is the tuple form ``requests`` expects for
        multipart upload.

        Example usage:
        | @{files} | Set Variable | path_to_file_1 | path_to_file_2 |
        | ${mpe_files} | Convert To Multipart Encoded Files | ${files} |

        NOTE(review): the file handles opened here are deliberately left open
        so that a subsequent `Post` can stream them; they are closed only when
        garbage-collected.
        """
        mpe_files = []
        for f in files:
            form_field_name = f[0]
            file_name = path.basename(f[1])
            file_path = f[1]
            mime_type = f[2]
            mpe_files.append((form_field_name, (file_name, open(file_path, "rb"), mime_type)))
        return mpe_files