def test_equality(self):
    """A DotDict compares equal to itself and to an equal DotDict, and
    unequal to a DotDict with different content."""
    clone = DotDict(self.dd)
    empty = DotDict()
    assert_true(self.dd == self.dd)
    assert_false(self.dd != self.dd)
    assert_true(self.dd == clone)
    assert_false(self.dd != clone)
    assert_false(self.dd == empty)
    assert_true(self.dd != empty)
def wait_for_comm_cycle(start_boot_seconds, quiet=None):
    r"""
    Wait for communications to the BMC to stop working and then resume working.
    This function is useful when you have initiated some kind of reboot.

    Description of arguments:
    start_boot_seconds  The time that the boot test started.  The format is the
                        epoch time in seconds, i.e. the number of seconds since
                        1970-01-01 00:00:00 UTC.  This value should be obtained
                        from the BMC so that it is not dependent on any kind of
                        synchronization between this machine and the target BMC
                        This will allow this program to work correctly even in
                        a simulated environment.  This value should be obtained
                        by the caller prior to initiating a reboot.  It can be
                        obtained as follows:
                        state = st.get_state(req_states=['epoch_seconds'])
    quiet               If non-zero, suppress the informational output printed
                        by the gp.qprint_* calls below.
    """

    quiet = int(gp.get_var_value(quiet, 0))

    # Validate parms.
    error_message = gv.svalid_integer(start_boot_seconds,
                                      var_name="start_boot_seconds")
    if error_message != "":
        BuiltIn().fail(gp.sprint_error(error_message))

    # anchor_state anchors each state value regex so it must match exactly.
    match_state = anchor_state(DotDict([('packet_loss', '100')]))
    # Wait for 100% packet loss trying to ping machine.
    wait_state(match_state, wait_time="8 mins", interval="0 seconds")

    match_state['packet_loss'] = '^0$'
    # Wait for 0% packet loss trying to ping machine.
    wait_state(match_state, wait_time="8 mins", interval="0 seconds")

    # Get the uptime and epoch seconds for comparisons.  We want to be sure
    # that the uptime is less than the elapsed boot time.  Further proof that
    # a reboot has indeed occurred (vs random network instability giving a
    # false positive.
    state = get_state(req_states=['uptime', 'epoch_seconds'], quiet=quiet)

    # elapsed_boot_time is how long ago (in seconds) the boot was initiated,
    # measured on the BMC's own clock.
    elapsed_boot_time = int(state['epoch_seconds']) - start_boot_seconds
    gp.qprint_var(elapsed_boot_time)
    if int(float(state['uptime'])) < elapsed_boot_time:
        uptime = state['uptime']
        gp.qprint_var(uptime)
        gp.qprint_timen("The uptime is less than the elapsed boot time,"
                        + " as expected.")
    else:
        # An uptime >= elapsed boot time means the BMC likely never rebooted.
        error_message = "The uptime is greater than the elapsed boot time," +\
            " which is unexpected:\n" +\
            gp.sprint_var(start_boot_seconds) +\
            gp.sprint_var(state)
        BuiltIn().fail(gp.sprint_error(error_message))

    gp.qprint_timen("Verifying that REST API interface is working.")
    match_state = DotDict([('rest', '^1$')])
    state = wait_state(match_state, wait_time="5 mins", interval="2 seconds")
def test_init(self):
    """DotDict can be built from nothing, mappings, item sequences and
    keyword arguments, all producing equal results; None is rejected."""
    assert_true(DotDict() == DotDict({}) == DotDict([]))
    assert_true(DotDict(a=1) == DotDict({'a': 1}) == DotDict([('a', 1)]))
    combined = DotDict({'a': 1}, b=2)
    assert_true(combined == DotDict({'a': 1, 'b': 2})
                == DotDict([('a', 1), ('b', 2)]))
    assert_raises(TypeError, DotDict, None)
def replace(self, positional, named, variables=None):
    """Resolve variables in arguments and normalize them.

    `variables` is None in dry-run mode and when using Libdoc; in that
    case arguments are passed through without variable replacement.
    """
    if variables:
        positional = variables.replace_list(positional, self._resolve_until)
        resolved = self._replace_named(named, variables.replace_scalar)
        named = DotDict(resolved)
    else:
        positional = list(positional)
        named = DotDict(pair for pair in named if isinstance(pair, tuple))
    return positional, named
def test_order_does_not_affect_equality(self):
    """DotDicts with the same items compare equal regardless of insertion
    order, also against plain dicts and OrderedDicts."""
    base = dict(a=1, b=2, c=3, d=4, e=5, f=6, g=7)
    od1 = OrderedDict(sorted(base.items()))
    od2 = OrderedDict(reversed(list(od1.items())))
    dd1 = DotDict(sorted(base.items()))
    dd2 = DotDict(reversed(list(dd1.items())))
    pairs = [(dd1, dd2), (dd1, base), (dd2, base), (dd1, od1), (dd2, od2)]
    for lhs, rhs in pairs:
        assert_equal(lhs, rhs)
        assert_equal(rhs, lhs)
    if not IRONPYTHON:
        # https://github.com/IronLanguages/main/issues/1168
        for lhs, rhs in [(dd1, od2), (dd2, od1)]:
            assert_equal(lhs, rhs)
            assert_equal(rhs, lhs)
    assert_not_equal(od1, od2)
def __call__(self, outputs, datetime=None) -> bool:
    """Parse a finished command's outputs into a data row and store it.

    Verifies the return code against the optional 'rc' option, converts the
    stderr summary into numeric fields, builds a table row and hands it to
    the data handler.  Returns True on success; any failure is wrapped in a
    RunnerError.
    """
    command_out = outputs.get('stdout', None)
    time_output = outputs.get('stderr', None)
    rc = outputs.get('rc')
    try:
        # The 'rc' option may list several acceptable codes separated by '|'.
        exp_rc = self.options.get('rc', None)
        if exp_rc:
            if rc not in [int(_rc) for _rc in re.split(r'\s*\|\s*', exp_rc)]:
                raise AssertionError(
                    f"Result return rc {rc} not match expected\nStdOut:\n\t{command_out}\nStdErr:\n\t{time_output}")
        # stderr is assumed to be comma-separated 'Name:Value' pairs with
        # optional '%' suffixes on values -- TODO confirm against the command
        # template that produces this output.
        data = time_output.split(',')
        row_dict = DotDict(**{k: v.replace('%', '') for (k, v) in [entry.split(':', 1) for entry in data]})
        # Every field except 'Command' is numeric; convert in place.
        for k in row_dict.keys():
            if k == 'Command':
                continue
            row_dict.update({k: float(row_dict[k])})
        logger.info(f"Command: {row_dict.get('Command')} [Rc: {row_dict.get('Rc')}]")
        # Trailing -1 fills the row's final template column.
        row = self.table.template(self.host_id, None, *tuple(list(row_dict.values()) + [-1]))
        du = model.data_factory(self.table, row, output=command_out, datetime=datetime)
        self.data_handler(du)
        return True
    except Exception as e:
        f, li = get_error_info()
        logger.error(f"{self.__class__.__name__}: {e}; File: {f}:{li}")
        raise RunnerError(f"{self}", f"{e}; File: {f}:{li}")
def get_cookies(self, as_dict=False):
    """Returns all cookies of the current page.

    If ``as_dict`` argument evaluates as false, see `Boolean arguments`
    for more details, then cookie information is returned as
    a single string in format ``name1=value1; name2=value2; name3=value3``.

    When ``as_dict`` argument evaluates as true, cookie information
    is returned as Robot Framework dictionary format.

    The string format can be used, for example, for logging purposes or
    in headers when sending HTTP requests. The dictionary format is
    helpful when the result can be passed to requests library's
    Create Session keyword's optional cookies parameter.

    The ``as_dict`` argument is new in SeleniumLibrary 3.3
    """
    cookies = self.driver.get_cookies()
    if is_falsy(as_dict):
        return "; ".join(f"{cookie['name']}={cookie['value']}"
                         for cookie in cookies)
    result = DotDict()
    for cookie in cookies:
        result[cookie["name"]] = cookie["value"]
    return result
def __call__(self, **updates) -> Tuple[str, Iterable[Iterable]]:
    """Upload the cached output and re-template every stored row so it
    carries a reference (OUTPUT_REF) to the uploaded output."""
    reference = CacheLines().upload(self._output)
    for index, record in enumerate(self._data):
        fields = DotDict(record._asdict())
        fields['OUTPUT_REF'] = reference
        self._data[index] = self.table.template(*list(fields.values()))
    return super().__call__(**updates)
def __init__(self, row_key_field_name='Description', init_fields_dict=None, obj_name='tally_sheet'):
    r"""
    Create a tally sheet object.

    Description of arguments:
    row_key_field_name  The name of the row key field (e.g. boot_type,
                        team_name, etc.)
    init_fields_dict    A dictionary which contains field names/initial
                        values.
    obj_name            The name of the tally sheet.
    """

    # Bug fix: the original signature used init_fields_dict=dict(), a mutable
    # default argument that is created once and shared by every tally sheet
    # constructed without an explicit dictionary, so updates could leak
    # between objects.  Use None as the sentinel and build a fresh dict here.
    if init_fields_dict is None:
        init_fields_dict = {}

    self.__obj_name = obj_name
    # The row key field uniquely identifies the row.
    self.__row_key_field_name = row_key_field_name
    # Create a "table" which is an ordered dictionary.
    # If we're running python 2.7 or later, collections has an
    # OrderedDict we can use.  Otherwise, we'll try to use the DotDict (a
    # robot library).  If neither of those are available, we fail.
    try:
        self.__table = collections.OrderedDict()
    except AttributeError:
        self.__table = DotDict()
    # Save the initial fields dictionary.
    self.__init_fields_dict = init_fields_dict
    # NOTE(review): __totals_line intentionally aliases the same dict object
    # as __init_fields_dict, matching the original code -- confirm this
    # sharing is desired before separating them.
    self.__totals_line = init_fields_dict
    self.__sum_fields = []
    self.__calc_fields = []
def test_other_list_like_items_are_not_touched(self):
    """Dicts nested inside the tuple value stay plain dicts (no DotDict
    conversion, no attribute access)."""
    nested = ({'key': 'value'}, [{}])
    dd = DotDict(key=nested)
    first = dd.key[0]
    assert_equal(first['key'], 'value')
    assert_false(hasattr(first, 'key'))
    assert_true(isinstance(first, dict))
    assert_true(isinstance(dd.key[1][0], dict))
def __init__(self):
    """Initialise the mock with canned attribute values used by the tests."""
    mock_name = 'testmock'
    self.name = mock_name
    self.doc = 'cod'
    self.tags = ['foo', 'bar']
    self.message = 'Expected failure'
    self.status = 'FAIL'
    self.data = DotDict({'name': mock_name})
def add_row(self, row_key, init_fields_dict=None):
    r"""
    Add a row to the tally sheet.

    Description of arguments:
    row_key            A unique key value.
    init_fields_dict   A dictionary of field names/initial values.  The number
                       of fields in this dictionary must be the same as what
                       was specified when the tally sheet was created.  If no
                       value is passed, the value used to create the tally
                       sheet will be used.
    """

    if row_key in self.__table:
        # Allowing a duplicate key would silently re-initialize the row.
        raise ValueError("An entry for \"" + row_key + "\" already exists in"
                         + " tally sheet.")
    if init_fields_dict is None:
        init_fields_dict = self.__init_fields_dict
    try:
        self.__table[row_key] = collections.OrderedDict(init_fields_dict)
    except AttributeError:
        # Fall back to DotDict where collections lacks OrderedDict.
        self.__table[row_key] = DotDict(init_fields_dict)
def _set_built_in_variables(self, settings):
    # Seed the variable scope with Robot Framework's built-in variables:
    # execution paths, the selection options, OS-specific separators, common
    # constants, output file locations and the ${PREV_TEST_*} placeholders
    # (initially empty).  Every value is wrapped in GlobalVariableValue.
    for name, value in [('${TEMPDIR}', abspath(tempfile.gettempdir())),
                        ('${EXECDIR}', abspath('.')),
                        ('${OPTIONS}', DotDict({
                            'include': Tags(settings.include),
                            'exclude': Tags(settings.exclude),
                            'skip': Tags(settings.skip),
                            'skip_on_failure': Tags(settings.skip_on_failure)
                        })),
                        ('${/}', os.sep),
                        ('${:}', os.pathsep),
                        ('${\\n}', os.linesep),
                        ('${SPACE}', ' '),
                        ('${True}', True),
                        ('${False}', False),
                        ('${None}', None),
                        ('${null}', None),
                        ('${OUTPUT_DIR}', settings.output_directory),
                        ('${OUTPUT_FILE}', settings.output or 'NONE'),
                        ('${REPORT_FILE}', settings.report or 'NONE'),
                        ('${LOG_FILE}', settings.log or 'NONE'),
                        ('${DEBUG_FILE}', settings.debug_file or 'NONE'),
                        ('${LOG_LEVEL}', settings.log_level),
                        ('${PREV_TEST_NAME}', ''),
                        ('${PREV_TEST_STATUS}', ''),
                        ('${PREV_TEST_MESSAGE}', '')]:
        self[name] = GlobalVariableValue(value)
def result(self) -> Result:
    """Collect the dialog result as a DotDict keyed by input element name.

    Raises RuntimeError if the process was never started, if no result has
    been set yet (call poll() or wait() first), or if the result payload
    carries an error.
    """
    if self._process is None:
        raise RuntimeError("Process not started")
    if self._result is None:
        raise RuntimeError("No result set, call poll() or wait() first")
    self._is_pending = False
    if "error" in self._result:
        raise RuntimeError(self._result["error"])
    fields = self._result["value"]
    collected = DotDict()
    for element in self._elements:
        if is_input(element):
            name = element["name"]
            assert name in fields, f"Missing input value for '{name}'"
            collected[name] = self._post_process_value(fields[name], element=element)
        elif is_submit(element):
            collected["submit"] = fields["submit"]
    assert "submit" in collected, "Missing submit value"
    return collected
def download(self, url: str) -> DownloadedFile:
    """Download given url content.

    Keyword returns dictionary which contains downloaded file path
    and suggested filename as keys (saveAs and suggestedFilename).
    If the file URL cannot be found (the download is triggered by event
    handlers) use `Wait For Download` keyword.

    To enable downloads context's ``acceptDownloads`` needs to be true.
    To configure download directory use New Browser's ``downloadsPath``
    settings

    With default filepath downloaded files are deleted when Context the
    download happened in is closed.

    This keyword requires that there is currently an open page. The keyword
    uses the current pages local state (cookies, sessionstorage,
    localstorage) for the download to avoid authentication problems.

    Example:
    | ${file_object}=    `Download`    ${url}
    | ${actual_size}=    Get File Size    ${file_object.saveAs}

    Example 2:
    | ${elem}=          Get Element   text="Download File"
    | ${href}=          Get Property  ${elem}  href
    | ${file_object}=   Download  ${href}
    | ${file_path}=     Set Variable  ${file_object.saveAs}
    """
    with self.playwright.grpc_channel() as stub:
        response = stub.Download(Request().Url(url=url))
        logger.info(response.log)
        payload = json.loads(response.json)
        # Insert keys one at a time so DotDict's item-assignment path is
        # used, matching the original behavior.
        dotted = DotDict()
        for name, value in payload.items():
            dotted[name] = value
        return dotted
def create_boot_table(file_path=None, os_host=""):
    r"""
    Read the boot table JSON file, convert it to an object and return it.

    Note that if the user is running without a global OS_HOST robot variable
    specified, this function will remove all of the "os_" start and end state
    requirements from the JSON data.

    Description of argument(s):
    file_path  The path to the boot_table file.  If this value is not
               specified, it will be obtained from the "BOOT_TABLE_PATH"
               environment variable, if set.  Otherwise, it will default to
               "data/boot_table.json".  If this value is a relative path,
               this function will use the code_base_dir_path as the base
               directory (see definition above).
    os_host    The host name or IP address of the host associated with the
               machine being tested.  If the user is running without an
               OS_HOST (i.e. if this argument is blank), we remove os
               starting and ending state requirements from the boot entries.
    """
    if file_path is None:
        # Pick a default table that matches the interface/architecture mode.
        if redfish_support_trans_state:
            file_path = os.environ.get('BOOT_TABLE_PATH',
                                       'data/boot_table_redfish.json')
        elif platform_arch_type == "x86":
            file_path = os.environ.get('BOOT_TABLE_PATH',
                                       'data/boot_table_x86.json')
        else:
            file_path = os.environ.get('BOOT_TABLE_PATH',
                                       'data/boot_table.json')

    if not file_path.startswith("/"):
        file_path = code_base_dir_path + file_path

    # Pre-process the file by removing blank lines and comment lines.
    # NOTE(review): the temp file is re-opened by name after being written by
    # a shell command, which assumes a POSIX environment -- confirm if this
    # must run elsewhere.
    temp = tempfile.NamedTemporaryFile()
    temp_file_path = temp.name
    cmd_buf = "egrep -v '^[ ]*$|^[ ]*#' " + file_path + " > " + temp_file_path
    gc.cmd_fnc_u(cmd_buf, quiet=1)

    # object_hook=DotDict makes every JSON object load as a DotDict.
    boot_file = open(temp_file_path)
    boot_table = json.load(boot_file, object_hook=DotDict)

    # If the user is running without an OS_HOST, we remove os starting and
    # ending state requirements from the boot entries.
    if os_host == "":
        for boot in boot_table:
            state_keys = ['start', 'end']
            for state_key in state_keys:
                # Iterate over a copy since entries are popped while looping.
                for sub_state in list(boot_table[boot][state_key]):
                    if sub_state.startswith("os_"):
                        boot_table[boot][state_key].pop(sub_state, None)

    # For every boot_type we should have a corresponding mfg mode boot type.
    # Note that both keys share the same value object.
    enhanced_boot_table = DotDict()
    for key, value in boot_table.items():
        enhanced_boot_table[key] = value
        enhanced_boot_table[key + " (mfg)"] = value

    return enhanced_boot_table
def test_dicts_inside_lists_are_converted(self):
    """Dicts nested in list values, at any depth, support attribute access
    after DotDict construction."""
    inner = {'key': 'value'}
    dd = DotDict(list=[inner, inner, [inner]], deeper=[inner, {'deeper': inner}])
    converted = (dd.list[0], dd.list[1], dd.list[2][0],
                 dd.deeper[0], dd.deeper[1].deeper)
    for item in converted:
        assert_equal(item.key, 'value')
def test_items_inserted_outside_init_are_not_converted(self):
    """Values assigned after construction keep their original types."""
    dd = DotDict()
    dd['dict'] = {'key': 'value'}
    dd['list'] = [{}]
    inserted = dd.dict
    assert_equal(inserted['key'], 'value')
    assert_false(hasattr(inserted, 'key'))
    assert_true(isinstance(inserted, dict))
    assert_true(isinstance(dd.list[0], dict))
def _cookie_as_dot_dict(self, cookie):
    """Convert a Selenium cookie dict to a DotDict, turning the 'expires'
    epoch timestamp into a datetime object."""
    dotted = DotDict()
    for name in cookie:
        value = cookie[name]
        if name == "expires":
            value = datetime.fromtimestamp(value)
        dotted[name] = value
    return dotted
def _convert(self, value):
    """Recursively convert an XML-RPC result: Binary payloads become bytes,
    dict-likes become DotDicts and list-likes become lists."""
    if isinstance(value, xmlrpclib.Binary):
        return bytes(value.data)
    if is_dict_like(value):
        converted_items = ((key, self._convert(item))
                           for key, item in value.items())
        return DotDict(converted_items)
    if is_list_like(value):
        return [self._convert(item) for item in value]
    return value
def _set_variables(self, positional, kwargs, variables):
    """Bind resolved argument values into the keyword's variable scope."""
    spec = self.arguments
    fixed, varargs = self._split_args_and_varargs(positional)
    for name, value in zip(spec.positional, fixed):
        variables['${%s}' % name] = value
    if spec.varargs:
        variables['@{%s}' % spec.varargs] = varargs
    if spec.kwargs:
        variables['&{%s}' % spec.kwargs] = DotDict(kwargs)
def test_nested_dicts_inside_list_likes(self):
    """Dicts nested in list and tuple values are converted; the tuple value
    itself is turned into a list."""
    inner = {'key': 'value'}
    dd = DotDict(list=[inner, inner, [inner]], tuple=(inner, {'deeper': inner}))
    for item in (dd.list[0], dd.list[1], dd.list[2][0], dd.tuple[0]):
        assert_equal(item.key, 'value')
    assert_equal(dd.tuple[1].deeper.key, 'value')
    assert_true(isinstance(dd.tuple, list))
def __init__(self, argspec, variables):
    """Pre-fill argument slots: required positionals as None placeholders,
    defaults (with variables resolved when available) wrapped in Default."""
    resolved_defaults = argspec.defaults
    if variables:
        resolved_defaults = variables.replace_list(resolved_defaults)
    self._positional = argspec.positional
    self._supports_kwargs = bool(argspec.kwargs)
    self._supports_named = argspec.supports_named
    placeholders = [None] * argspec.minargs
    self.args = placeholders + [Default(value) for value in resolved_defaults]
    self.kwargs = DotDict()
def _handle_binary(self, value):
    """Recursively replace XML-RPC Binary objects with their string form,
    preserving dict-like (as DotDict) and list-like container structure."""
    if isinstance(value, xmlrpclib.Binary):
        return str(value)
    if is_dict_like(value):
        handled = ((key, self._handle_binary(item))
                   for key, item in value.items())
        return DotDict(handled)
    if is_list_like(value):
        return [self._handle_binary(item) for item in value]
    return value
def _set_variables(self, positional, kwargs, variables):
    """Bind argument values into the keyword's variable scope, resolving
    DefaultValue placeholders first."""
    spec = self.arguments
    fixed, varargs = self._split_args_and_varargs(positional)
    for name, value in zip(spec.positional, fixed):
        if isinstance(value, DefaultValue):
            value = value.resolve(variables)
        variables['${%s}' % name] = value
    if spec.varargs:
        variables['@{%s}' % spec.varargs] = varargs
    if spec.kwargs:
        variables['&{%s}' % spec.kwargs] = DotDict(kwargs)
def wait_for_comm_cycle(start_boot_seconds, quiet=None):
    r"""
    Wait for the BMC uptime to be less than elapsed_boot_time.

    This function will tolerate an expected loss of communication to the BMC.
    This function is useful when some kind of reboot has been initiated by
    the caller.

    Description of argument(s):
    start_boot_seconds  The time that the boot test started.  The format is
                        the epoch time in seconds, i.e. the number of seconds
                        since 1970-01-01 00:00:00 UTC.  This value should be
                        obtained from the BMC so that it is not dependent on
                        any kind of synchronization between this machine and
                        the target BMC This will allow this program to work
                        correctly even in a simulated environment.  This value
                        should be obtained by the caller prior to initiating
                        a reboot.  It can be obtained as follows:
                        state = st.get_state(req_states=['epoch_seconds'])
    quiet               If non-zero, suppress informational output.
    """

    quiet = int(gp.get_var_value(quiet, 0))

    # Validate parms.
    error_message = gv.valid_integer(start_boot_seconds)
    if error_message:
        BuiltIn().fail(gp.sprint_error(error_message))

    # Wait for uptime to be less than elapsed_boot_time.
    set_start_boot_seconds(start_boot_seconds)
    # The expression is evaluated by wait_state against the live state dict;
    # it is satisfied only once the BMC has rebooted (uptime reset).
    expr = 'int(float(state[\'uptime\'])) < int(state[\'elapsed_boot_time\'])'
    match_state = DotDict([('uptime', '^[0-9\\.]+$'),
                           ('elapsed_boot_time', '^[0-9]+$'),
                           (expressions_key(), [expr])])
    wait_state(match_state, wait_time="12 mins", interval="5 seconds")

    gp.qprint_timen("Verifying that REST/Redfish API interface is working.")
    # Pick the interface to verify based on the transition-state setting.
    if not redfish_support_trans_state:
        match_state = DotDict([('rest', '^1$')])
    else:
        match_state = DotDict([('redfish', '^1$')])
    state = wait_state(match_state, wait_time="5 mins", interval="2 seconds")
def _undecorate(self, name, value):
    """Strip the '${'/'@{'/'&{' decoration from a variable name and coerce
    the value to the type implied by the identifier."""
    validate_var(name)
    identifier = name[0]
    if identifier == '@':
        if not is_list_like(value):
            self._raise_cannot_set_type(name, value, 'list')
        value = list(value)
    elif identifier == '&':
        if not is_dict_like(value):
            self._raise_cannot_set_type(name, value, 'dictionary')
        value = DotDict(value)
    return name[2:-1], value
def _validate_value(self, value, identifier, name):
    """Coerce list ('@') and dict ('&') variable values to their proper
    types, raising VariableError when the value is of the wrong kind."""
    if identifier == '@':
        if is_list_like(value):
            return list(value)
        raise VariableError("Value of variable '%s' is not list or "
                            "list-like." % name)
    if identifier == '&':
        if is_dict_like(value):
            return DotDict(value)
        raise VariableError("Value of variable '%s' is not dictionary "
                            "or dictionary-like." % name)
    return value
def replace(self, positional, named, variables=None):
    """Resolve variables in arguments and normalize them.

    `variables` is None in dry-run mode and when using Libdoc; in that case
    arguments are passed through without variable replacement.
    """
    if variables:
        positional = variables.replace_list(positional, self._resolve_until)
        named = list(self._replace_named(named, variables.replace_scalar))
    else:
        positional = list(positional)
        named = [pair for pair in named if isinstance(pair, tuple)]
    # FIXME: DotDict is somewhat slow and not generally needed.
    # Either use normal dict by default or return list of tuples.
    return positional, DotDict(named)
def _set_variables(self, positional, kwargs, variables):
    """Bind positional, keyword-only and free keyword arguments into the
    keyword's variable scope, resolving DefaultValue placeholders."""
    spec = self.arguments
    fixed, varargs = self._split_args_and_varargs(positional)
    kwonly, free_kwargs = self._split_kwonly_and_kwargs(kwargs)
    for name, value in chain(zip(spec.positional, fixed), kwonly):
        if isinstance(value, DefaultValue):
            value = value.resolve(variables)
        variables['${%s}' % name] = value
    if spec.varargs:
        variables['@{%s}' % spec.varargs] = varargs
    if spec.kwargs:
        variables['&{%s}' % spec.kwargs] = DotDict(free_kwargs)