def test_invalid_cause(self):
    try:
        raise_from(IndexError, 5)
    except TypeError as e:
        self.assertIn("exception cause", str(e))
    else:
        self.fail("No exception raised")
def test_mandatoryValues(self):
    # Try to create a Podcast once for each mandatory property.
    # On each iteration, exactly one of the properties is not set.
    # Therefore, an exception should be thrown on each iteration.
    mandatory_properties = set([
        "description",
        "title",
        "link",
        "explicit",
    ])

    for test_property in mandatory_properties:
        fg = Podcast()
        if test_property != "description":
            fg.description = self.description
        if test_property != "title":
            fg.name = self.name
        if test_property != "link":
            fg.website = self.website
        if test_property != "explicit":
            fg.explicit = self.explicit
        try:
            self.assertRaises(ValueError, fg._create_rss)
        except AssertionError as e:
            raise_from(AssertionError(
                "The test failed for %s" % test_property), e)
def from_response(cls, requested_api_version, response):
    try:
        header = to_xml(response.text, encoding=response.encoding).find('{%s}Header' % SOAPNS)
        if header is None:
            raise ParseError()
    except ParseError as e:
        raise_from(EWSWarning('Unknown XML response from %s (response: %s)' % (response, response.text)), e)
    info = header.find('{%s}ServerVersionInfo' % TNS)
    if info is None:
        raise TransportError('No ServerVersionInfo in response: %s' % response.text)
    try:
        build = Build.from_xml(info)
    except ValueError:
        raise TransportError('Bad ServerVersionInfo in response: %s' % response.text)
    # Not all Exchange servers send the Version element
    api_version_from_server = info.get('Version') or build.api_version()
    if api_version_from_server != requested_api_version:
        if api_version_from_server.startswith('V2_') \
                or api_version_from_server.startswith('V2015_') \
                or api_version_from_server.startswith('V2016_'):
            # Office 365 is an expert in sending invalid API version strings...
            # Work around a bug in Exchange that reports a bogus API version in the XML response. Trust the
            # server response except 'V2_nn' or 'V201[5,6]_nn_mm' which is bogus
            log.info('API version "%s" worked but server reports version "%s". Using "%s"',
                     requested_api_version, api_version_from_server, requested_api_version)
            api_version_from_server = requested_api_version
        else:
            log.info('API version "%s" worked but server reports version "%s". Using "%s"',
                     requested_api_version, api_version_from_server, api_version_from_server)
    return cls(build, api_version_from_server)
def vect_wrapper(x, out=None, **kwargs):
    # Find out dimension first
    if isinstance(x, np.ndarray):  # array
        if x.ndim == 1:
            ndim = 1
        elif x.ndim == 2:
            ndim = len(x)
        else:
            raise ValueError('only 1- or 2-dimensional arrays '
                             'supported.')
    else:  # meshgrid or single value
        try:
            ndim = len(x)
        except TypeError:
            ndim = 1

    if is_valid_input_meshgrid(x, ndim):
        return _vect_wrapper_meshgrid(x, out, ndim, **kwargs)
    elif is_valid_input_array(x, ndim):
        return _vect_wrapper_array(x, out, **kwargs)
    else:
        try:
            return func(x)
        except Exception as err:
            raise_from(
                TypeError('invalid vectorized input type.'), err)
def test_class_cause(self):
    try:
        raise_from(IndexError, KeyError)
    except IndexError as e:
        self.assertIsInstance(e.__cause__, KeyError)
    else:
        self.fail("No exception raised")
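# For context on what the tests above exercise: raise_from (assumed here to be
# future.utils.raise_from; six.raise_from behaves the same) raises a new
# exception with the old one attached as __cause__. A minimal sketch with a
# hypothetical helper:
from future.utils import raise_from

def parse_port(raw):
    # Hypothetical helper: re-raise a low-level ValueError as a domain error,
    # keeping the original reachable via __cause__.
    try:
        return int(raw)
    except ValueError as e:
        raise_from(RuntimeError("invalid port: %r" % raw), e)

try:
    parse_port("eighty")
except RuntimeError as e:
    assert isinstance(e.__cause__, ValueError)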
def test(self):
    # We need the version for this
    try:
        socket.gethostbyname_ex(self.server)[2][0]
    except socket.gaierror as e:
        raise_from(TransportError("Server '%s' does not exist" % self.server), e)
    return test_credentials(protocol=self)
def convert(self, blob, **kw):
    tmp_dir = self.TMP_DIR
    cur_dir = os.getcwd()
    with make_temp_file(blob, suffix=".doc") as in_fn,\
            make_temp_file(suffix='.txt') as out_fn:
        try:
            os.chdir(str(tmp_dir))
            subprocess.check_call(
                ['abiword', '--to', os.path.basename(out_fn),
                 os.path.basename(in_fn)])
        except Exception as e:
            raise_from(ConversionError('abiword'), e)
        finally:
            os.chdir(cur_dir)

        converted = open(out_fn).read()
        encoding = self.encoding_sniffer.from_file(out_fn)

        if encoding in ("binary", None):
            encoding = "ascii"
        try:
            converted_unicode = unicode(converted, encoding, errors="ignore")
        except Exception:
            traceback.print_exc()
            converted_unicode = unicode(converted, errors="ignore")

        return converted_unicode
def contains_set(self, other, tol=0.0):
    """Test if another set is contained.

    Parameters
    ----------
    other : `Set`
        The set to be tested. It must implement a ``min()`` and a
        ``max()`` method, otherwise an `AttributeError` is raised.
    tol : `float`, optional
        The maximum allowed distance in 'inf'-norm between the
        other set and this interval product.
        Default: 0.0

    Examples
    --------
    >>> b1, e1 = [-1, 0, 2], [-0.5, 0, 3]
    >>> rbox1 = IntervalProd(b1, e1)
    >>> b2, e2 = [-0.6, 0, 2.1], [-0.5, 0, 2.5]
    >>> rbox2 = IntervalProd(b2, e2)
    >>> rbox1.contains_set(rbox2)
    True
    >>> rbox2.contains_set(rbox1)
    False
    """
    try:
        return (self.approx_contains(other.min(), tol) and
                self.approx_contains(other.max(), tol))
    except AttributeError as err:
        raise_from(
            AttributeError('cannot test {!r} without `min()` and `max()` '
                           'methods.'.format(other)), err)
def create_function_from_source(function_source, imports=None):
    """Return a function object from a function source

    Parameters
    ----------
    function_source : unicode string
        unicode string defining a function
    imports : list of strings
        list of import statements in string form that allow the function
        to be executed in an otherwise empty namespace
    """
    ns = {}
    import_keys = []
    try:
        if imports is not None:
            for statement in imports:
                exec(statement, ns)
            import_keys = list(ns.keys())
        exec(function_source, ns)
    except Exception as e:
        msg = 'Error executing function\n{}\n'.format(function_source)
        msg += ("Functions in connection strings have to be standalone. "
                "They cannot be declared either interactively or inside "
                "another function or inline in the connect string. Any "
                "imports should be done inside the function.")
        raise_from(RuntimeError(msg), e)
    ns_funcs = list(set(ns) - set(import_keys + ['__builtins__']))
    assert len(ns_funcs) == 1, "Function or inputs are ill-defined"
    func = ns[ns_funcs[0]]
    return func
def list_leases(self, uuid=None):
    """
    List current subnet leases

    Args:
        uuid(str): Filter the leases by uuid

    Returns:
        list of :class:~Lease: current leases
    """
    try:
        lease_files = os.listdir(self.path)
    except OSError as e:
        raise_from(
            LagoSubnetLeaseBadPermissionsException(self.path, e.strerror), e
        )

    leases = [
        self.create_lease_object_from_idx(lease_file.split('.')[0])
        for lease_file in lease_files if lease_file != LOCK_NAME
    ]

    if not uuid:
        return leases
    else:
        return [lease for lease in leases if lease.uuid == uuid]
def get_key_by_fingerprint(self, fingerprint):
    try:
        return (self.session.query(PublicKey)
                .filter_by(fingerprint=hexlify(fingerprint))
                .one())
    except NoResultFound as e:
        raise_from(KeyNotFoundError(
            'Key {} not found'.format(hexlify(fingerprint))), e)
def convert(self, blob, **kw):
    with make_temp_file(blob) as in_fn, make_temp_file() as out_fn:
        try:
            subprocess.check_call(['convert', in_fn, "pdf:" + out_fn])
            converted = open(out_fn).read()
            return converted
        except Exception as e:
            raise_from(ConversionError('convert'), e)
def add_article(self, url, title=None, tags=None):
    if isinstance(tags, tuple):
        tags = ','.join(list(tags))

    try:
        return self._pocket.add(url, title, tags)
    except PocketException as e:
        raise_from(self._check_exception(e), e)
def parse_response(cls, response):
    """Parse the cls.res_msg_type proto msg."""
    res_msg = cls.res_msg_type()
    try:
        res_msg.ParseFromString(response.content)
    except DecodeError as e:
        raise_from(ParseException(str(e)), e)
    return res_msg
def run_uno():
    try:
        self._process = subprocess.Popen(
            cmd, close_fds=True, cwd=bytes(self.TMP_DIR))
        self._process.communicate()
    except Exception as e:
        logger.error('run_uno error: %s', bytes(e), exc_info=True)
        raise_from(ConversionError('unoconv'), e)
def search(self, search, state, tag, sort):
    try:
        articles = self._pocket.retrieve(
            search=search, state=state, tag=tag, sort=sort)
        return self._get_articles_index(articles)
    except PocketException as e:
        raise_from(self._check_exception(e), e)
def _autodiscover_hostname(hostname, credentials, email, has_ssl, verify, auth_type=None):
    # Tries to get autodiscover data on a specific host. If we are HTTP redirected, we restart the autodiscover
    # dance on the new host.
    scheme = 'https' if has_ssl else 'http'
    url = '%s://%s/Autodiscover/Autodiscover.xml' % (scheme, hostname)
    log.debug('Trying autodiscover on %s', url)
    if not auth_type:
        try:
            auth_type = _get_autodiscover_auth_type(url=url, verify=verify, email=email)
        except RedirectError as e:
            log.debug(e)
            redirect_url, redirect_hostname, redirect_has_ssl = e.url, e.server, e.has_ssl
            log.debug('We were redirected to %s', redirect_url)
            canonical_hostname = _get_canonical_name(redirect_hostname)
            if canonical_hostname:
                log.debug('Canonical hostname is %s', canonical_hostname)
                redirect_hostname = canonical_hostname
            # Try the process on the new host, without 'www'. This is beyond the autodiscover protocol and an
            # attempt to work around seriously misconfigured Exchange servers. It's probably better to just show
            # the Exchange admins the report from https://testconnectivity.microsoft.com
            if redirect_hostname.startswith('www.'):
                redirect_hostname = redirect_hostname[4:]
            if redirect_hostname == hostname:
                log.debug('We were redirected to the same host')
                raise_from(AutoDiscoverFailed('We were redirected to the same host'), e)
            raise_from(RedirectError(
                url='%s://%s' % ('https' if redirect_has_ssl else 'http', redirect_hostname)), e)

    autodiscover_protocol = AutodiscoverProtocol(service_endpoint=url, credentials=credentials,
                                                 auth_type=auth_type, verify_ssl=verify)
    r = _get_autodiscover_response(protocol=autodiscover_protocol, email=email)
    if r.status_code == 302:
        redirect_url, redirect_hostname, redirect_has_ssl = get_redirect_url(r)
        log.debug('We were redirected to %s', redirect_url)
        # Don't raise RedirectError here because we need to pass the ssl and auth_type data
        return _autodiscover_hostname(redirect_hostname, credentials, email, has_ssl=redirect_has_ssl,
                                      verify=verify, auth_type=None)
    domain = get_domain(email)
    try:
        server, has_ssl, ews_url, ews_auth_type, primary_smtp_address = _parse_response(r.text)
        if not primary_smtp_address:
            primary_smtp_address = email
    except (ErrorNonExistentMailbox, AutoDiscoverRedirect):
        # These are both valid responses from an autodiscover server, showing that we have found the correct
        # server for the original domain. Fill cache before re-raising
        log.debug('Adding cache entry for %s (hostname %s, has_ssl %s)', domain, hostname, has_ssl)
        _autodiscover_cache[(domain, credentials, verify)] = autodiscover_protocol
        raise

    # Cache the final hostname of the autodiscover service so we don't need to autodiscover the same domain again
    log.debug('Adding cache entry for %s (hostname %s, has_ssl %s)', domain, hostname, has_ssl)
    _autodiscover_cache[(domain, credentials, verify)] = autodiscover_protocol
    # The autodiscover response contains an auth type, but we don't want to spend time here testing if it actually
    # works. Instead of forcing a possibly-wrong auth type, just let Protocol auto-detect the auth type.
    # If we didn't want to verify SSL on the autodiscover server, we probably don't want to on the Exchange server,
    # either.
    return primary_smtp_address, Protocol(service_endpoint=ews_url, credentials=credentials, auth_type=None,
                                          verify_ssl=verify)
def __call__(self, value, **flags):
    try:
        method = (self.pattern.search
                  if flags.get('pattern_group__search', self.search)
                  else self.pattern.match)
        match = method(value)
        if match is None:
            raise ExtractorException
        return match.group(flags.get('pattern_group__group', self.group))
    except Exception as e:
        raise_from(ExtractorException, e)
def __init__(self, *args):
    if args:
        if isinstance(args[0], type) and issubclass(args[0], Struct):
            self.structure_list = []
            self.structure_class = args[0]
        else:
            # The cause passed to raise_from must be an exception (or None),
            # so fold the offending type into the message instead.
            raise_from(ArrayError("Unsupported type: %s" % type(args[0])), None)
    else:
        raise_from(ArrayError("Array Error: empty array"), None)
def test_raise_from_None(self):
    try:
        try:
            raise TypeError("foo")
        except:
            raise_from(ValueError(), None)
    except ValueError as e:
        self.assertTrue(isinstance(e.__context__, TypeError))
        self.assertIsNone(e.__cause__)
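# The None-cause form tested above mirrors Python 3's `raise ... from None`:
# the implicit __context__ is still recorded, but __cause__ stays None and the
# chained context is suppressed in the printed traceback. A standalone sketch
# (assuming future.utils.raise_from, as in these tests):
from future.utils import raise_from

try:
    try:
        raise TypeError("foo")
    except TypeError:
        raise_from(ValueError("bar"), None)
except ValueError as e:
    assert e.__cause__ is None
    assert isinstance(e.__context__, TypeError)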
def __unpack__(self, type_, buf, _size=None):
    fmt = self.endian + type_
    size = struct.calcsize(fmt) if _size is None else _size
    try:
        unpacked = struct.unpack(fmt, buf[:size]), buf[size:]
    except struct.error as exc:
        raise_from(UnpackError("Unable to unpack structure"), exc)
    else:
        return unpacked
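# To illustrate the (unpacked, remainder) convention used by __unpack__ above
# (plain struct, no other assumptions): the format string is an endianness
# prefix plus a type code, and the buffer is split at the computed size.
import struct

buf = b'\x01\x00rest'
fmt = '<H'                            # little-endian unsigned short
size = struct.calcsize(fmt)           # 2
unpacked, rest = struct.unpack(fmt, buf[:size]), buf[size:]
print(unpacked, rest)                 # (1,) b'rest'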
def check(tests, byteorder, signed=False):
    for test, expected in tests.items():
        try:
            self.assertEqual(
                int.from_bytes(test, byteorder, signed=signed),
                int(expected))
        except Exception as err:
            raise_from(AssertionError(
                "failed to convert {0} with byteorder={1!r} and signed={2}"
                .format(test, byteorder, signed)), err)
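# As a reminder of the semantics the helper above checks: int.from_bytes
# (Python 3) interprets a byte string under the given byte order and
# signedness. For example:
assert int.from_bytes(b'\x00\x10', 'big') == 16
assert int.from_bytes(b'\x00\x10', 'little') == 4096
assert int.from_bytes(b'\xfc\x00', 'big', signed=True) == -1024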
def add_extra_error_message(e):
    etype, value, traceback = sys.exc_info()
    # Use .get() so a miss yields None instead of raising KeyError,
    # matching the falsy check below.
    extra_message = common_errors.get((type(e), str(e)))

    if extra_message:
        if sys.version_info >= (3,):
            raise_from(AutogradHint(extra_message), e)
        else:
            raise_(AutogradHint, (extra_message, etype, value), traceback)
    raise_(etype, value, traceback)
def activate(self, plugins=[]):
    """
    Activates given plugins.

    This calls mainly plugin.activate() and plugins register needed resources
    like commands, signals or documents.

    If given plugins have not been initialised, this is also done via :func:`_load`.

    :param plugins: List of plugin names
    :type plugins: list of strings
    """
    self._log.debug("Plugins Activation started")

    if not isinstance(plugins, list):
        raise AttributeError("plugins must be a list, not %s" % type(plugins))

    self._log.debug("Plugins to activate: %s" % ", ".join(plugins))

    plugins_activated = []
    for plugin_name in plugins:
        if not isinstance(plugin_name, str):
            raise AttributeError("plugin name must be a str, not %s" % type(plugin_name))

        if plugin_name not in self._plugins.keys() and plugin_name in self.classes._classes.keys():
            self._log.debug("Initialisation needed before activation.")
            try:
                self.initialise_by_names([plugin_name])
            except Exception as e:
                self._log.error("Couldn't initialise plugin %s. Reason %s" % (plugin_name, e))
                if self._app.strict:
                    error = "Couldn't initialise plugin %s" % plugin_name
                    if sys.version_info[0] < 3:
                        error += " Reason: %s" % e
                    raise_from(Exception(error), e)
                else:
                    continue
        if plugin_name in self._plugins.keys():
            self._log.debug("Activating plugin %s" % plugin_name)
            if not self._plugins[plugin_name].active:
                try:
                    self._plugins[plugin_name].activate()
                except Exception as e:
                    raise_from(
                        PluginNotActivatableException(
                            "Plugin %s could not be activated: %s" % (plugin_name, e)), e)
                else:
                    self._log.debug("Plugin %s activated" % plugin_name)
                    plugins_activated.append(plugin_name)
            else:
                self._log.warning("Plugin %s was already activated." % plugin_name)
                if self._app.strict:
                    raise PluginNotInitialisableException()

    self._log.info("Plugins activated: %s" % ", ".join(plugins_activated))
def __call__(self, value, **flags):
    try:
        value = self.coercer(value)
        if not isinstance(value, self.type):
            raise ExtractorException('Unable to coerce {} to {}'.format(
                value, self.type))
    except ExtractorException:
        raise
    except Exception as e:
        raise_from(ExtractorException, e)
    return value
def __call__(self, value, **flags): """ :param value: The value to attempt extraction from :param flags: A dictionary of flags :return: The extracted value :raise: ExtractorException """ try: return value[self.item_or_slice] except Exception as e: raise_from(ExtractorException, e)
def __getattr__(self, attr):
    try:
        return object.__getattribute__(self, attr)
    except AttributeError as e:
        try:
            return self._jsondata[attr]
        except KeyError as e:
            if six.PY2:
                raise_from(AttributeError(e.message), e)
            else:
                raise_from(AttributeError(e), e)
def _get_autodiscover_auth_type(url, email, verify, encoding='utf-8'):
    try:
        data = _get_autodiscover_payload(email=email, encoding=encoding)
        return transport.get_autodiscover_authtype(service_endpoint=url, data=data,
                                                   timeout=TIMEOUT, verify=verify)
    except (TransportError, requests.exceptions.ChunkedEncodingError,
            requests.exceptions.ConnectionError, ConnectionResetError,
            requests.exceptions.Timeout, SocketTimeout,
            requests.exceptions.SSLError) as e:
        if isinstance(e, RedirectError):
            raise
        log.debug('Error guessing auth type: %s', e)
        raise_from(AutoDiscoverFailed('Error guessing auth type: %s' % e), e)
def test_erroneous_cause(self):
    class MyException(Exception):
        def __init__(self):
            raise RuntimeError()

    try:
        raise_from(IndexError, MyException)
    except RuntimeError:
        pass
    else:
        self.fail("No exception raised")
def _key_for(self, target):
    try:
        return self._cache_key_generator.key_for_target(
            target,
            transitive=self._invalidate_dependents,
            fingerprint_strategy=self._fingerprint_strategy)
    except Exception as e:
        # This is a catch-all for problems we haven't caught up with and given a better diagnostic.
        # TODO(Eric Ayers): If you see this exception, add a fix to catch the problem earlier.
        new_exception = self.CacheValidationError(
            "Problem validating target {} in {}: {}"
            .format(target.id, target.address.spec_path, e))
        # Raise the error directly instead of wrapping it in a second
        # CacheValidationError.
        raise_from(new_exception, e)
def send_command(self, method, params=None):
    """
    Send a command to the bulb.

    :param str method: The name of the method to send.
    :param list params: The list of parameters for the method.

    :raises BulbException: When the bulb indicates an error condition.

    :returns: The response from the bulb.
    """
    command = {
        "id": self._cmd_id,
        "method": method,
        "params": params,
    }

    logging.debug("%s > %s", self, command)

    try:
        self._socket.send((json.dumps(command) + "\r\n").encode("utf8"))
        if self.type in ['desklamp']:
            self._socket.send(("\r\n").encode("utf8"))
    except socket.error as ex:
        # Some error occurred, remove this socket in hopes that we can later
        # create a new one.
        self.__socket.close()
        self.__socket = None
        raise_from(BulbException('A socket error occurred when sending the command.'), ex)

    if self._music_mode:
        # We're in music mode, nothing else will happen.
        return {"result": ["ok"]}

    # The bulb will send us updates on its state in addition to responses,
    # so we want to make sure that we read until we see an actual response.
    response = None
    while response is None:
        try:
            data = self._socket.recv(16 * 1024)
        except socket.error as e:
            # An error occurred, let's close and abort...
            self.__socket.close()
            self.__socket = None
            response = {"error": "Bulb closed the connection. " + str(e)}
            break

        for line in data.split(b"\r\n"):
            if not line:
                continue

            try:
                line = json.loads(line.decode("utf8"))
                logging.debug("%s < %s", self, line)
            except ValueError:
                line = {"result": ["invalid command"]}

            if line.get("method") != "props":
                # This is probably the response we want.
                response = line
            else:
                self._last_properties.update(line["params"])

    if "error" in response:
        raise BulbException(response["error"])

    return response
def check_keys_match_recursive(expected_val, actual_val, keys, strict=True):
    """Utility to recursively check response values

    expected and actual both have to be of the same type or it will raise an
    error.

    Example:

        >>> check_keys_match_recursive({"a": {"b": "c"}}, {"a": {"b": "c"}}, []) is None
        True
        >>> check_keys_match_recursive({"a": {"b": "c"}}, {"a": {"b": "d"}}, []) # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
            File "/home/michael/code/tavern/tavern/tavern/util/dict_util.py", line 223, in check_keys_match_recursive
        tavern.util.exceptions.KeyMismatchError: Key mismatch: (expected["a"]["b"] = 'c', actual["a"]["b"] = 'd')

    Todo:
        This could be turned into a single-dispatch function for cleaner
        code and to remove a load of the isinstance checks

    Args:
        expected_val (dict, list, str): expected value
        actual_val (dict, list, str): actual value
        keys (list): any keys which have been recursively parsed to get to this
            point. Used for debug output.
        strict (bool): Whether 'strict' key checking should be done. If this is
            False, a mismatch in dictionary keys between the expected and the
            actual values will not raise an error (but a mismatch in value will
            raise an error)

    Raises:
        KeyMismatchError: expected_val and actual_val did not match
    """
    # pylint: disable=too-many-locals,too-many-statements

    def full_err():
        """Get error in the format:

        a["b"]["c"] = 4, b["b"]["c"] = {'key': 'value'}
        """

        def _format_err(which):
            return "{}{}".format(which, "".join('["{}"]'.format(key) for key in keys))

        e_formatted = _format_err("expected")
        a_formatted = _format_err("actual")
        return "{} = '{}' (type = {}), {} = '{}' (type = {})".format(
            e_formatted,
            expected_val,
            type(expected_val),
            a_formatted,
            actual_val,
            type(actual_val),
        )

    # Check required because of python 2/3 unicode compatability when loading yaml
    if isinstance(actual_val, ustr):
        actual_type = str
    else:
        actual_type = type(actual_val)

    if expected_val is ANYTHING:
        # Match anything. We could just early exit here but having the debug
        # logging below is useful
        expected_matches = True
    elif isinstance(expected_val, TypeSentinel):
        # If the 'expected' type is actually just a sentinel for another type,
        # then it should match
        expected_matches = expected_val.constructor == actual_type
    else:
        # Normal matching
        expected_matches = (
            # If they are the same type
            isinstance(expected_val, actual_type)
            or
            # Handles the case where, for example, the 'actual type' returned by
            # a custom backend returns an OrderedDict, which is a subclass of
            # dict but will raise a confusing error if the contents are
            # different
            issubclass(actual_type, type(expected_val))
        )

    try:
        assert actual_val == expected_val
    except AssertionError as e:
        # At this point, there is likely to be an error unless we're using any
        # of the type sentinels

        if not (expected_val is ANYTHING):  # pylint: disable=superfluous-parens
            # NOTE
            # Second part of this check will be removed in future - see deprecation
            # warning below for details
            if not expected_matches and expected_val is not None:
                raise_from(
                    exceptions.KeyMismatchError(
                        "Type of returned data was different than expected ({})"
                        .format(full_err())),
                    e,
                )

        if isinstance(expected_val, dict):
            akeys = set(actual_val.keys())
            ekeys = set(expected_val.keys())

            if akeys != ekeys:
                extra_actual_keys = akeys - ekeys
                extra_expected_keys = ekeys - akeys

                msg = ""
                if extra_actual_keys:
                    msg += " - Extra keys in response: {}".format(extra_actual_keys)
                if extra_expected_keys:
                    msg += " - Keys missing from response: {}".format(extra_expected_keys)

                full_msg = "Structure of returned data was different than expected {} ({})".format(
                    msg, full_err())

                # If there are more keys in 'expected' compared to 'actual',
                # this is still a hard error and we shouldn't continue
                if extra_expected_keys or strict:
                    raise_from(exceptions.KeyMismatchError(full_msg), e)
                else:
                    logger.debug(
                        "Mismatch in returned data, continuing due to strict=%s: %s",
                        strict,
                        full_msg,
                        exc_info=True,
                    )

            # If strict is True, an error will be raised above. If not, recurse
            # through both sets of keys and just ignore missing ones
            to_recurse = akeys | ekeys

            for key in to_recurse:
                try:
                    check_keys_match_recursive(expected_val[key], actual_val[key],
                                               keys + [key], strict)
                except KeyError:
                    logger.debug(
                        "Skipping comparing missing key %s due to strict=%s",
                        key,
                        strict,
                    )
        elif isinstance(expected_val, list):
            if len(expected_val) != len(actual_val):
                raise_from(
                    exceptions.KeyMismatchError(
                        "Length of returned list was different than expected - expected {} items, got {} ({})"
                        .format(len(expected_val), len(actual_val), full_err())),
                    e,
                )

            # TODO
            # Check things in the wrong order?

            for i, (e_val, a_val) in enumerate(zip(expected_val, actual_val)):
                try:
                    check_keys_match_recursive(e_val, a_val, keys + [i], strict)
                except exceptions.KeyMismatchError as sub_e:
                    # This will still raise an error, but it will be more
                    # obvious where the error came from (in python 3 at least)
                    # and will take ANYTHING into account
                    raise_from(sub_e, e)
        elif expected_val is None:
            warnings.warn(
                "Expected value was 'null', so this check will pass - this "
                "will be removed in a future version. If you want to check "
                "against 'any' value, use '!anything' instead.",
                FutureWarning,
            )
        elif expected_val is ANYTHING:
            logger.debug("Actual value = '%s' - matches !anything", actual_val)
        elif isinstance(expected_val, TypeSentinel) and expected_matches:
            logger.debug(
                "Actual value = '%s' - matches !any%s",
                actual_val,
                expected_val.constructor,
            )
        else:
            raise_from(
                exceptions.KeyMismatchError("Key mismatch: ({})".format(full_err())), e)
def readFile(self, inputFilePath, enforceAscii=True, selectList=None, excludeFlag=False,
             logFilePath=None, outDirPath=None, cleanUp=True, **kwargs):
    """Parse the data blocks in the input mmCIF format data file into list of DataContainers().
    The data category content within each data block is stored as a collection of DataCategory
    objects within each DataContainer.

    Args:
        inputFilePath (string): Input file path
        enforceAscii (bool, optional): Flag to requiring pre-filtering operation to convert input
            file to ASCII encoding. See encoding error options.
        selectList (List, optional): List of data category names to be extracted or excluded from
            the input file (default: select/extract)
        excludeFlag (bool, optional): Flag to indicate selectList should be treated as an exclusion list
        logFilePath (string, optional): Log file path (if not provided this will be derived from the input file.)
        outDirPath (string, optional): Path for translated/reencoded files and default logfiles.
        cleanUp (bool, optional): Flag to automatically remove logs and temporary files on exit.
        **kwargs: Placeholder for missing keyword arguments.

    Returns:
        List of DataContainers: Contents of input file parsed into a list of DataContainer objects.
    """
    if kwargs:
        logger.warning("Unsupported keyword arguments %s", kwargs.keys())
    asciiFilePath = None
    filePath = str(inputFilePath)
    # oPath = outDirPath if outDirPath else '.'
    oPath = self._chooseTemporaryPath(inputFilePath, outDirPath=outDirPath)
    try:
        #
        lPath = logFilePath
        if not lPath:
            lPath = self._getDefaultFileName(filePath, fileType="cif-parser-log", outDirPath=oPath)
        #
        self._setLogFilePath(lPath)
        #
        if not self._fileExists(filePath):
            return []
        #
        filePath = self._uncompress(filePath, oPath)
        tPath = filePath
        if enforceAscii:
            asciiFilePath = self._getDefaultFileName(filePath, fileType="cif-parser-ascii",
                                                     fileExt="cif", outDirPath=oPath)
            encodingErrors = "xmlcharrefreplace" if self._useCharRefs else "ignore"
            logger.debug("Filtering input file to %s using encoding errors as %s", asciiFilePath, encodingErrors)
            ok = self._toAscii(filePath, asciiFilePath, chunkSize=5000, encodingErrors=encodingErrors,
                               readEncodingErrors=self._readEncodingErrors)
            if ok:
                tPath = asciiFilePath
        #
        readDef = None
        if selectList is not None and selectList:
            readDef = self.__getSelectionDef(selectList, excludeFlag)
        #
        containerL, _ = self.__readData(tPath, readDef=readDef, cleanUp=cleanUp, logFilePath=lPath,
                                        maxLineLength=self._maxInputLineLength)
        #
        if cleanUp:
            self._cleanupFile(asciiFilePath, asciiFilePath)
            self._cleanupFile(filePath != str(inputFilePath), filePath)
        self._setContainerProperties(containerL, locator=str(inputFilePath),
                                     load_date=self._getTimeStamp(), uid=uuid.uuid4().hex)
        #
        return containerL
    except (PdbxError, PdbxSyntaxError) as ex:
        self._cleanupFile(asciiFilePath and cleanUp, asciiFilePath)
        if self._raiseExceptions:
            raise_from(ex, None)
            # raise ex from None
    except Exception as e:
        self._cleanupFile(asciiFilePath and cleanUp, asciiFilePath)
        msg = "Failing read for %s with %s" % (filePath, str(e))
        self._logError(msg)

    return []
def __getattr__(self, attr):
    raise_from(RuntimeError(self.message), self.exc)
def _format_test_marks(original_marks, fmt_vars, test_name):
    """Given the 'raw' marks from the test and any available format variables,
    generate new marks for this test

    Args:
        original_marks (list): Raw string from test - should correspond to either a
            pytest builtin mark or a custom user mark
        fmt_vars (dict): dictionary containing available format variables
        test_name (str): Name of test (for error logging)

    Returns:
        tuple: first element is normal pytest mark objects, second element is all
            marks which were formatted (no matter their content)

    Todo:
        Fix doctests below - failing due to missing pytest markers

    Example:

        # >>> _format_test_marks([], {}, 'abc')
        # ([], [])
        # >>> _format_test_marks(['tavernmarker'], {}, 'abc')
        # (['tavernmarker'], [])
        # >>> _format_test_marks(['{formatme}'], {'formatme': 'tavernmarker'}, 'abc')
        # (['tavernmarker'], [])
        # >>> _format_test_marks([{'skipif': '{skiptest}'}], {'skiptest': true}, 'abc')
        # (['tavernmarker'], [])

    """

    pytest_marks = []
    formatted_marks = []

    for m in original_marks:
        if isinstance(m, str):
            # a normal mark
            m = format_keys(m, fmt_vars)
            pytest_marks.append(getattr(pytest.mark, m))
        elif isinstance(m, dict):
            # skipif or parametrize (for now)
            for markname, extra_arg in m.items():
                # NOTE
                # cannot do 'skipif' and rely on a parametrized
                # argument.
                try:
                    extra_arg = format_keys(extra_arg, fmt_vars)
                except exceptions.MissingFormatError as e:
                    msg = ("Tried to use mark '{}' (with value '{}') in test '{}' but "
                           "one or more format variables was not in any configuration "
                           "file used by the test".format(markname, extra_arg, test_name))
                    # NOTE
                    # we could continue and let it fail in the test, but
                    # this gives a better indication of what actually
                    # happened (even if it is difficult to test)
                    raise_from(exceptions.MissingFormatError(msg), e)
                else:
                    pytest_marks.append(getattr(pytest.mark, markname)(extra_arg))
                    formatted_marks.append({markname: extra_arg})
        else:
            raise exceptions.BadSchemaError("Unexpected mark type '{}'".format(type(m)))

    return pytest_marks, formatted_marks
def contract(tensor1, tensor2, labels1, labels2, index_slice1=None,
             index_slice2=None):
    """
    Contract the indices of `tensor1` specified in `labels1` with the indices
    of `tensor2` specified in `labels2`.

    This is an intuitive wrapper for numpy's `tensordot` function. A pairwise
    tensor contraction is specified by a pair of tensors `tensor1` and
    `tensor2`, a set of index labels `labels1` from `tensor1`, and a set of
    index labels `labels2` from `tensor2`. All indices of `tensor1` with label
    in `labels1` are fused (preserving order) into a single label, and
    likewise for `tensor2`, then these two fused indices are contracted.

    Parameters
    ----------
    tensor1, tensor2 : Tensor
        The two tensors to be contracted.
    labels1, labels2 : str or list
        The indices of `tensor1` and `tensor2` to be contracted. Can either be
        a single label, or a list of labels.

    Examples
    --------
    Define a random 2x2 tensor with index labels "spam" and "eggs" and a
    random 2x3x2x4 tensor with index labels 'i0', 'i1', etc.

    >>> A = random_tensor(2, 2, labels = ["spam", "eggs"])
    >>> B = random_tensor(2, 3, 2, 4)
    >>> print(B)
    Tensor object: shape = (2, 3, 2, 4), labels = ['i0', 'i1', 'i2', 'i3']

    Contract the "spam" index of tensor A with the "i2" index of tensor B.

    >>> C = contract(A, B, "spam", "i2")
    >>> print(C)
    Tensor object: shape = (2, 2, 3, 4), labels = ['eggs', 'i0', 'i1', 'i3']

    Contract the "spam" index of tensor A with the "i0" index of tensor B and
    also contract the "eggs" index of tensor A with the "i2" index of tensor B.

    >>> D = contract(A, B, ["spam", "eggs"], ["i0", "i2"])
    >>> print(D)
    Tensor object: shape = (3, 4), labels = ['i1', 'i3']

    Note that the following shorthand can be used to perform the same
    operation described above.

    >>> D = A["spam", "eggs"]*B["i0", "i2"]
    >>> print(D)
    Tensor object: shape = (3, 4), labels = ['i1', 'i3']

    Returns
    -------
    C : Tensor
        The result of the tensor contraction. Regarding the `data` and
        `labels` attributes of this tensor, `C` will have all of the
        uncontracted indices of `tensor1` and `tensor2`, with the indices of
        `tensor1` always coming before those of `tensor2`, and their internal
        order preserved.
    """
    # If the input labels is not a list, convert to list with one entry
    if not isinstance(labels1, list):
        labels1 = [labels1]
    if not isinstance(labels2, list):
        labels2 = [labels2]

    tensor1_indices = []
    for label in labels1:
        # Append all indices to tensor1_indices with label
        tensor1_indices.extend(
            [i for i, x in enumerate(tensor1.labels) if x == label])

    tensor2_indices = []
    for label in labels2:
        # Append all indices to tensor2_indices with label
        tensor2_indices.extend(
            [i for i, x in enumerate(tensor2.labels) if x == label])

    # Replace the index -1 with len(tensor1_indices),
    # to refer to the last element in the list
    if index_slice1 is not None:
        index_slice1 = [
            x if x != -1 else len(tensor1_indices) - 1 for x in index_slice1
        ]
    if index_slice2 is not None:
        index_slice2 = [
            x if x != -1 else len(tensor2_indices) - 1 for x in index_slice2
        ]

    # Select some subset or permutation of these indices if specified
    # If no list is specified, contract all indices with the specified labels
    # If an empty list is specified, no indices will be contracted
    if index_slice1 is not None:
        tensor1_indices = [
            j for i, j in enumerate(tensor1_indices) if i in index_slice1
        ]
    if index_slice2 is not None:
        tensor2_indices = [
            j for i, j in enumerate(tensor2_indices) if i in index_slice2
        ]

    # Contract the two tensors
    try:
        C = Tensor(
            np.tensordot(tensor1.data, tensor2.data,
                         (tensor1_indices, tensor2_indices)))
    except ValueError as e:
        # Print more useful info in case of ValueError.
        # Check if number of indices are equal
        if not len(tensor1_indices) == len(tensor2_indices):
            raise_from(
                ValueError('Number of indices in contraction does not match.',
                           len(tensor1_indices), len(tensor2_indices)), e)
        # Check if indices have equal dimensions
        for i in range(len(tensor1_indices)):
            d1 = tensor1.data.shape[tensor1_indices[i]]
            d2 = tensor2.data.shape[tensor2_indices[i]]
            if d1 != d2:
                raise_from(
                    ValueError(labels1[i] + ' with dim=' + str(d1) +
                               ' does not match ' + labels2[i] +
                               ' with dim=' + str(d2)), e)
        # Check if indices exist
        for i in range(len(labels1)):
            if not labels1[i] in tensor1.labels:
                raise_from(
                    ValueError(labels1[i] + ' not in list of labels for tensor1'), e)
            if not labels2[i] in tensor2.labels:
                raise_from(
                    ValueError(labels2[i] + ' not in list of labels for tensor2'), e)
        # Re-raise exception
        raise e

    # The following removes the contracted indices from the list of labels
    # and concatenates them
    new_tensor1_labels = [
        i for j, i in enumerate(tensor1.labels) if j not in tensor1_indices
    ]
    new_tensor2_labels = [
        i for j, i in enumerate(tensor2.labels) if j not in tensor2_indices
    ]
    C.labels = new_tensor1_labels + new_tensor2_labels

    return C
def check_keys_match_recursive(expected_val, actual_val, keys):
    """Utility to recursively check response values

    expected and actual both have to be of the same type or it will raise an
    error.

    Example:

        >>> check_keys_match_recursive({"a": {"b": "c"}}, {"a": {"b": "c"}}, []) is None
        True
        >>> check_keys_match_recursive({"a": {"b": "c"}}, {"a": {"b": "d"}}, []) # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
            File "/home/michael/code/tavern/tavern/tavern/util/dict_util.py", line 223, in check_keys_match_recursive
        tavern.util.exceptions.KeyMismatchError: Key mismatch: (expected["a"]["b"] = 'c', actual["a"]["b"] = 'd')

    Todo:
        This could be turned into a single-dispatch function for cleaner
        code and to remove a load of the isinstance checks

    Args:
        expected_val (dict, str): expected value
        actual_val (dict, str): actual value

    Raises:
        KeyMismatchError: expected_val and actual_val did not match
    """

    def full_err():
        """Get error in the format:

        a["b"]["c"] = 4, b["b"]["c"] = {'key': 'value'}
        """

        def _format_err(which):
            return "{}{}".format(which, "".join('["{}"]'.format(key) for key in keys))

        e_formatted = _format_err("expected")
        a_formatted = _format_err("actual")
        return "{} = '{}', {} = '{}'".format(e_formatted, expected_val,
                                             a_formatted, actual_val)

    # Check required because of python 2/3 unicode compatability when loading yaml
    if isinstance(actual_val, ustr):
        actual_type = str
    else:
        actual_type = type(actual_val)

    if expected_val == ANYTHING:
        # Match anything. We could just early exit here but having the debug
        # logging below is useful
        expected_matches = True
    elif isinstance(expected_val, TypeSentinel):
        # If the 'expected' type is actually just a sentinel for another type,
        # then it should match
        expected_matches = expected_val.constructor == actual_type
    else:
        # Normal matching
        expected_matches = isinstance(expected_val, actual_type)

    if expected_val != ANYTHING:
        # NOTE
        # Second part of this check will be removed in future - see deprecation
        # warning below for details
        if not expected_matches and expected_val is not None:
            raise exceptions.KeyMismatchError(
                "Type of returned data was different than expected ({})".format(full_err()))

    if isinstance(expected_val, dict):
        if set(expected_val.keys()) != set(actual_val.keys()):
            raise exceptions.KeyMismatchError(
                "Structure of returned data was different than expected ({})".format(full_err()))

        for key in expected_val:
            check_keys_match_recursive(expected_val[key], actual_val[key], keys + [key])
    elif isinstance(expected_val, list):
        if len(expected_val) != len(actual_val):
            raise exceptions.KeyMismatchError(
                "Length of returned list was different than expected ({})".format(full_err()))

        # TODO
        # Check things in the wrong order?

        try:
            # Do a simple check first because they might be identical
            assert actual_val == expected_val
        except AssertionError as e:
            for i, (e_val, a_val) in enumerate(zip(expected_val, actual_val)):
                try:
                    check_keys_match_recursive(e_val, a_val, keys + [i])
                except exceptions.KeyMismatchError as sub_e:
                    # This will still raise an error, but it will be more
                    # obvious where the error came from (in python 3 at least)
                    # and will take ANYTHING into account
                    raise_from(sub_e, e)
    else:
        try:
            assert actual_val == expected_val
        except AssertionError as e:
            if expected_val is None:
                warnings.warn(
                    "Expected value was 'null', so this check will pass - this "
                    "will be removed in a future version. If you want to check "
                    "against 'any' value, use '!anything' instead.",
                    FutureWarning)
            elif expected_val is ANYTHING:
                logger.debug("Actual value = '%s' - matches !anything", actual_val)
            elif isinstance(expected_val, TypeSentinel):
                if not expected_matches:
                    raise_from(
                        exceptions.KeyMismatchError("Key mismatch: ({})".format(full_err())), e)
                logger.debug("Actual value = '%s' - matches !any%s",
                             actual_val, expected_val.constructor)
            else:
                raise_from(
                    exceptions.KeyMismatchError("Key mismatch: ({})".format(full_err())), e)
def get_domain(email):
    try:
        return email.split('@')[1].lower()
    except (IndexError, AttributeError) as e:
        raise_from(ValueError("'%s' is not a valid email" % email), e)
def __init__(self, **kwargs):
    expected_blocks = {
        "client": {
            "client_id",
            "clean_session",
            # Can't really use this easily...
            # "userdata",
            # Force mqttv311 - fix if this becomes an issue
            # "protocol",
            "transport",
        },
        "connect": {"host", "port", "keepalive", "timeout"},
        "tls": {
            "enable",
            "ca_certs",
            "cert_reqs",
            "certfile",
            "keyfile",
            "tls_version",
            "ciphers",
        },
        "auth": {"username", "password"},
    }

    logger.debug("Initialising MQTT client with %s", kwargs)

    # check main block first
    check_expected_keys(expected_blocks.keys(), kwargs)

    # then check constructor/connect/tls_set args
    self._client_args = kwargs.pop("client", {})
    check_expected_keys(expected_blocks["client"], self._client_args)

    self._connect_args = kwargs.pop("connect", {})
    check_expected_keys(expected_blocks["connect"], self._connect_args)

    self._auth_args = kwargs.pop("auth", {})
    check_expected_keys(expected_blocks["auth"], self._auth_args)

    if "host" not in self._connect_args:
        msg = "Need 'host' in 'connect' block for mqtt"
        logger.error(msg)
        raise exceptions.MissingKeysError(msg)

    self._connect_timeout = self._connect_args.pop("timeout", 3)

    # If there is any tls kwarg (including 'enable'), enable tls
    self._tls_args = kwargs.pop("tls", {})
    check_expected_keys(expected_blocks["tls"], self._tls_args)
    self._handle_tls_args()
    logger.debug("TLS is %s", "enabled" if self._enable_tls else "disabled")

    logger.debug("Paho client args: %s", self._client_args)
    self._client = paho.Client(**self._client_args)
    self._client.enable_logger()

    if self._auth_args:
        self._client.username_pw_set(**self._auth_args)

    self._client.on_message = self._on_message

    if self._enable_tls:
        try:
            self._client.tls_set(**self._tls_args)
        except ValueError as e:
            # tls_set only raises ValueErrors directly
            # (the cause belongs as raise_from's second argument,
            # not inside the exception constructor)
            raise_from(exceptions.MQTTTLSError("Unexpected error enabling TLS"), e)
        except ssl.SSLError as e:
            # incorrect cipher, etc.
            raise_from(exceptions.MQTTTLSError("Unexpected SSL error enabling TLS"), e)

    # Arbitrary number, could just be 1 and only accept 1 message per stages
    # but we might want to raise an error if more than 1 message is received
    # during a test stage.
    self._message_queue = Queue(maxsize=10)
    self._userdata = {"queue": self._message_queue}
    self._client.user_data_set(self._userdata)

    # Topics to subscribe to - mapping of subscription message id to a tuple
    # of (topic, sub_status) where sub_status is true or false based on
    # whether it has finished subscribing or not
    self._subscribed = {}
    # callback
    self._client.on_subscribe = self._on_subscribe
def _handle_tls_args(self):
    """Make sure TLS options are valid
    """
    if self._tls_args:
        # If _any_ options are specified, first assume we DO want it enabled
        self._enable_tls = True
    else:
        self._enable_tls = False
        return

    if "enable" in self._tls_args:
        if not self._tls_args.pop("enable"):
            # if enable=false, return immediately
            self._enable_tls = False
            return

    if "keyfile" in self._tls_args and "certfile" not in self._tls_args:
        raise exceptions.MQTTTLSError(
            "If specifying a TLS keyfile, a certfile also needs to be specified"
        )

    def check_file_exists(key):
        try:
            with open(self._tls_args[key], "r"):
                pass
        except IOError as e:
            # open() raises IOError/OSError on a missing or unreadable file
            raise_from(
                exceptions.MQTTTLSError(
                    "Couldn't load '{}' from '{}'".format(key, self._tls_args[key])
                ),
                e,
            )
        except KeyError:
            pass

    # could be moved to schema validation stage
    check_file_exists("cert_reqs")
    check_file_exists("certfile")
    check_file_exists("keyfile")

    # This shouldn't raise an AttributeError because it's enumerated
    try:
        self._tls_args["cert_reqs"] = getattr(ssl, self._tls_args["cert_reqs"])
    except KeyError:
        pass

    try:
        self._tls_args["tls_version"] = getattr(ssl, self._tls_args["tls_version"])
    except AttributeError as e:
        raise_from(
            exceptions.MQTTTLSError(
                "Error getting TLS version from "
                "ssl module - ssl module had no attribute '{}'. Check the "
                "documentation for the version of python you're using to see "
                "if this is a valid option.".format(self._tls_args["tls_version"])
            ),
            e,
        )
    except KeyError:
        pass
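# The getattr(ssl, ...) lookups above resolve string config values into ssl
# module constants. For illustration (the constant name is just an example):
import ssl

tls_version = getattr(ssl, "PROTOCOL_TLSv1_2")
assert tls_version is ssl.PROTOCOL_TLSv1_2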
def writeFile(self, outputFilePath, containerList, maxLineLength=900, enforceAscii=True,
              lastInOrder=None, selectOrder=None, columnAlignFlag=True, useStopTokens=False,
              formattingStep=None, **kwargs):
    """Write input list of data containers to the specified output file path in mmCIF format.

    Args:
        outputFilePath (string): output file path
        containerList (list DataContainer objects, optional)
        maxLineLength (int, optional): Maximum length of output line (content is wrapped beyond this length)
        enforceAscii (bool, optional): Filter output (not implemented - content must be ascii compatible on input)
        lastInOrder (list of category names, optional): Move data categories in this list to end of each data block
        selectOrder (list of category names, optional): Write only data categories on this list.
        columnAlignFlag (bool, optional): Format the output in aligned columns (default=True) (Native Python Only)
        useStopTokens (bool, optional): Include terminating 'stop_' tokens at the end of mmCIF categories (loop_'s) (Native Python only)
        formattingStep (int, optional): The number of row samples within each category used to estimate maximum
            column width for data alignment (Native Python only)
        **kwargs: Placeholder for unsupported key value pairs

    Returns:
        bool: Completion status
    """
    lastInOrder = lastInOrder if lastInOrder else [
        "pdbx_nonpoly_scheme", "pdbx_poly_seq_scheme", "atom_site", "atom_site_anisotrop"]
    if kwargs:
        logger.warning("Unsupported keyword arguments %s", kwargs.keys())
    try:
        if enforceAscii:
            encoding = "ascii"
        else:
            encoding = "utf-8"
        #
        if sys.version_info[0] > 2:
            with open(outputFilePath, "w", encoding=encoding) as ofh:
                self.__writeFile(
                    ofh,
                    containerList,
                    maxLineLength=maxLineLength,
                    columnAlignFlag=columnAlignFlag,
                    lastInOrder=lastInOrder,
                    selectOrder=selectOrder,
                    useStopTokens=useStopTokens,
                    formattingStep=formattingStep,
                    enforceAscii=enforceAscii,
                    cnvCharRefs=self._useCharRefs,
                )
        else:
            if enforceAscii:
                with io.open(outputFilePath, "w", encoding=encoding) as ofh:
                    self.__writeFile(
                        ofh,
                        containerList,
                        maxLineLength=maxLineLength,
                        columnAlignFlag=columnAlignFlag,
                        lastInOrder=lastInOrder,
                        selectOrder=selectOrder,
                        useStopTokens=useStopTokens,
                        formattingStep=formattingStep,
                        enforceAscii=enforceAscii,
                        cnvCharRefs=self._useCharRefs,
                    )
            else:
                with open(outputFilePath, "wb") as ofh:
                    self.__writeFile(
                        ofh,
                        containerList,
                        maxLineLength=maxLineLength,
                        columnAlignFlag=columnAlignFlag,
                        lastInOrder=lastInOrder,
                        selectOrder=selectOrder,
                        useStopTokens=useStopTokens,
                        formattingStep=formattingStep,
                        enforceAscii=enforceAscii,
                        cnvCharRefs=self._useCharRefs,
                    )
        return True
    except Exception as ex:
        if self._raiseExceptions:
            raise_from(ex, None)
        else:
            logger.exception("Failing write for %s with %s", outputFilePath, str(ex))
            logger.error("Failing write for %s with %s", outputFilePath, str(ex))

    return False
def __call__(self, value, error_callback, convertor_fmt_str):
    try:
        return self._choices[value]
    except KeyError as ve:
        error_callback(convertor_fmt_str, value, self.value_error_str)
        raise_from(ConvertorError(str(ve)), ve)
def readFile(self, inputFilePath, enforceAscii=False, selectList=None, excludeFlag=False,
             logFilePath=None, outDirPath=None, cleanUp=False, **kwargs):
    """Parse the data blocks in the input mmCIF format data file into list of data or definition
    containers. The data category content within each data block is stored as a collection of
    DataCategory objects within each container.

    Args:
        inputFilePath (string): Input file path
        enforceAscii (bool, optional): Flag to requiring ASCII encoding. See encoding error options.
        selectList (List, optional): List of data category names to be extracted or excluded from
            the input file (default: select/extract)
        excludeFlag (bool, optional): Flag to indicate selectList should be treated as an exclusion list
        logFilePath (string, optional): Log file path (if not provided this will be derived from the input file.)
        outDirPath (string, optional): Path for translated/re-encoded files and default logfiles.
        cleanUp (bool, optional): Flag to automatically remove logs and temporary files on exit.
        **kwargs: Placeholder for missing keyword arguments.

    Returns:
        List of DataContainers: Contents of input file parsed into a list of DataContainer objects.
    """
    if kwargs:
        logger.warning("Unsupported keyword arguments %s", kwargs.keys())
    filePath = str(inputFilePath)
    # oPath = outDirPath if outDirPath else '.'
    oPath = self._chooseTemporaryPath(inputFilePath, outDirPath=outDirPath)
    containerList = []
    if enforceAscii:
        encoding = "ascii"
    else:
        encoding = "utf-8"
    try:
        #
        lPath = logFilePath
        if not lPath:
            lPath = self._getDefaultFileName(filePath, fileType="cif-parser-log", outDirPath=oPath)
        #
        self._setLogFilePath(lPath)
        # ---
        if self.__isLocal(filePath) and not self._fileExists(filePath):
            return []
        #
        if sys.version_info[0] > 2:
            if self.__isLocal(filePath):
                filePath = self._uncompress(filePath, oPath)
                with open(filePath, "r", encoding=encoding, errors=self._readEncodingErrors) as ifh:
                    pRd = PdbxReader(ifh)
                    pRd.read(containerList, selectList, excludeFlag=excludeFlag)
            else:
                with closing(requests.get(filePath)) as ifh:
                    it = (line.decode(encoding) for line in ifh.iter_lines())
                    pRd = PdbxReader(it)
                    pRd.read(containerList, selectList, excludeFlag=excludeFlag)
        else:
            if self.__isLocal(filePath):
                filePath = self._uncompress(filePath, oPath)
                if enforceAscii:
                    with io.open(filePath, "r", encoding=encoding, errors=self._readEncodingErrors) as ifh:
                        pRd = PdbxReader(ifh)
                        pRd.read(containerList, selectList, excludeFlag=excludeFlag)
                else:
                    with open(filePath, "r") as ifh:
                        pRd = PdbxReader(ifh)
                        pRd.read(containerList, selectList, excludeFlag=excludeFlag)
            else:
                with closing(requests.get(filePath)) as ifh:
                    it = (line.decode(encoding) for line in ifh.iter_lines())
                    pRd = PdbxReader(it)
                    pRd.read(containerList, selectList, excludeFlag=excludeFlag)
        if cleanUp:
            self._cleanupFile(lPath, lPath)
            self._cleanupFile(filePath != str(inputFilePath), filePath)
        self._setContainerProperties(containerList, locator=str(inputFilePath),
                                     load_date=self._getTimeStamp(), uid=uuid.uuid4().hex)
    except (PdbxError, PdbxSyntaxError) as ex:
        msg = "File %r with %s" % (filePath, str(ex))
        self._appendToLog([msg])
        self._cleanupFile(lPath and cleanUp, lPath)
        if self._raiseExceptions:
            raise_from(ex, None)
            # raise ex from None
    except Exception as e:
        msg = "File %r with %s" % (filePath, str(e))
        self._appendToLog([msg])
        self._cleanupFile(lPath and cleanUp, lPath)
        if self._raiseExceptions:
            raise e
        else:
            logger.error("Failing read for %s with %s", filePath, str(e))

    return containerList
def run_module(root, module, build_script='make.py', osname=None, run_all=True):
    """.. Run module.

    Runs script `build_script` in module directory `module` relative to root of repository `root`.

    Parameters
    ----------
    root : str
        Directory of root.
    module : str
        Name of module.
    build_script : str
        Name of build script. Defaults to ``make.py``.
    osname : str, optional
        Name of OS. Used to determine syntax of system command. Defaults to ``os.name``.
    run_all : bool
        Whether the module is being run from the root; if so, the conda status
        is not rechecked.

    Returns
    -------
    None

    Example
    -------
    The following code runs the script ``root/module/make.py``.

    .. code-block:: python

        run_module(root = 'root', module = 'module')
    """
    osname = osname if osname else os.name  # https://github.com/sphinx-doc/sphinx/issues/759

    try:
        module_dir = os.path.join(root, module)
        os.chdir(module_dir)

        build_script = norm_path(build_script)
        if not os.path.isfile(build_script):
            raise CritError(messages.crit_error_no_file % build_script)

        message = 'Running module `%s`' % module
        message = format_message(message)
        message = colored(message, attrs=['bold'])
        print('\n' + message)

        if run_all:
            status = os.system('%s %s run_all' % (
                metadata.default_executables[osname]['python'], build_script))
        else:
            status = os.system('%s %s' % (
                metadata.default_executables[osname]['python'], build_script))
        if status != 0:
            raise ProgramError()
    except ProgramError:
        sys.exit()
    except:
        error_message = 'Error with `run_module`. Traceback can be found below.'
        error_message = format_message(error_message)
        raise_from(ColoredError(error_message, traceback.format_exc()), None)
def run_latex(paths, program, **kwargs):
    """.. Run LaTeX script using system command.

    Compiles document ``program`` using system command, with document specified
    in the form of ``script.tex``. Status messages are appended to file
    ``makelog``. PDF outputs are written in directory ``output_dir``.

    Parameters
    ----------
    paths : dict
        Dictionary of paths. Dictionary should contain values for all keys listed below.
    program : str
        Path of script to run.

    Path Keys
    ---------
    makelog : str
        Path of makelog.
    output_dir : str
        Directory to write PDFs.

    Note
    ----
    We recommend leaving all other parameters to their defaults.

    Note
    ----
    This function creates and removes a directory named ``latex_auxiliary_dir``.

    Other Parameters
    ----------------
    osname : str, optional
        Name of OS. Used to determine syntax of system command. Defaults to ``os.name``.
    shell : `bool`, optional
        See `here <https://docs.python.org/3/library/subprocess.html#frequently-used-arguments>`_.
        Defaults to ``True``.
    log : str, optional
        Path of program log. Program log is only written if specified.
        Defaults to ``''`` (i.e., not written).
    executable : str, optional
        Executable to use for system command.
        Defaults to executable specified in :ref:`default settings<default settings>`.
    option : str, optional
        Options for system command. Defaults to options specified in
        :ref:`default settings<default settings>`.
    args : str, optional
        Not applicable.

    Returns
    -------
    None

    Example
    -------
    .. code-block:: python

        run_latex(paths, program = 'script.tex')
    """
    try:
        makelog = get_path(paths, 'makelog')
        output_dir = get_path(paths, 'output_dir')
        direct = LyXDirective(output_dir=output_dir,
                              application='latex',
                              program=program,
                              makelog=makelog,
                              **kwargs)
        temp_name = direct.program_name
        temp_program = direct.program

        # Generate folder for auxiliary files
        os.mkdir('latex_auxiliary_dir')

        # Execute
        command = metadata.commands[direct.osname][direct.application] % (
            direct.executable, direct.option, temp_program)
        exit_code, stderr = direct.execute_command(command)
        direct.write_log()
        if exit_code != 0:
            error_message = 'LaTeX program executed with errors. Traceback can be found below.'
            error_message = format_message(error_message)
            raise_from(ProgramError(error_message, stderr), None)

        # Move PDF output
        temp_pdf = os.path.join('latex_auxiliary_dir', temp_name + '.pdf')
        output_pdf = os.path.join(direct.output_dir, direct.program_name + '.pdf')

        if temp_pdf != output_pdf:
            shutil.copy2(temp_pdf, output_pdf)

        # Remove auxiliary files
        shutil.rmtree('latex_auxiliary_dir')
    except ProgramError:
        raise
    except:
        error_message = 'Error with `run_latex`. Traceback can be found below.'
        error_message = format_message(error_message)
        write_to_makelog(paths, error_message + '\n\n' + traceback.format_exc())
        raise_from(ColoredError(error_message, traceback.format_exc()), None)
def run_jupyter(paths, program, timeout=None, kernel_name=''):
    """.. Run Jupyter notebook using Python API.

    Runs notebook ``program`` using Python API, with notebook specified in the
    form of ``notebook.ipynb``. Status messages are appended to file ``makelog``.

    Parameters
    ----------
    paths : dict
        Dictionary of paths. Dictionary should contain values for all keys listed below.
    program : str
        Path of script to run.

    Path Keys
    ---------
    makelog : str
        Path of makelog.

    Note
    ----
    We recommend leaving all other parameters to their defaults.

    Other Parameters
    ----------------
    timeout : int, optional
        Time to wait (in seconds) to finish executing a cell before raising exception.
        Defaults to no timeout.
    kernel_name : str, optional
        Name of kernel to use for execution (e.g., ``python2`` for standard
        Python 2 kernel, ``python3`` for standard Python 3 kernel).
        Defaults to ``''`` (i.e., kernel specified in notebook).

    Returns
    -------
    None

    Example
    -------
    .. code-block:: python

        run_jupyter(paths, program = 'notebook.ipynb')
    """
    try:
        program = norm_path(program)

        with open(program) as f:
            message = 'Processing notebook: `%s`' % program
            write_to_makelog(paths, message)
            print(colored(message, 'cyan'))

            if not kernel_name:
                kernel_name = 'python%s' % sys.version_info[0]
            ep = ExecutePreprocessor(timeout=timeout, kernel_name=kernel_name)
            nb = nbformat.read(f, as_version=4)
            ep.preprocess(nb, {'metadata': {'path': '.'}})
        with open(program, 'wt') as f:
            nbformat.write(nb, f)
    except:
        error_message = 'Error with `run_jupyter`. Traceback can be found below.'
        error_message = format_message(error_message)
        write_to_makelog(paths, error_message + '\n\n' + traceback.format_exc())
        raise_from(ColoredError(error_message, traceback.format_exc()), None)
def link_inputs(paths, file_list):
    """.. Create symlinks to inputs from list of files containing linking instructions.

    Create symbolic links using instructions contained in files of list
    ``file_list``. Instructions are `string formatted
    <https://docs.python.org/3.4/library/string.html#format-string-syntax>`__
    using paths dictionary ``paths``. Symbolic links are written in directory
    ``input_dir``. Status messages are appended to file ``makelog``.

    Instruction files on how to create symbolic links (destinations) from
    targets (sources) should be formatted in the following way.

    .. code-block:: md

        # Each line of instruction should contain a destination and source delimited by a `|`
        # Lines beginning with # are ignored
        destination | source

    .. Note::
        Symbolic links can be created to both files and directories.

    .. Note::
        Instruction files can be specified with the * shell pattern (see `here
        <https://www.gnu.org/software/findutils/manual/html_node/find_html/Shell-Pattern-Matching.html>`__).
        Destinations and their sources can also be specified with the * shell
        pattern. The number of wildcards must be the same for both destinations
        and sources.

    Parameters
    ----------
    paths : dict
        Dictionary of paths. Dictionary should contain values for all keys
        listed below. Dictionary additionally used to string format linking
        instructions.
    file_list : str, list
        File or list of files containing linking instructions.

    Path Keys
    ---------
    input_dir : str
        Directory to write symlinks.
    makelog : str
        Path of makelog.

    Returns
    -------
    source_map : list
        List of (source, destination) for each symlink created.

    Example
    -------
    Suppose you call the following function.

    .. code-block:: python

        link_inputs(paths, ['file1'])

    Suppose ``paths`` contained the following values.

    .. code-block:: md

        paths = {'root': '/User/root/',
                 'makelog': 'make.log',
                 'input_dir': 'input'}

    Now suppose instruction file ``file1`` contained the following text.

    .. code-block:: md

        destination1 | {root}/source1

    The ``{root}`` in the instruction file would be string formatted using
    ``paths``. Therefore, the function would parse the instruction as:

    .. code-block:: md

        destination1 | /User/root/source1

    Example
    -------
    The following code would use instruction files ``file1`` and ``file2`` to
    create symbolic links.

    .. code-block:: python

        link_inputs(paths, ['file1', 'file2'])

    Suppose instruction file ``file1`` contained the following text.

    .. code-block:: md

        destination1 | source1
        destination2 | source2

    Symbolic links ``destination1`` and ``destination2`` would be created in
    directory ``paths['input_dir']``. Their targets would be ``source1`` and
    ``source2``, respectively.

    Example
    -------
    Suppose you have the following targets.

    .. code-block:: md

        source1
        source2
        source3

    Specifying ``destination* | source*`` in one of your instruction files
    would create the following symbolic links in ``paths['input_dir']``.

    .. code-block:: md

        destination1
        destination2
        destination3
    """
    try:
        paths['move_dir'] = get_path(paths, 'input_dir')
        source_map = _create_links(paths, file_list)

        message = 'Input links successfully created!'
        write_to_makelog(paths, message)
        print(colored(message, metadata.color_success))

        return source_map
    except:
        error_message = 'An error was encountered with `link_inputs`. Traceback can be found below.'
        error_message = format_message(error_message)
        write_to_makelog(paths, error_message + '\n\n' + traceback.format_exc())
        raise_from(ColoredError(error_message, traceback.format_exc()), None)
def run_stat_transfer(paths, program, **kwargs): """.. Run StatTransfer script using system command. Runs script ``program`` using system command, with script specified in the form of ``script.stc`` or ``script.stcmd``. Status messages are appended to file ``makelog``. Parameters ---------- paths : dict Dictionary of paths. Dictionary should contain values for all keys listed below. program : str Path of script to run. Path Keys --------- makelog : str Path of makelog. Note ---- We recommend leaving all other parameters to their defaults. Other Parameters ---------------- osname : str, optional Name of OS. Used to determine syntax of system command. Defaults to ``os.name``. shell : `bool`, optional See `here <https://docs.python.org/3/library/subprocess.html#frequently-used-arguments>`_. Defaults to ``True``. log : str, optional Path of program log. Program log is only written if specified. Defaults to ``''`` (i.e., not written). executable : str, optional Executable to use for system command. Defaults to executable specified in :ref:`default settings<default settings>`. option : str, optional Options for system command. Defaults to options specified in :ref:`default settings<default settings>`. args : str, optional Not applicable. Returns ------- None Example ------- .. code-block:: python run_stat_transfer(paths, program = 'script.stc') """ try: makelog = get_path(paths, 'makelog') direct = ProgramDirective(application='st', program=program, makelog=makelog, **kwargs) # Execute command = metadata.commands[direct.osname][direct.application] % ( direct.executable, direct.program) exit_code, stderr = direct.execute_command(command) direct.write_log() if exit_code != 0: error_message = 'StatTransfer program executed with errors. Traceback can be found below.' error_message = format_message(error_message) raise_from(ProgramError(error_message, stderr), None) except ProgramError: raise except: error_message = 'Error with `run_stat_transfer`. Traceback can be found below.' error_message = format_message(error_message) write_to_makelog(paths, error_message + '\n\n' + traceback.format_exc()) raise_from(ColoredError(error_message, traceback.format_exc()), None)
def validate_extensions(value, rule_obj, path):
    """Given a specification for calling a validation function, make sure that
    the arguments are valid (i.e., function is valid, arguments are of the
    correct type...)

    Arguments/return values are sort of pykwalify internals (this function is
    only called from pykwalify) so not listed

    Todo:
        Because this is loaded by pykwalify as a file, we need some kind of
        entry point to set up logging. Or just fork pykwalify and fix the
        various issues in it.

        We should also check the function signature using the `inspect` module

    Raises:
        BadSchemaError: Something in the validation function spec was wrong
    """
    # pylint: disable=unused-argument

    try:
        iter(value)
    except TypeError as e:
        raise_from(
            BadSchemaError(
                "Invalid value for key - things like body/params/headers/data have to be iterable (list, dictionary, string), not a single value"
            ),
            e,
        )

    if isinstance(value, dict) and "$ext" in value:
        expected_keys = {"function", "extra_args", "extra_kwargs"}

        validate_keys = value["$ext"]

        extra = set(validate_keys) - expected_keys
        if extra:
            raise BadSchemaError(
                "Unexpected keys passed to $ext: {}".format(extra))

        if "function" not in validate_keys:
            raise BadSchemaError("No function specified for validation")

        try:
            import_ext_function(validate_keys["function"])
        except Exception as e:  # pylint: disable=broad-except
            raise_from(
                BadSchemaError("Couldn't load {}".format(
                    validate_keys["function"])), e)

        extra_args = validate_keys.get("extra_args")
        extra_kwargs = validate_keys.get("extra_kwargs")

        if extra_args and not isinstance(extra_args, list):
            raise BadSchemaError(
                "Expected a list of extra_args, got {}".format(
                    type(extra_args)))

        if extra_kwargs and not isinstance(extra_kwargs, dict):
            raise BadSchemaError(
                "Expected a dict of extra_kwargs, got {}".format(
                    type(extra_kwargs)))

    return True
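# A hypothetical value that would pass validate_extensions above; the function
# path and arguments are invented for illustration (the `function` entry must
# be an importable "module:function"-style path).
_example_ext_value = {
    "$ext": {
        "function": "mypackage.checks:check_response",  # hypothetical import path
        "extra_args": [200],                            # must be a list if given
        "extra_kwargs": {"strict": True},               # must be a dict if given
    }
}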
def run_stata(paths, program, **kwargs):
    """.. Run Stata script using system command.

    Runs script ``program`` using system command,
    with script specified in the form of ``script.do``.
    Status messages are appended to file ``makelog``.

    Parameters
    ----------
    paths : dict
        Dictionary of paths. Dictionary should contain values for all keys listed below.
    program : str
        Path of script to run.

    Path Keys
    ---------
    makelog : str
        Path of makelog.

    Note
    ----
    We recommend leaving all other parameters to their defaults.

    Note
    ----
    When a do-file contains a space in its name, different versions of Stata
    save the corresponding log file with different names.
    Some versions of Stata truncate the name to everything before
    the first space of the do-file name.

    Other Parameters
    ----------------
    osname : str, optional
        Name of OS. Used to determine syntax of system command.
        Defaults to ``os.name``.
    shell : `bool`, optional
        See `here <https://docs.python.org/3/library/subprocess.html#frequently-used-arguments>`_.
        Defaults to ``True``.
    log : str, optional
        Path of program log. Program log is only written if specified.
        Defaults to ``''`` (i.e., not written).
    executable : str, optional
        Executable to use for system command.
        Defaults to executable specified in :ref:`default settings<default settings>`.
    option : str, optional
        Options for system command.
        Defaults to options specified in :ref:`default settings<default settings>`.
    args : str, optional
        Not applicable.

    Returns
    -------
    None

    Example
    -------
    .. code-block:: python

        run_stata(paths, program = 'script.do')
    """

    try:
        makelog = get_path(paths, 'makelog')
        direct = ProgramDirective(application='stata', program=program, makelog=makelog, **kwargs)

        # Get program output (partial)
        program_name = direct.program.split(" ")[0]
        program_name = os.path.split(program_name)[-1]
        program_name = os.path.splitext(program_name)[0]
        program_log_partial = os.path.join(os.getcwd(), program_name + '.log')

        # Get program output (full)
        program_log_full = os.path.join(os.getcwd(), direct.program_name + '.log')

        # Sanitize program
        if direct.osname == "posix":
            direct.program = re.escape(direct.program)

        # Execute
        command = metadata.commands[direct.osname]['stata'] % (direct.executable, direct.option, direct.program)
        exit_code, stderr = direct.execute_command(command)
        if exit_code != 0:
            error_message = 'Stata program executed with errors. Traceback can be found below.'
            error_message = format_message(error_message)
            raise_from(ProgramError(error_message, stderr), None)
        try:
            output = direct.move_program_output(program_log_partial, direct.log)
        except:
            output = direct.move_program_output(program_log_full, direct.log)
        _check_stata_output(output)
    except ProgramError:
        raise
    except:
        error_message = 'Error with `run_stata`. Traceback can be found below.'
        error_message = format_message(error_message)
        write_to_makelog(paths, error_message + '\n\n' + traceback.format_exc())
        raise_from(ColoredError(error_message, traceback.format_exc()), None)
""" from __future__ import absolute_import, division, print_function from typing import Generator, List # pylint: disable=unused-import import numpy from future.utils import iteritems, raise_from from onda.utils import dynamic_import, data_event, exceptions try: import psana # pylint: disable=import-error except ImportError as exc: raise_from( exc=exceptions.OndaMissingDependencyError( "The psana module could not be loaded. The following dependency does not " "appear to be available on the system: psana."), cause=exc, ) ############################ # # # EVENT HANDLING FUNCTIONS # # # ############################ def _psana_offline_event_generator(psana_source, node_rank, mpi_pool_size): # type: (psana._DataSource, int, int) -> Generator[psana.Event, None, None] # Computes how many events the current worker node should process. Splits the # events as equally as possible amongst the workers. If the number of events cannot # be exactly divided by the number of workers, an additional worker is assigned
def run_lyx(paths, program, doctype='', **kwargs):
    """.. Run LyX script using system command.

    Compiles document ``program`` using system command,
    with document specified in the form of ``script.lyx``.
    Status messages are appended to file ``makelog``.
    PDF outputs are written in directory ``output_dir``.

    Parameters
    ----------
    paths : dict
        Dictionary of paths. Dictionary should contain values for all keys listed below.
    program : str
        Path of script to run.
    doctype : str, optional
        Type of LyX document. Takes either ``'handout'`` or ``'comments'``.
        All other strings will default to standard document type.
        Defaults to ``''`` (i.e., standard document type).

    Path Keys
    ---------
    makelog : str
        Path of makelog.
    output_dir : str
        Directory to write PDFs.

    Note
    ----
    We recommend leaving all other parameters to their defaults.

    Other Parameters
    ----------------
    osname : str, optional
        Name of OS. Used to determine syntax of system command.
        Defaults to ``os.name``.
    shell : `bool`, optional
        See `here <https://docs.python.org/3/library/subprocess.html#frequently-used-arguments>`_.
        Defaults to ``True``.
    log : str, optional
        Path of program log. Program log is only written if specified.
        Defaults to ``''`` (i.e., not written).
    executable : str, optional
        Executable to use for system command.
        Defaults to executable specified in :ref:`default settings<default settings>`.
    option : str, optional
        Options for system command.
        Defaults to options specified in :ref:`default settings<default settings>`.
    args : str, optional
        Not applicable.

    Returns
    -------
    None

    Example
    -------
    .. code-block:: python

        run_lyx(paths, program = 'script.lyx')
    """

    try:
        makelog = get_path(paths, 'makelog')
        output_dir = get_path(paths, 'output_dir')
        direct = LyXDirective(output_dir=output_dir,
                              doctype=doctype,
                              application='lyx',
                              program=program,
                              makelog=makelog,
                              **kwargs)

        # Make handout/comments LyX file
        if direct.doctype:
            temp_name = os.path.join(direct.program_name + '_' + direct.doctype)
            temp_program = os.path.join(direct.program_dir, temp_name + '.lyx')

            beamer = False
            shutil.copy2(direct.program, temp_program)
            for line in fileinput.input(temp_program, inplace=True, backup='.bak'):
                if r'\textclass beamer' in line:
                    beamer = True
                if direct.doctype == 'handout' and beamer and (r'\options' in line):
                    line = line.rstrip('\n') + ', handout\n'
                elif direct.doctype == 'comments' and (r'\begin_inset Note Note' in line):
                    line = line.replace('Note Note', 'Note Greyedout')
                print(line)
        else:
            temp_name = direct.program_name
            temp_program = direct.program

        # Execute
        command = metadata.commands[direct.osname][direct.application] % (direct.executable, direct.option, temp_program)
        exit_code, stderr = direct.execute_command(command)
        direct.write_log()
        if exit_code != 0:
            error_message = 'LyX program executed with errors. Traceback can be found below.'
            error_message = format_message(error_message)
            raise_from(ProgramError(error_message, stderr), None)

        # Move PDF output
        temp_pdf = os.path.join(direct.program_dir, temp_name + '.pdf')
        output_pdf = os.path.join(direct.output_dir, direct.program_name + '.pdf')
        if temp_pdf != output_pdf:
            shutil.copy2(temp_pdf, output_pdf)
            os.remove(temp_pdf)

        # Remove handout/comments LyX file
        if direct.doctype:
            os.remove(temp_program)
    except ProgramError:
        raise
    except:
        error_message = 'Error with `run_lyx`. Traceback can be found below.'
        error_message = format_message(error_message)
        write_to_makelog(paths, error_message + '\n\n' + traceback.format_exc())
        raise_from(ColoredError(error_message, traceback.format_exc()), None)
def plugin_load_error(mgr, entry_point, err): """ Handle import errors """ # pylint: disable=unused-argument msg = "Error loading plugin {} - {}".format(entry_point, err) raise_from(exceptions.PluginLoadError(msg), err)
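# Hedged sketch of how a plugin manager might route load failures through the
# handler above (the namespace is illustrative; stevedore's ExtensionManager
# accepts an on_load_failure_callback with this (manager, entrypoint,
# exception) signature):
#
#   mgr = stevedore.ExtensionManager(
#       namespace="tavern",  # assumed namespace
#       on_load_failure_callback=plugin_load_error,
#   )
#
# Any ImportError raised while loading an entry point then surfaces as a
# PluginLoadError with the original error chained as __cause__.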
def execute_command(paths, command, **kwargs):
    """.. Run system command.

    Runs system command ``command`` with shell execution boolean ``shell``.
    Outputs are appended to file ``makelog`` and written to system command log file ``log``.
    Status messages are appended to file ``makelog``.

    Parameters
    ----------
    paths : dict
        Dictionary of paths. Dictionary should contain values for all keys listed below.
    command : str
        System command to run.
    shell : `bool`, optional
        See `here <https://docs.python.org/3/library/subprocess.html#frequently-used-arguments>`_.
        Defaults to ``True``.
    log : str, optional
        Path of system command log. System command log is only written if specified.
        Defaults to ``''`` (i.e., not written).

    Path Keys
    ---------
    makelog : str
        Path of makelog.

    Note
    ----
    We recommend leaving all other parameters to their defaults.

    Other Parameters
    ----------------
    osname : str, optional
        Name of OS. Used to check if OS is supported.
        Defaults to ``os.name``.

    Returns
    -------
    None

    Example
    -------
    The following code executes the ``ls`` command,
    writes outputs to system command log file ``'file'``,
    and appends outputs and/or status messages to ``paths['makelog']``.

    .. code-block:: python

        execute_command(paths, 'ls', log = 'file')
    """

    try:
        makelog = get_path(paths, 'makelog')
        direct = Directive(makelog=makelog, **kwargs)

        # Execute
        exit_code, stderr = direct.execute_command(command)
        direct.write_log()
        if exit_code != 0:
            error_message = 'Command executed with errors. Traceback can be found below.'
            error_message = format_message(error_message)
            raise_from(ProgramError(error_message, stderr), None)
    except ProgramError:
        raise
    except:
        error_message = 'Error with `execute_command`. Traceback can be found below.'
        error_message = format_message(error_message)
        write_to_makelog(paths, error_message + '\n\n' + traceback.format_exc())
        raise_from(ColoredError(error_message, traceback.format_exc()), None)
def bar(): try: foo() except Exception as err: raise_from(ValueError('blue'), err)
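# Sketch of the chaining bar() above produces, assuming a hypothetical foo()
# that raises KeyError: the ValueError carries the original exception as
# __cause__, so both tracebacks are printed, joined by "The above exception
# was the direct cause of the following exception".
def foo():
    raise KeyError('red')  # stand-in; foo() is not defined in the snippet above

try:
    bar()
except ValueError as err:
    assert isinstance(err.__cause__, KeyError)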
def process(self):
    assert self._genome is not None, 'Error: genome must be specified when preprocessing tracks.'

    atLeastOneFinalized = False
    for trackName in self._allTrackNames():
        assert trackName != ['']
        overlapRulesProcessedForTrackName = []
        collector = PreProcMetaDataCollector(self._genome, trackName)
        try:
            trackName = self._renameTrackNameIfIllegal(trackName)

            for allowOverlaps in [True, False]:
                anyGeSourceManagers = False
                for geSourceManager in self._allGESourceManagers(trackName, allowOverlaps):
                    anyGeSourceManagers = True

                    # PreProcess if needed
                    if self._shouldPreProcess():
                        PreProcessUtils.removeOutdatedPreProcessedFiles(
                            self._genome, trackName, allowOverlaps, self._mode)

                        if self._shouldPrintProcessMessages() and allowOverlaps not in overlapRulesProcessedForTrackName:
                            self._printProcessTrackMessage(trackName, allowOverlaps)
                            overlapRulesProcessedForTrackName.append(allowOverlaps)

                        self._status = 'Trying to preprocess geSource...'
                        geSourceJob = PreProcessGeSourceJob(
                            trackName, geSourceManager, allowOverlaps, self._mode)
                        anyWarnings = geSourceJob.process()

                        if self._raiseIfAnyWarnings and anyWarnings and trackName not in self._warningTrackNames:
                            self._warningTrackNames.append(trackName)

                        collector.updatePreProcDirtyStatus(geSourceJob.hasModifiedData())

                # Finalize overlapRule output if needed
                if anyGeSourceManagers and self._shouldFinalize() and collector.preProcIsDirty():
                    if self._mode == 'Real' and self._shouldMergeChrFolders():
                        self._status = 'Trying to combine chromosome vectors into combined vectors.'
                        PreProcessUtils.createBoundingRegionShelve(
                            self._genome, trackName, allowOverlaps)
                        ChrMemmapFolderMerger.merge(self._genome, trackName, allowOverlaps)

                        self._status = 'Trying to remove chromosome folders'
                        PreProcessUtils.removeChrMemmapFolders(
                            self._genome, trackName, allowOverlaps)
                        collector.updatePreProcFilesExistFlag(
                            allowOverlaps, preProcFilesExist=True, merged=True)

                    self._status = 'Trying to check whether 3D data is correct'
                    PreProcessUtils.checkIfEdgeIdsExist(self._genome, trackName, allowOverlaps)
                    PreProcessUtils.checkUndirectedEdges(self._genome, trackName, allowOverlaps)

                    collector.markOverlapRuleAsFinalized(allowOverlaps)

            # Finalize track if needed
            if self._shouldFinalize():
                if collector.preProcIsDirty():
                    self._status = 'Trying to finalize.'
                    collector.finalize(self._username, self._shouldPrintProcessMessages())
                    if not atLeastOneFinalized:
                        atLeastOneFinalized = True
                else:
                    collector.removeEntry()

        except NotSupportedError as e:
            collector.removeEntry()
            if DebugConfig.PASS_ON_PREPROCESS_EXCEPTIONS:
                raise_from(
                    PreprocessWarning(self._addContextToExceptionMsg(e, trackName)), e)
            else:
                self._printExceptionMsg(e, trackName, Error=False)
        except Exception as e:
            collector.removeEntry()
            if DebugConfig.PASS_ON_PREPROCESS_EXCEPTIONS:
                raise_from(
                    PreprocessError(self._addContextToExceptionMsg(e, trackName)), e)
            else:
                self._printExceptionMsg(e, trackName, Error=True)
def _try_autodiscover(hostname, credentials, email):
    # Implements the full chain of autodiscover server discovery attempts. Tries to return autodiscover data from the
    # final host.
    try:
        return _autodiscover_hostname(hostname=hostname, credentials=credentials, email=email, has_ssl=True)
    except RedirectError as e:
        if not e.has_ssl:
            raise_from(
                AutoDiscoverFailed(
                    '%s redirected us to %s but only HTTPS redirects allowed' % (hostname, e.url)), None)
        log.info('%s redirected us to %s', hostname, e.server)
        return _try_autodiscover(e.server, credentials, email)
    except AutoDiscoverFailed as e:
        log.info('Autodiscover on %s failed (%s). Trying autodiscover.%s', hostname, e, hostname)
        try:
            return _autodiscover_hostname(hostname='autodiscover.%s' % hostname, credentials=credentials,
                                          email=email, has_ssl=True)
        except RedirectError as e:
            if not e.has_ssl:
                raise_from(
                    AutoDiscoverFailed(
                        'autodiscover.%s redirected us to %s but only HTTPS redirects allowed' % (hostname, e.url)),
                    None)
            log.info('%s redirected us to %s', hostname, e.server)
            return _try_autodiscover(e.server, credentials, email)
        except AutoDiscoverFailed as e:
            log.info('Autodiscover on %s failed (%s). Trying autodiscover.%s (plain HTTP)', hostname, e, hostname)
            try:
                return _autodiscover_hostname(hostname='autodiscover.%s' % hostname, credentials=credentials,
                                              email=email, has_ssl=False)
            except RedirectError as e:
                if not e.has_ssl:
                    raise_from(
                        AutoDiscoverFailed(
                            'autodiscover.%s redirected us to %s but only HTTPS redirects allowed' % (
                                hostname, e.url)), None)
                log.info('autodiscover.%s redirected us to %s', hostname, e.server)
                return _try_autodiscover(e.server, credentials, email)
            except AutoDiscoverFailed as e:
                log.info('Autodiscover on autodiscover.%s (no TLS) failed (%s). Trying DNS records', hostname, e)
                hostname_from_dns = _get_canonical_name(hostname='autodiscover.%s' % hostname)
                try:
                    if not hostname_from_dns:
                        log.info('No canonical name on autodiscover.%s Trying SRV record', hostname)
                        hostname_from_dns = _get_hostname_from_srv(hostname='autodiscover.%s' % hostname)
                    # Start over with new hostname
                    return _try_autodiscover(hostname=hostname_from_dns, credentials=credentials, email=email)
                except AutoDiscoverFailed as e:
                    log.info('Autodiscover on %s failed (%s). Trying _autodiscover._tcp.%s',
                             hostname_from_dns, e, hostname)
                    # Start over with new hostname
                    try:
                        hostname_from_dns = _get_hostname_from_srv(hostname='_autodiscover._tcp.%s' % hostname)
                        return _try_autodiscover(hostname=hostname_from_dns, credentials=credentials, email=email)
                    except AutoDiscoverFailed:
                        raise_from(
                            AutoDiscoverFailed('All steps in the autodiscover protocol failed'), None)
def send_command(self, method, params=None):
    """
    Send a command to the bulb.

    :param str method:  The name of the method to send.
    :param list params: The list of parameters for the method.

    :raises BulbException: When the bulb indicates an error condition.

    :returns: The response from the bulb.
    """
    command = {"id": self._cmd_id, "method": method, "params": params}

    _LOGGER.debug("%s > %s", self, command)

    try:
        self._socket.send((json.dumps(command) + "\r\n").encode("utf8"))
    except socket.error as ex:
        # Some error occurred, remove this socket in hopes that we can later
        # create a new one.
        self.__socket.close()
        self.__socket = None
        raise_from(BulbException("A socket error occurred when sending the command."), ex)

    if self._music_mode:
        # We're in music mode, nothing else will happen.
        return {"result": ["ok"]}

    # The bulb will send us updates on its state in addition to responses,
    # so we want to make sure that we read until we see an actual response.
    response = None
    while response is None:
        try:
            data = self._socket.recv(16 * 1024)
        except socket.error:
            # An error occurred, let's close and abort...
            self.__socket.close()
            self.__socket = None
            response = {"error": "Bulb closed the connection."}
            break

        for line in data.split(b"\r\n"):
            if not line:
                continue

            try:
                line = json.loads(line.decode("utf8"))
                _LOGGER.debug("%s < %s", self, line)
            except ValueError:
                line = {"result": ["invalid command"]}

            if line.get("method") != "props":
                # This is probably the response we want.
                response = line
            else:
                self._last_properties.update(line["params"])

    if method == "set_music" and params == [0] and "error" in response and response["error"]["code"] == -5000:
        # The bulb seems to throw an error for no reason when stopping music mode,
        # it doesn't affect operation and we can't do anything about it, so we might
        # as well swallow it.
        return {"id": 1, "result": ["ok"]}

    if "error" in response:
        raise BulbException(response["error"])

    return response
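# Hedged usage sketch for send_command above (the method name and parameters
# follow the Yeelight LAN protocol; the enclosing bulb object and its
# connection setup are assumed):
#
#   response = bulb.send_command("set_power", ["on", "smooth", 500])
#
# On a socket failure, the BulbException raised above carries the original
# socket.error as its __cause__, so callers can still inspect the low-level
# failure while handling the higher-level one.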
def __call__(self, value, error_callback, convertor_fmt_str): try: return float(value) except ValueError as ve: error_callback(convertor_fmt_str, value, self.value_error_str) raise_from(ConvertorError(str(ve)), ve)
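# Hypothetical use of the convertor above (the enclosing class name and error
# callback are assumed): a bad input is reported through the callback and then
# surfaces as ConvertorError with the original ValueError chained as __cause__.
#
#   convert = FloatConvertor()             # class name assumed
#   convert("3.14", error_cb, "%s %s %s")  # -> 3.14
#   convert("abc", error_cb, "%s %s %s")   # -> raises ConvertorError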
def __init__(self, filename): try: self.file = open(filename) except IOError as exc: raise_from(DatabaseError('failed to open'), exc)
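# Sketch of the resulting chain (the enclosing class is assumed to be called
# Database): the caller sees DatabaseError, while the original IOError stays
# reachable as __cause__ for debugging.
#
#   try:
#       Database('/no/such/file.db')
#   except DatabaseError as e:
#       print(repr(e.__cause__))  # the underlying IOError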