def _dump_xml(obj, stream, depth=0, xmltagname=None, header=False):
    from xml.sax.saxutils import escape, quoteattr

    if not getattr(stream, 'encoding', None):
        enc = encode_utf8
    else:
        enc = no_encode

    if depth == 0 and header:
        _dump_xml_header(stream, header)

    indent = ' ' * depth * 2
    if xmltagname is None:
        xmltagname = obj.T.xmltagname

    if isinstance(obj, Object):
        obj.validate(depth=1)
        attrs = []
        elems = []
        for prop, v in obj.T.ipropvals_to_save(obj, xmlmode=True):
            if prop.xmlstyle == 'attribute':
                assert not prop.multivalued
                assert not isinstance(v, Object)
                attrs.append((prop.effective_xmltagname, v))
            elif prop.xmlstyle == 'content':
                assert not prop.multivalued
                assert not isinstance(v, Object)
                elems.append((None, v))
            else:
                prop.extend_xmlelements(elems, v)

        attr_str = ''
        if attrs:
            attr_str = ' ' + ' '.join(
                '%s=%s' % (k, quoteattr(str(v))) for (k, v) in attrs)

        if not elems:
            stream.write(enc(u'%s<%s%s />\n' % (indent, xmltagname, attr_str)))
        else:
            oneline = len(elems) == 1 and elems[0][0] is None
            stream.write(enc(u'%s<%s%s>%s' % (
                indent, xmltagname, attr_str, '' if oneline else '\n')))

            for (k, v) in elems:
                if k is None:
                    stream.write(enc(escape(newstr(v), {'\0': '�'})))
                else:
                    _dump_xml(v, stream=stream, depth=depth + 1, xmltagname=k)

            stream.write(enc(u'%s</%s>\n' % (
                '' if oneline else indent, xmltagname)))
    else:
        stream.write(enc(u'%s<%s>%s</%s>\n' % (
            indent, xmltagname, escape(newstr(obj), {'\0': '�'}),
            xmltagname)))

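# Standalone sketch (not from the original module): the xml.sax.saxutils
# helpers that _dump_xml above relies on. escape() rewrites text content and
# applies extra entity mappings (here NUL -> U+FFFD, as in the code), while
# quoteattr() returns a fully quoted attribute value.
from xml.sax.saxutils import escape, quoteattr

print(escape('a < b & c\0', {'\0': '\N{REPLACEMENT CHARACTER}'}))  # a &lt; b &amp; c\ufffd
print(quoteattr('say "hi"'))  # chooses single quotes: 'say "hi"'
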
def _is_probably_categorical(self, column):
    if newstr(column.dtype) != 'object':
        # only string types (represented in DataFrame as object) can
        # potentially be categorical
        return False
    if len(max(column, key=lambda p: len(newstr(p)))) > 100:
        return False  # value too long to be a category
    if len(set(column)) > 100:
        return False  # too many unique values to be a category
    return True

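# Usage sketch (illustrative, not part of the original class; assumes Python 3,
# where future's newstr is just str and is available at module level): the
# heuristic above can be exercised through a throwaway holder class with plain
# pandas Series objects.
import pandas as pd

class _HeuristicDemo(object):
    _is_probably_categorical = _is_probably_categorical

_demo = _HeuristicDemo()
print(_demo._is_probably_categorical(pd.Series(['red', 'green', 'red'])))  # True
print(_demo._is_probably_categorical(pd.Series([0.1, 0.2, 0.3])))          # False: not object dtype
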
def browse(self, max_lines=None, headers=None):
    """Try reading specified number of lines from the CSV object.

    Args:
      max_lines: max number of lines to read. If None, the whole file is read.
      headers: a list of strings as column names. If None, it will use
        "col0, col1...".

    Returns:
      A pandas DataFrame with the schema inferred from the data.

    Raises:
      Exception if the CSV object cannot be read, there are not enough lines
      to read, or the number of headers does not match the number of columns.
    """
    if self.path.startswith('gs://'):
        lines = Csv._read_gcs_lines(self.path, max_lines)
    else:
        lines = Csv._read_local_lines(self.path, max_lines)

    if len(lines) == 0:
        return pd.DataFrame(columns=headers)

    columns_size = len(
        next(csv.reader([lines[0]], delimiter=self._delimiter)))
    if headers is None:
        headers = ['col' + newstr(e) for e in range(columns_size)]
    if len(headers) != columns_size:
        raise Exception(
            'Number of columns in CSV does not match number of headers')

    buf = StringIO()
    for line in lines:
        buf.write(line)
        buf.write('\n')
    buf.seek(0)
    df = pd.read_csv(buf, names=headers, delimiter=self._delimiter)
    for key, col in df.iteritems():
        if self._is_probably_categorical(col):
            df[key] = df[key].astype('category')
    return df

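# Standalone sketch of the inference core of browse() (assumptions: comma
# delimiter, no GCS access): join the raw lines into an in-memory buffer, let
# pandas infer the schema, then downcast a low-cardinality string column to
# the category dtype, mirroring the loop above.
from io import StringIO
import pandas as pd

_lines = ['red,1', 'green,2', 'red,3']
_df = pd.read_csv(StringIO('\n'.join(_lines)), names=['color', 'n'])
_df['color'] = _df['color'].astype('category')
print(_df.dtypes)  # color: category, n: int64
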
def test_human_format_and_payload_with_newstr(cosmolog, cosmolog_setup):
    logstream = cosmolog_setup(formatter='human')
    logger = cosmolog()
    msg = 'the {component} has exploded'
    logger.error(msg, component=newstr('oxygen tank'))
    logline = logstream.getvalue().split('\n').pop(0)
    assert logline == 'Apr 13 03:07:53 jupiter.planets.com star_stuff: [ERROR] the oxygen tank has exploded'  # noqa: E501

def parse_command_line_parameter(self, p):
    """Parse a command line parameter of the form 'name=value'.

    Uses the ParameterSet format-specific type parsers stored in self.casts.

    Raises ValueError, with an args tuple containing (message, name, value),
    if the parameter name isn't in self.values.
    """
    pos = p.find('=')
    if pos == -1:
        raise Exception(
            "Not a valid command line parameter. "
            "String must be of form 'name=value'")
    name = p[:pos]
    value = p[pos + 1:]
    if self.list_pattern.match(value) or self.tuple_pattern.match(value):
        value = eval(value)
    else:
        for cast in self.casts:
            try:
                value = cast(value)
                break
            except ValueError:
                pass
    try:
        self._new_param_check(name, value)
    except ValueError as v:
        # attempt to set an undefined param -- let commands.py deal with it
        raise ValueError(newstr(v), name, value)
    return {name: value}

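# Standalone sketch of the cast chain above (self.casts is assumed to be an
# ordered sequence such as (int, float)): the first parser that does not
# raise ValueError wins, and an unparseable value stays a string.
def _cast_value(value, casts=(int, float)):
    for cast in casts:
        try:
            return cast(value)
        except ValueError:
            pass
    return value

assert _cast_value('3') == 3        # int wins before float is tried
assert _cast_value('3.5') == 3.5    # int fails, float succeeds
assert _cast_value('abc') == 'abc'  # every cast fails, keep the string
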
def test_newstr_is_accepted(cosmolog, cosmolog_setup, capsys):
    cosmolog_setup()
    logger = cosmolog()
    logger.info('someone is from the future: {futstr}',
                futstr=newstr("hi from the future"))
    out, err = capsys.readouterr()
    assert not err

def get_ips_from_fw(fw, un, pw, interface, check_ipv4, check_ipv6):
    data = []
    ipv4 = None
    ipv6 = None
    try:
        t = fw_api_test.fortigate_api(fw, un, pw)
        port1 = t.show(['cmdb', 'system', 'interface', interface])
        if check_ipv4:
            ipv4 = port1['results'][0].get('ip', None)
        if check_ipv6:
            ipv6 = port1['results'][0]['ipv6'].get('ip6-address', None)
        ips = []
        if ipv4 is not None:
            ips.append(ipv4)
        if ipv6 is not None:
            ips.append(ipv6)
        for ip_data in ips:
            try:
                # the API returns 'address netmask'; rewrite it as
                # 'address/netmask' so ipaddress can parse it
                ip_addr = ipaddress.ip_interface(
                    newstr(ip_data.replace(' ', '/')))
                if ip_addr.version == 6:
                    data.append({"ip": str(ip_addr.ip), "type": 'AAAA'})
                elif ip_addr.version == 4:
                    data.append({"ip": str(ip_addr.ip), "type": 'A'})
            except ValueError:
                print('Cannot process IP value of {}'.format(ip_data))
    except Exception:
        print('Something went wrong')
        raise
    return data

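# Standalone sketch of the record-type mapping above: the stdlib ipaddress
# module parses the interface string, and its version decides whether the
# address is published as an A or AAAA record.
import ipaddress

for _raw in ('192.0.2.10/255.255.255.0', '2001:db8::1/64'):
    _if = ipaddress.ip_interface(_raw)
    print({'ip': str(_if.ip), 'type': 'AAAA' if _if.version == 6 else 'A'})
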
def testTaskManager(self):
    "Test TaskManager class"
    mgr = TaskManager(nworkers=3)
    self.assertEqual(3, mgr.nworkers())
    jobs = []
    results = {}
    kwds = {}
    args1 = (1, results)
    args2 = (2, results)
    args3 = (3, results)
    args4 = (4, results)
    pid1 = genkey(str(args1) + str(kwds))
    pid2 = genkey(str(args2) + str(kwds))
    pid3 = genkey(str(args3) + str(kwds))
    pid4 = genkey(newstr(args4) + newstr(kwds))
    jobs.append(mgr.spawn(myFunc, *args1))
    jobs.append(mgr.spawn(myFunc, *args2))
    jobs.append(mgr.spawn(myFunc, *args3))
    jobs.append(mgr.spawn(myFunc, *args4))
    self.assertEqual(True, mgr.is_alive(pid1))
    self.assertEqual(True, mgr.is_alive(pid2))
    self.assertEqual(True, mgr.is_alive(pid3))
    self.assertEqual(True, mgr.is_alive(pid4))
    mgr.joinall(jobs)
    self.assertEqual(sorted(results.keys()), [1, 2, 3, 4])
    self.assertEqual(listvalues(results), ['ok_1', 'ok_2', 'ok_3', 'ok_4'])
    self.assertEqual(False, mgr.is_alive(pid1))
    self.assertEqual(False, mgr.is_alive(pid2))
    self.assertEqual(False, mgr.is_alive(pid3))
    for worker in mgr.workers:
        self.assertEqual(True, worker.is_alive())
    print("### TaskManager quit")
    mgr.quit()
    print("### TaskManager workers no longer running")
    for worker in mgr.workers:
        self.assertEqual(False, worker.is_alive())

def _parse_parameter_from_line(self, line):
    line = newstr(line.strip())
    if "=" in line:
        parts = line.split("=")
        name = parts[0].strip()
        value = "=".join(parts[1:])
        try:
            if SimpleParameterSet._value_represents_string(value):
                value = newstr(eval(value))
            else:
                value = eval(value)
        except NameError:
            value = newstr(value)
        except (TypeError, ValueError) as err:  # e.g. null bytes
            raise SyntaxError(
                "File is not a valid simple parameter file. %s" % err)
        if self.COMMENT_CHAR in line:
            # this fails if the value is a string containing COMMENT_CHAR
            comment = self.COMMENT_CHAR.join(
                line.split(self.COMMENT_CHAR)[1:])
        else:
            comment = None
    else:
        raise SyntaxError(
            "File is not a valid simple parameter file. "
            "This line caused the error: %s" % line)
    return name, value, comment

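# Standalone sketch of the name/value/comment split above (COMMENT_CHAR is
# assumed to be '#'): everything after the first '#' becomes the comment,
# which, as the inline note warns, misfires when the value itself contains
# the comment character.
_line = 'alpha = 0.05  # learning rate'
_name, _, _rest = _line.partition('=')
_value, _, _comment = _rest.partition('#')
print(_name.strip(), _value.strip(), _comment.strip())  # alpha 0.05 learning rate
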
def __init__(self, name, lat, lon, population=None, asciiname=None):
    name = newstr(name)
    lat = float(lat)
    lon = float(lon)
    if asciiname is None:
        asciiname = name.encode('ascii', errors='replace')
    if population is None:
        population = 0
    else:
        population = int(population)

    Object.__init__(
        self, name=name, lat=lat, lon=lon, population=population,
        asciiname=asciiname)

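# Usage sketch (hypothetical value): encoding with errors='replace' is how
# the constructor above derives a fallback asciiname, substituting '?' for
# any character outside ASCII.
print(u'Zürich'.encode('ascii', errors='replace'))  # b'Z?rich'
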
def figure_out_public_ip(check_ipv4, check_ipv6):
    data = []
    ipv4_address = re.compile(
        r'(((25[0-5]|2[0-4]\d|1\d\d|[1-9]\d|\d)\.){3}'
        r'(25[0-5]|2[0-4]\d|1\d\d|[1-9]\d|\d))')
    # matches every RFC 4291 textual form, including '::' compression and
    # dotted-quad (IPv4-mapped) tails
    ipv6_address = re.compile(
        '(?:(?:[0-9A-Fa-f]{1,4}:){6}(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4}|(?:(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))|::(?:[0-9A-Fa-f]{1,4}:){5}(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4}|(?:(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))|(?:[0-9A-Fa-f]{1,4})?::(?:[0-9A-Fa-f]{1,4}:){4}(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4}|(?:(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))|(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4})?::(?:[0-9A-Fa-f]{1,4}:){3}(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4}|(?:(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))|(?:(?:[0-9A-Fa-f]{1,4}:){,2}[0-9A-Fa-f]{1,4})?::(?:[0-9A-Fa-f]{1,4}:){2}(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4}|(?:(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))|(?:(?:[0-9A-Fa-f]{1,4}:){,3}[0-9A-Fa-f]{1,4})?::[0-9A-Fa-f]{1,4}:(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4}|(?:(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))|(?:(?:[0-9A-Fa-f]{1,4}:){,4}[0-9A-Fa-f]{1,4})?::(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4}|(?:(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))|(?:(?:[0-9A-Fa-f]{1,4}:){,5}[0-9A-Fa-f]{1,4})?::[0-9A-Fa-f]{1,4}|(?:(?:[0-9A-Fa-f]{1,4}:){,6}[0-9A-Fa-f]{1,4})?::)'
    )
    if check_ipv6:
        try:
            ip_response = requests.get('http://ipv6.whatismyv6.com')
            # str(ip_response) is '<Response [200]>' on success
            if "200" in str(ip_response):
                ip_data = ipv6_address.search(
                    newstr(ip_response.content)).group()
                ip_addr = ipaddress.ip_address(newstr(ip_data))
                if ip_addr.version == 6:
                    data.append({"ip": ip_data, "type": 'AAAA'})
        except Exception:
            print('Could not get external IPv6 address')
    if check_ipv4:
        try:
            ip_response = requests.get('http://ipv4.whatismyv6.com')
            if "200" in str(ip_response):
                ip_data = ipv4_address.search(
                    newstr(ip_response.content)).group()
                ip_addr = ipaddress.ip_address(newstr(ip_data))
                if ip_addr.version == 4:
                    data.append({"ip": ip_data, "type": 'A'})
        except Exception:
            print('Could not get external IPv4 address')
    return data

def __init__(self, typ, rang):
    for i, val in enumerate(rang):
        if val is None:
            # the value None is allowed for every parameter type
            continue
        if typ in [ParamTypes.INT, ParamTypes.INT_EXP, ParamTypes.INT_CAT]:
            rang[i] = int(val)
        elif typ in [ParamTypes.FLOAT, ParamTypes.FLOAT_EXP,
                     ParamTypes.FLOAT_CAT]:
            rang[i] = float(val)
        elif typ == ParamTypes.STRING:
            rang[i] = str(newstr(val))
        elif typ == ParamTypes.BOOL:
            rang[i] = bool(val)
    self.type = typ
    self.range = rang

def __init__(self, name, type, values):
    self.name = name
    self.type = type
    for i, val in enumerate(values):
        if val is None:
            # the value None is allowed for every parameter type
            continue
        if self.type == 'int_cat':
            values[i] = int(val)
        elif self.type == 'float_cat':
            values[i] = float(val)
        elif self.type == 'string':
            # this is necessary to avoid a bug in sklearn, which won't be
            # fixed until 0.20
            values[i] = str(newstr(val))
        elif self.type == 'bool':
            values[i] = bool(val)
    self.values = values

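# Standalone sketch of the per-type coercion above: None passes through
# untouched, every other value is cast according to the declared categorical
# type (a plain str() stands in for the str(newstr(...)) sklearn workaround).
def _coerce(type_, values):
    casts = {'int_cat': int, 'float_cat': float, 'string': str, 'bool': bool}
    cast = casts.get(type_)
    return [v if v is None or cast is None else cast(v) for v in values]

print(_coerce('int_cat', ['1', '2', None]))      # [1, 2, None]
print(_coerce('string', [u'gini', u'entropy']))  # ['gini', 'entropy']
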
def testUpdateFailedDoc(self):
    """
    _testUpdateFailedDoc_

    Verify that the update function will work correctly and not throw a 500
    error if the doc didn't make it into the database for some reason.
    """
    change = ChangeState(self.config, "changestate_t")

    locationAction = self.daoFactory(classname="Locations.New")
    locationAction.execute("site1", pnn="T2_CH_CERN")

    testWorkflow = Workflow(spec=self.specUrl, owner="Steve",
                            name="wf001", task=self.taskName)
    testWorkflow.create()
    testFileset = Fileset(name="TestFileset")
    testFileset.create()
    testSubscription = Subscription(fileset=testFileset,
                                    workflow=testWorkflow,
                                    split_algo="FileBased")
    testSubscription.create()

    testFileA = File(lfn="SomeLFNA", events=1024, size=2048,
                     locations=set(["T2_CH_CERN"]))
    testFileA.create()
    testFileset.addFile(testFileA)
    testFileset.commit()

    splitter = SplitterFactory()
    jobFactory = splitter(package="WMCore.WMBS",
                          subscription=testSubscription)
    jobGroup = jobFactory(files_per_job=1)[0]

    testJobA = jobGroup.jobs[0]
    testJobA["user"] = "******"
    testJobA["group"] = "DMWM"
    testJobA["taskType"] = "Merge"
    testJobA["couch_record"] = str(testJobA["id"])

    change.propagate([testJobA], "new", "none")
    testJobADoc = change.jobsdatabase.document(testJobA["couch_record"])

    self.assertTrue("states" in testJobADoc)
    self.assertTrue("1" in testJobADoc["states"])

    testFileB = File(lfn="SomeLFNB", events=1024, size=2048,
                     locations=set(["T2_CH_CERN"]))
    testFileB.create()
    testFileset.addFile(testFileB)
    testFileset.commit()

    splitter = SplitterFactory()
    jobFactory = splitter(package="WMCore.WMBS",
                          subscription=testSubscription)
    jobGroup = jobFactory(files_per_job=1)[0]

    testJobB = jobGroup.jobs[0]
    testJobB["user"] = "******"
    testJobB["group"] = "DMWM"
    testJobB["taskType"] = "Merge"
    testJobB["couch_record"] = newstr(testJobB["id"])

    change.propagate([testJobB], "new", "none")
    testJobBDoc = change.jobsdatabase.document(testJobB["couch_record"])

    self.assertTrue("states" in testJobBDoc)
    self.assertTrue("1" in testJobBDoc["states"])
    return

def addError(self, stepName, exitCode, errorType, errorDetails,
             siteName=None):
    """
    _addError_

    Add an error report with an exitCode, type/class of error and details of
    the error as a string. Also, report attempted site if error happened
    before landing on it.
    """
    if self.retrieveStep(stepName) is None:
        # Create a step and set it to failed
        # Assumption: Adding an error fails a step
        self.addStep(stepName, status=1)

    if exitCode is not None:
        exitCode = int(exitCode)

    setExitCodes = self.getStepExitCodes(stepName)
    if exitCode in setExitCodes:
        logging.warning("Exit code: %s has already been added to the job report",
                        exitCode)
        return

    stepSection = self.retrieveStep(stepName)

    errorCount = getattr(stepSection.errors, "errorCount", 0)
    errEntry = "error%s" % errorCount
    stepSection.errors.section_(errEntry)
    errDetails = getattr(stepSection.errors, errEntry)
    errDetails.exitCode = exitCode
    errDetails.type = str(errorType)

    try:
        if isinstance(errorDetails, newstr):
            errDetails.details = errorDetails
        elif isinstance(errorDetails, bytes):
            errDetails.details = decodeBytesToUnicode(errorDetails, 'ignore')
        else:
            errDetails.details = newstr(errorDetails)
    except UnicodeEncodeError as ex:
        msg = "Failed to encode the job error details for job ID: %s." % self.getJobID()
        msg += "\nException message: %s\nOriginal error details: %s" % (str(ex), errorDetails)
        logging.error(msg)
        msg = "DEFAULT ERROR MESSAGE, because it failed to UTF-8 encode the original message."
        errDetails.details = msg
    except UnicodeDecodeError as ex:
        msg = "Failed to decode the job error details for job ID: %s." % self.getJobID()
        msg += "\nException message: %s\nOriginal error details: %s" % (str(ex), errorDetails)
        logging.error(msg)
        msg = "DEFAULT ERROR MESSAGE, because it failed to UTF-8 decode the original message."
        errDetails.details = msg

    setattr(stepSection.errors, "errorCount", errorCount + 1)
    self.setStepStatus(stepName=stepName, status=exitCode)
    if siteName:
        self._setSiteName(site=siteName)
    return

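# Standalone sketch of the error-detail normalization above (assumption:
# decodeBytesToUnicode behaves roughly like bytes.decode('utf-8', errors)):
# text passes through, bytes are decoded, anything else is stringified.
def _normalize_details(details):
    if isinstance(details, str):
        return details
    if isinstance(details, bytes):
        return details.decode('utf-8', 'ignore')
    return str(details)

print(_normalize_details(b'disk full'))    # 'disk full'
print(_normalize_details({'code': 8001}))  # "{'code': 8001}"
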
def report_rest_error(err, trace, throw):
    """Report a REST error: generate an appropriate log message, set the
    response headers and raise an appropriate :class:`~.HTTPError`.

    Normally `throw` would be True to translate the exception `err` into an
    HTTP server error, but the function can also be called with `throw` set
    to False if the purpose is merely to log an exception message.

    :arg err: exception object.
    :arg trace: stack trace to use in case `err` doesn't have one.
    :arg throw: raise a :class:`~.HTTPError` if True."""
    if isinstance(err, DatabaseError) and err.errobj:
        offset = None
        sql, binds, kwbinds = err.lastsql
        if sql and err.errobj.args and hasattr(err.errobj.args[0], 'offset'):
            offset = err.errobj.args[0].offset
            sql = sql[:offset] + "<**>" + sql[offset:]
        cherrypy.log("SERVER DATABASE ERROR %d/%d %s %s.%s %s [instance: %s] (%s);"
                     " last statement: %s; binds: %s, %s; offset: %s"
                     % (err.http_code, err.app_code, err.message,
                        getattr(err.errobj, "__module__", "__builtins__"),
                        err.errobj.__class__.__name__,
                        err.errid, err.instance, newstr(err.errobj).rstrip(),
                        sql, binds, kwbinds, offset))
        for line in err.trace.rstrip().split("\n"):
            cherrypy.log(" " + line)
        cherrypy.response.headers["X-REST-Status"] = newstr(err.app_code)
        cherrypy.response.headers["X-Error-HTTP"] = newstr(err.http_code)
        cherrypy.response.headers["X-Error-ID"] = err.errid
        report_error_header("X-Error-Detail", err.message)
        report_error_header("X-Error-Info", err.info)
        if throw:
            raise cherrypy.HTTPError(err.http_code, err.message)
    elif isinstance(err, RESTError):
        if err.errobj:
            cherrypy.log("SERVER REST ERROR %s.%s %s (%s); derived from %s.%s (%s)"
                         % (err.__module__, err.__class__.__name__,
                            err.errid, err.message,
                            getattr(err.errobj, "__module__", "__builtins__"),
                            err.errobj.__class__.__name__,
                            newstr(err.errobj).rstrip()))
            trace = err.trace
        else:
            cherrypy.log("SERVER REST ERROR %s.%s %s (%s)"
                         % (err.__module__, err.__class__.__name__,
                            err.errid, err.message))
        for line in trace.rstrip().split("\n"):
            cherrypy.log(" " + line)
        cherrypy.response.headers["X-REST-Status"] = newstr(err.app_code)
        cherrypy.response.headers["X-Error-HTTP"] = newstr(err.http_code)
        cherrypy.response.headers["X-Error-ID"] = err.errid
        report_error_header("X-Error-Detail", err.message)
        report_error_header("X-Error-Info", err.info)
        if throw:
            raise cherrypy.HTTPError(err.http_code, err.message)
    elif isinstance(err, cherrypy.HTTPError):
        errid = "%032x" % random.randrange(1 << 128)
        cherrypy.log("SERVER HTTP ERROR %s.%s %s (%s)"
                     % (err.__module__, err.__class__.__name__,
                        errid, newstr(err).rstrip()))
        for line in trace.rstrip().split("\n"):
            cherrypy.log(" " + line)
        cherrypy.response.headers["X-REST-Status"] = newstr(200)
        cherrypy.response.headers["X-Error-HTTP"] = newstr(err.status)
        cherrypy.response.headers["X-Error-ID"] = errid
        report_error_header("X-Error-Detail", err._message)
        if throw:
            raise err
    else:
        errid = "%032x" % random.randrange(1 << 128)
        cherrypy.log("SERVER OTHER ERROR %s.%s %s (%s)"
                     % (getattr(err, "__module__", "__builtins__"),
                        err.__class__.__name__,
                        errid, newstr(err).rstrip()))
        for line in trace.rstrip().split("\n"):
            cherrypy.log(" " + line)
        cherrypy.response.headers["X-REST-Status"] = 400
        cherrypy.response.headers["X-Error-HTTP"] = 500
        cherrypy.response.headers["X-Error-ID"] = errid
        report_error_header("X-Error-Detail", "Server error")
        if throw:
            raise cherrypy.HTTPError(500, "Server error")

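# Standalone sketch of the error-ID scheme above: a 128-bit random number
# rendered as 32 zero-padded hex digits, wide enough to correlate the
# X-Error-ID response header with the matching server log line.
import random

_errid = "%032x" % random.randrange(1 << 128)
print(len(_errid), _errid)  # always 32 hex characters
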
def _empty_or_comment(cls, line):
    line = newstr(line.strip())
    return len(line) == 0 or line.startswith(cls.COMMENT_CHAR)