def test_log_syslog(self):
    """Check that SysLogLibHandler prefixes messages with syslog priorities.

    Patches syslog.syslog with the test's dummy sink, routes the root
    logger through a SysLogLibHandler at WARNING level, and asserts that
    each level is emitted (or filtered) with the expected "<prio>:" prefix.
    """
    with patch('syslog.syslog', new=self.dummy_syslog):
        logger = logging.getLogger()
        # Iterate a snapshot: removeHandler() mutates logger.handlers, so
        # looping over the live list would skip every other handler.
        old_handlers = []
        for hdl in list(logger.handlers):
            logger.removeHandler(hdl)
            old_handlers.append(hdl)
        test_handler = None  # guard: finally must not NameError if setup fails
        try:
            test_handler = SysLogLibHandler("USER")
            logger.addHandler(test_handler)
            logger.setLevel(logging.WARNING)
            log.info("info")
            log.warn("warn")
            log.warning("warning")
            log.error("error")
            log.critical("critical")
            log.debug("debug")
            lines = self._syslog.getvalue().split("\n")
            # Levels below WARNING must be filtered out entirely.
            assert ("info" not in lines)
            assert ("12:warn" in lines)
            assert ("12:warning" in lines)
            assert ("10:critical" in lines)
            assert ("11:error" in lines)
            assert ("debug" not in lines)
        finally:
            if test_handler is not None:
                logger.removeHandler(test_handler)
            # Restore whatever was attached before the test ran.
            for hdl in old_handlers:
                logger.addHandler(hdl)
def test_log_plain(self):
    """Check that a plain StreamHandler honours the WARNING threshold.

    Captures root-logger output in a StringIO and asserts that WARNING
    and above appear verbatim while INFO/DEBUG are suppressed.
    """
    logfile = StringIO()
    logger = logging.getLogger()
    # Iterate a snapshot: removeHandler() mutates logger.handlers, so
    # looping over the live list would skip every other handler.
    old_handlers = []
    for hdl in list(logger.handlers):
        logger.removeHandler(hdl)
        old_handlers.append(hdl)
    test_handler = None  # guard: finally must not NameError if setup fails
    try:
        test_handler = logging.StreamHandler(logfile)
        logger.addHandler(test_handler)
        logger.setLevel(logging.WARNING)
        log.info("info")
        log.warn("warn")
        log.warning("warning")
        log.error("error")
        log.critical("critical")
        log.debug("debug")
        lines = logfile.getvalue().split("\n")
        # Levels below WARNING must be filtered out entirely.
        assert ("info" not in lines)
        assert ("warn" in lines)
        assert ("warning" in lines)
        assert ("critical" in lines)
        assert ("error" in lines)
        assert ("debug" not in lines)
    finally:
        if test_handler is not None:
            logger.removeHandler(test_handler)
        # Restore whatever was attached before the test ran.
        for hdl in old_handlers:
            logger.addHandler(hdl)
def test_log_plain(self):
    """Check that a plain StreamHandler honours the WARNING threshold."""
    logfile = StringIO()
    logger = logging.getLogger()
    old_handlers = []
    # Drain via while/pop rather than iterating logger.handlers directly:
    # removeHandler() mutates that list, and a for-loop over it skips entries.
    while logger.handlers:
        handler = logger.handlers[0]
        logger.removeHandler(handler)
        old_handlers.append(handler)
    test_handler = None  # so the finally clause cannot NameError on setup failure
    try:
        test_handler = logging.StreamHandler(logfile)
        logger.addHandler(test_handler)
        logger.setLevel(logging.WARNING)
        log.info("info")
        log.warn("warn")
        log.warning("warning")
        log.error("error")
        log.critical("critical")
        log.debug("debug")
        lines = logfile.getvalue().split("\n")
        assert("info" not in lines)     # below threshold
        assert("warn" in lines)
        assert("warning" in lines)
        assert("critical" in lines)
        assert("error" in lines)
        assert("debug" not in lines)    # below threshold
    finally:
        if test_handler is not None:
            logger.removeHandler(test_handler)
        for handler in old_handlers:
            logger.addHandler(handler)
def test_log_syslog(self):
    """Check SysLogLibHandler's level-to-priority mapping via a dummy syslog."""
    with patch('syslog.syslog', new=self.dummy_syslog):
        logger = logging.getLogger()
        # Snapshot first, then detach: removing while iterating the live
        # logger.handlers list skips every other handler.
        old_handlers = list(logger.handlers)
        for hdl in old_handlers:
            logger.removeHandler(hdl)
        test_handler = None  # so the finally clause cannot NameError on setup failure
        try:
            test_handler = SysLogLibHandler("USER")
            logger.addHandler(test_handler)
            logger.setLevel(logging.WARNING)
            log.info("info")
            log.warn("warn")
            log.warning("warning")
            log.error("error")
            log.critical("critical")
            log.debug("debug")
            lines = self._syslog.getvalue().split("\n")
            assert ("info" not in lines)      # below threshold
            assert ("12:warn" in lines)
            assert ("12:warning" in lines)
            assert ("10:critical" in lines)
            assert ("11:error" in lines)
            assert ("debug" not in lines)     # below threshold
        finally:
            if test_handler is not None:
                logger.removeHandler(test_handler)
            for hdl in old_handlers:
                logger.addHandler(hdl)
def run(self):
    """Fetch self.url (file:// path or HTTP[S]) and record the outcome.

    On success sets self.result, self.cached, self.date, self.last_modified
    (and, for HTTP, self.resp / self.status). On any failure the exception
    is stored in self.ex, self.result is set to None and the error logged.
    """
    def _parse_date(s):
        # RFC 2822 date header -> datetime; a missing header means "now".
        # Fix: the original called datetime.new(), which does not exist.
        if s is None:
            return datetime.now()
        return datetime(*parsedate(s)[:6])

    self.start_time = clock()
    try:
        requests_cache.install_cache('.cache')
        if not self.enable_cache:
            # Force a fresh fetch by evicting any cached copy of this URL.
            log.debug("removing '%s' from cache" % self.url)
            requests_cache.get_cache().delete_url(self.url)

        log.debug("fetching '%s'" % self.url)

        if self.url.startswith('file://'):
            path = self.url[7:]
            if not os.path.exists(path):
                raise IOError("file not found: %s" % path)
            with open(path, 'r') as fd:
                self.result = fd.read()
            self.cached = False
            self.date = datetime.now()
            self.last_modified = datetime.fromtimestamp(os.stat(path).st_mtime)
        else:
            # NOTE(review): verify=False disables TLS certificate checking —
            # metadata authenticity presumably relies on XML signatures; confirm.
            self.resp = requests.get(self.url, timeout=60, verify=False)
            self.last_modified = _parse_date(
                self.resp.headers.get('last-modified', self.resp.headers.get('date', None)))
            self.date = _parse_date(self.resp.headers['date'])
            self.cached = getattr(self.resp, 'from_cache', False)
            self.status = self.resp.status_code
            if self.resp.status_code != 200:
                raise IOError(self.resp.reason)
            self.result = self.resp.content

        log.debug("got %d bytes from '%s'" % (len(self.result), self.url))
    except Exception as ex:
        traceback.print_exc()
        log.warn("unable to fetch '%s': %s" % (self.url, ex))
        self.ex = ex
        self.result = None
def run(self):
    """Fetch self.url (file:// path or HTTP[S]) and record the outcome.

    Success: self.result/self.cached/self.date/self.last_modified are set
    (plus self.resp/self.status for HTTP). Failure: the exception is kept
    in self.ex, self.result becomes None and a warning is logged.
    """
    def _parse_date(s):
        # Parse an RFC 2822 date header; absent header -> current time.
        # Fix: the original called datetime.new(), which does not exist.
        if s is None:
            return datetime.now()
        return datetime(*parsedate(s)[:6])

    self.start_time = clock()
    try:
        requests_cache.install_cache('.cache')
        if not self.enable_cache:
            # Evict any cached copy so the fetch below hits the network.
            log.debug("removing '%s' from cache" % self.url)
            requests_cache.get_cache().delete_url(self.url)

        log.debug("fetching '%s'" % self.url)

        if self.url.startswith('file://'):
            path = self.url[7:]
            if not os.path.exists(path):
                raise IOError("file not found: %s" % path)
            with open(path, 'r') as fd:
                self.result = fd.read()
            self.cached = False
            self.date = datetime.now()
            self.last_modified = datetime.fromtimestamp(os.stat(path).st_mtime)
        else:
            # NOTE(review): verify=False disables TLS certificate checking —
            # confirm metadata authenticity is covered by XML signatures.
            self.resp = requests.get(self.url, timeout=60, verify=False)
            self.last_modified = _parse_date(
                self.resp.headers.get('last-modified', self.resp.headers.get('date', None)))
            self.date = _parse_date(self.resp.headers['date'])
            self.cached = getattr(self.resp, 'from_cache', False)
            self.status = self.resp.status_code
            if self.resp.status_code != 200:
                raise IOError(self.resp.reason)
            self.result = self.resp.content

        log.debug("got %d bytes from '%s'" % (len(self.result), self.url))
    except Exception as ex:
        traceback.print_exc()
        log.warn("unable to fetch '%s': %s" % (self.url, ex))
        self.ex = ex
        self.result = None
def _lookup(self, member, xp=None):
    """
    Find a (set of) EntityDescriptor element(s) based on the specified
    'member' expression.

    :param member: Either an entity, URL or a filter expression.
    :param xp: An xpath filter applied to each match (defaults to all
        EntityDescriptor elements).

    Dispatches on the type of 'member': None (all collections), an lxml
    element (xpath filter), a string (the select mini-language below), or
    any other iterable (recurse per item).
    """
    def _hash(hn, strv):
        # Digest 'strv' with the named hashlib mechanism; 'null' is identity.
        # NOTE(review): defined but not referenced anywhere in this body.
        if hn == 'null':
            return strv
        if not hasattr(hashlib, hn):
            raise MetadataException("Unknown digest mechanism: '%s'" % hn)
        hash_m = getattr(hashlib, hn)
        h = hash_m()
        h.update(strv)
        return h.hexdigest()

    if xp is None:
        xp = "//md:EntityDescriptor"
    if member is None:
        # No selector: resolve every known collection, filtered by xp.
        lst = []
        for m in self.keys():
            log.debug("resolving %s filtered by %s" % (m, xp))
            lst.extend(self._lookup(m, xp))
        return lst
    elif hasattr(member, 'xpath'):
        # Already an lxml element (tree) - just apply the xpath filter.
        log.debug("xpath filter %s <- %s" % (xp, member))
        return member.xpath(xp, namespaces=NS)
    elif type(member) is str or type(member) is unicode:
        log.debug("string lookup %s" % member)
        if '+' in member:
            # "a+b+c" selects the intersection of the individual lookups.
            member = member.strip('+')
            log.debug("lookup intersection of '%s'" % ' and '.join(member.split('+')))
            hits = None
            for f in member.split("+"):
                f = f.strip()
                if hits is None:
                    hits = set(self._lookup(f, xp))
                else:
                    other = self._lookup(f, xp)
                    hits.intersection_update(other)
                    if not hits:
                        # Short-circuit: intersection already empty.
                        log.debug("empty intersection")
                        return []
            if hits is not None and hits:
                return list(hits)
            else:
                return []
        if "!" in member:
            # "src!xpath" - look up 'src' and filter with the given xpath.
            # Note: rebinds the local 'xp' for the recursive call.
            (src, xp) = member.split("!")
            if len(src) == 0:  # e.g. "!//md:EntityDescriptor[...]"
                src = None
                log.debug("filtering using %s" % xp)
            else:
                log.debug("selecting %s filtered by %s" % (src, xp))
            return self._lookup(src, xp)
        # "{attribute-name}value" - indexed attribute-value lookup.
        m = re.match("^\{(.+)\}(.+)$", member)
        if m is not None:
            log.debug("attribute-value match: %s='%s'" % (m.group(1), m.group(2)))
            return self.index.get(m.group(1), m.group(2).rstrip("/"))
        # "attribute=value" - same lookup, alternate syntax.
        m = re.match("^(.+)=(.+)$", member)
        if m is not None:
            log.debug("attribute-value match: %s='%s'" % (m.group(1), m.group(2)))
            return self.index.get(m.group(1), m.group(2).rstrip("/"))
        log.debug("basic lookup %s" % member)
        # Try each digest index (member may be a hashed entityID).
        for idx in DIGESTS:
            e = self.index.get(idx, member)
            if e:
                log.debug("found %s in %s index" % (e, idx))
                return e
        # Direct collection/entity lookup by name.
        e = self.get(member, None)
        if e is not None:
            return self._lookup(e, xp)
        # hackish but helps save people from their misstakes
        e = self.get("%s.xml" % member, None)
        if e is not None:
            if not "://" in member:  # not an absolute URL
                log.warn(
                    "Found %s.xml as an alias - AVOID extensions in 'select as' statements" % member)
            return self._lookup(e, xp)
        if "://" in member:  # looks like a URL and wasn't an entity or collection - recurse away!
            log.debug("recursively fetching members from '%s'" % member)
            # note that this supports remote lists which may be more rope
            # than is healthy
            return [self._lookup(line, xp) for line in urllib.urlopen(member).iterlines()]
        return []
    elif hasattr(member, '__iter__') and type(member) is not dict:
        # Any other iterable: look up each item; empty means "everything".
        if not len(member):
            member = self.keys()
        return [self._lookup(m, xp) for m in member]
    else:
        raise MetadataException("What about %s ??" % member)
# NOTE(review): nested worker that joins fetcher threads and imports the
# metadata each one retrieved (XRD link documents queue follow-up jobs;
# EntityDescriptor(s) are imported, honouring cacheDuration). This chunk is
# truncated mid-function - the trailing 'finally:' has no suite - so the
# code is preserved byte-for-byte below; complete the finally block against
# the original file before restructuring.
def consumer(q, njobs, stats, next_jobs=None, resolved=None): if next_jobs is None: next_jobs = [] if resolved is None: resolved = set() nfinished = 0 while nfinished < njobs: info = None try: log.debug("waiting for next thread to finish...") thread = q.get(True) thread.join(timeout) if thread.isAlive(): raise MetadataException( "thread timeout fetching '%s'" % thread.url) info = { 'Time Spent': thread.time() } if thread.ex is not None: raise thread.ex else: if thread.result is not None: info['Bytes'] = len(thread.result) else: raise MetadataException( "empty response fetching '%s'" % thread.url) info['Cached'] = thread.cached info['Date'] = str(thread.date) info['Last-Modified'] = str(thread.last_modified) info['Tries'] = thread.tries xml = thread.result.strip() if thread.status is not None: info['Status'] = thread.resp.status_code t = self.parse_metadata( StringIO(xml), key=thread.verify, base_url=thread.url) if t is None: self.fire(type=EVENT_IMPORT_FAIL, url=thread.url) raise MetadataException( "no valid metadata found at '%s'" % thread.url) relt = root(t) if relt.tag in ('{%s}XRD' % NS['xrd'], '{%s}XRDS' % NS['xrd']): log.debug("%s looks like an xrd document" % thread.url) for xrd in t.xpath("//xrd:XRD", namespaces=NS): log.debug("xrd: %s" % xrd) for link in xrd.findall(".//{%s}Link[@rel='%s']" % (NS['xrd'], NS['md'])): url = link.get("href") certs = xmlsec.CertDict(link) fingerprints = certs.keys() fp = None if len(fingerprints) > 0: fp = fingerprints[0] log.debug("fingerprint: %s" % fp) next_jobs.append((url, fp, url, 0)) elif relt.tag in ('{%s}EntityDescriptor' % NS['md'], '{%s}EntitiesDescriptor' % NS['md']): cacheDuration = self.default_cache_duration if self.respect_cache_duration: cacheDuration = root(t).get( 'cacheDuration', self.default_cache_duration) offset = duration2timedelta(cacheDuration) if thread.cached: if thread.last_modified + offset < datetime.now() - duration2timedelta(self.min_cache_ttl): raise MetadataException( "cached metadata 
expired") else: log.debug("found cached metadata for '%s' (last-modified: %s)" % (thread.url, thread.last_modified)) ne = self.import_metadata(t, url=thread.id) info['Number of Entities'] = ne else: log.debug("got fresh metadata for '%s' (date: %s)" % ( thread.url, thread.date)) ne = self.import_metadata(t, url=thread.id) info['Number of Entities'] = ne info['Cache Expiration Time'] = str( thread.last_modified + offset) certs = xmlsec.CertDict(relt) cert = None if certs.values(): cert = certs.values()[0].strip() resolved.add((thread.url, cert)) else: raise MetadataException( "unknown metadata type for '%s' (%s)" % (thread.url, relt.tag)) except Exception, ex: # traceback.print_exc(ex) log.warn("problem fetching '%s' (will retry): %s" % (thread.url, ex)) if info is not None: info['Exception'] = ex if thread.tries < self.retry_limit: next_jobs.append( (thread.url, thread.verify, thread.id, thread.tries + 1)) else: # traceback.print_exc(ex) log.error( "retry limit exceeded for %s (last error was: %s)" % (thread.url, ex)) finally:
# NOTE(review): this chunk starts mid-definition (the atomic write-to-temp-
# file-then-rename helper whose 'def' line is not visible here) and ends in
# a truncated URLFetch.__init__, so it is preserved byte-for-byte. Between
# the fragments: site_dir/templates set up the Mako template directory and
# template() resolves a template by name. Complete both truncated
# definitions against the original file before restructuring.
try: dirname, basename = os.path.split(fn) with tempfile.NamedTemporaryFile("w", delete=False, prefix=".%s" % basename, dir=dirname) as tmp: tmp.write(data) tmpn = tmp.name if os.path.exists(tmpn) and os.stat(tmpn).st_size > 0: os.rename(tmpn, fn) return True except Exception, ex: log.error(ex) finally: if tmpn is not None and os.path.exists(tmpn): try: os.unlink(tmpn) except Exception, ex: log.warn(ex) pass return False site_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "site") templates = TemplateLookup(directories=[os.path.join(site_dir, "templates")]) def template(name): return templates.get_template(name) class URLFetch(threading.Thread): def __init__(self, url, verify, id=None, enable_cache=False, tries=0): self.url = url.strip()
def certreport(req, *opts):
    """
    Generate a report of the certificates (optionally limited by expiration
    time or key size) found in the selection.

    :param req: The request
    :param opts: Options (not used)
    :return: always returns the unmodified working document

    **Examples**

    .. code-block:: yaml

        - certreport:
             error_seconds: 0
             warning_seconds: 864000
             error_bits: 1024
             warning_bits: 2048

    For key size checking this will report keys with a size *less* than the
    size specified, defaulting to errors for keys smaller than 1024 bits and
    warnings for keys smaller than 2048 bits. It should be understood as the
    minimum key size for each report level, as such everything below will
    create report entries.

    Remember that you need a 'publish' or 'emit' call after certreport in
    your plumbing to get useful output. PyFF ships with a couple of xslt
    transforms that are useful for turning metadata with certreport
    annotation into HTML.
    """
    if req.t is None:
        raise PipeException("Your pipeline is missing a select statement.")

    if not req.args:
        req.args = {}

    if type(req.args) is not dict:
        raise PipeException("usage: certreport {warning: 864000, error: 0}")

    error_seconds = int(req.args.get('error_seconds', "0"))
    warning_seconds = int(req.args.get('warning_seconds', "864000"))
    error_bits = int(req.args.get('error_bits', "1024"))
    warning_bits = int(req.args.get('warning_bits', "2048"))

    seen = {}  # SHA1 fingerprints already reported - duplicates are skipped
    for eid in req.t.xpath("//md:EntityDescriptor/@entityID",
                           namespaces=NS, smart_strings=False):
        for cd in req.t.xpath("md:EntityDescriptor[@entityID='%s']//ds:X509Certificate" % eid,
                              namespaces=NS, smart_strings=False):
            try:
                cert_pem = cd.text
                cert_der = base64.b64decode(cert_pem)
                m = hashlib.sha1()
                m.update(cert_der)
                fp = m.hexdigest()
                if not seen.get(fp, False):
                    seen[fp] = True
                    # Hoisted: the enclosing EntityDescriptor is five levels
                    # up from the ds:X509Certificate node; compute it once
                    # instead of repeating the chain in every branch.
                    entity_elt = cd.getparent().getparent().getparent().getparent().getparent()
                    cdict = xmlsec.utils.b642cert(cert_pem)
                    keysize = cdict['modulus'].bit_length()
                    cert = cdict['cert']
                    if keysize < error_bits:
                        req.md.annotate(entity_elt, "certificate-error", "keysize too small",
                                        "%s has keysize of %s bits (less than %s)" % (cert.getSubject(), keysize, error_bits))
                        log.error("%s has keysize of %s" % (eid, keysize))
                    elif keysize < warning_bits:
                        req.md.annotate(entity_elt, "certificate-warning", "keysize small",
                                        "%s has keysize of %s bits (less than %s)" % (cert.getSubject(), keysize, warning_bits))
                        log.warn("%s has keysize of %s" % (eid, keysize))
                    # NOTE(review): '%y' parses a 2-digit year (ASN.1 UTCTime);
                    # a sibling certreport variant uses '%Y'. Confirm which
                    # notAfter encoding b642cert returns before changing this.
                    et = datetime.strptime("%s" % cert.getNotAfter(), "%y%m%d%H%M%SZ")
                    now = datetime.now()
                    dt = et - now
                    if total_seconds(dt) < error_seconds:
                        req.md.annotate(entity_elt, "certificate-error", "certificate has expired",
                                        "%s expired %s ago" % (cert.getSubject(), -dt))
                        log.error("%s expired %s ago" % (eid, -dt))
                    elif total_seconds(dt) < warning_seconds:
                        req.md.annotate(entity_elt, "certificate-warning", "certificate about to expire",
                                        "%s expires in %s" % (cert.getSubject(), dt))
                        log.warn("%s expires in %s" % (eid, dt))
            except Exception as ex:
                # Best-effort per certificate: a malformed cert must not
                # abort the whole report.
                log.error(ex)
def _lookup(self, member, xp=None):
    """
    Find a (set of) EntityDescriptor element(s) based on the specified
    'member' expression.

    :param member: Either an entity, URL or a filter expression.
    :param xp: An xpath filter applied to each match (defaults to all
        EntityDescriptor elements).

    Dispatches on the type of 'member': None (all collections), an lxml
    element (xpath filter), a string (the select mini-language below), or
    any other iterable (recurse per item).
    """
    def _hash(hn, strv):
        # Digest 'strv' with the named hashlib mechanism; 'null' is identity.
        # NOTE(review): defined but not referenced anywhere in this body.
        if hn == 'null':
            return strv
        if not hasattr(hashlib, hn):
            raise MetadataException("Unknown digest mechanism: '%s'" % hn)
        hash_m = getattr(hashlib, hn)
        h = hash_m()
        h.update(strv)
        return h.hexdigest()

    if xp is None:
        xp = "//md:EntityDescriptor"
    if member is None:
        # No selector: resolve every known collection, filtered by xp.
        lst = []
        for m in self.keys():
            log.debug("resolving %s filtered by %s" % (m, xp))
            lst.extend(self._lookup(m, xp))
        return lst
    elif hasattr(member, 'xpath'):
        # Already an lxml element (tree) - just apply the xpath filter.
        log.debug("xpath filter %s <- %s" % (xp, member))
        return member.xpath(xp, namespaces=NS)
    elif type(member) is str or type(member) is unicode:
        log.debug("string lookup %s" % member)
        if '+' in member:
            # "a+b+c" selects the intersection of the individual lookups.
            member = member.strip('+')
            log.debug("lookup intersection of '%s'" % ' and '.join(member.split('+')))
            hits = None
            for f in member.split("+"):
                f = f.strip()
                if hits is None:
                    hits = set(self._lookup(f, xp))
                else:
                    other = self._lookup(f, xp)
                    hits.intersection_update(other)
                    if not hits:
                        # Short-circuit: intersection already empty.
                        log.debug("empty intersection")
                        return []
            if hits is not None and hits:
                return list(hits)
            else:
                return []
        if "!" in member:
            # "src!xpath" - look up 'src' and filter with the given xpath.
            # Note: rebinds the local 'xp' for the recursive call.
            (src, xp) = member.split("!")
            if len(src) == 0:  # e.g. "!//md:EntityDescriptor[...]"
                src = None
                log.debug("filtering using %s" % xp)
            else:
                log.debug("selecting %s filtered by %s" % (src, xp))
            return self._lookup(src, xp)
        # "{attribute-name}value" - indexed attribute-value lookup.
        m = re.match("^\{(.+)\}(.+)$", member)
        if m is not None:
            log.debug("attribute-value match: %s='%s'" % (m.group(1), m.group(2)))
            return self.index.get(m.group(1), m.group(2).rstrip("/"))
        # "attribute=value" - same lookup, alternate syntax.
        m = re.match("^(.+)=(.+)$", member)
        if m is not None:
            log.debug("attribute-value match: %s='%s'" % (m.group(1), m.group(2)))
            return self.index.get(m.group(1), m.group(2).rstrip("/"))
        log.debug("basic lookup %s" % member)
        # Try each digest index (member may be a hashed entityID).
        for idx in DIGESTS:
            e = self.index.get(idx, member)
            if e:
                log.debug("found %s in %s index" % (e, idx))
                return e
        # Direct collection/entity lookup by name.
        e = self.get(member, None)
        if e is not None:
            return self._lookup(e, xp)
        e = self.get("%s.xml" % member, None)  # hackish but helps save people from their misstakes
        if e is not None:
            if not "://" in member:  # not an absolute URL
                log.warn("Found %s.xml as an alias - AVOID extensions in 'select as' statements" % member)
            return self._lookup(e, xp)
        if "://" in member:  # looks like a URL and wasn't an entity or collection - recurse away!
            log.debug("recursively fetching members from '%s'" % member)
            # note that this supports remote lists which may be more rope than is healthy
            return [self._lookup(line, xp) for line in urllib.urlopen(member).iterlines()]
        return []
    elif hasattr(member, '__iter__') and type(member) is not dict:
        # Any other iterable: look up each item; empty means "everything".
        if not len(member):
            member = self.keys()
        return [self._lookup(m, xp) for m in member]
    else:
        raise MetadataException("What about %s ??" % member)
# NOTE(review): nested worker that joins fetcher threads and imports the
# metadata each one retrieved (XRD link documents queue follow-up jobs;
# EntityDescriptor(s) are imported, honouring cacheDuration). This chunk is
# truncated mid-function - the trailing 'finally:' has no suite - so the
# code is preserved byte-for-byte below; complete the finally block against
# the original file before restructuring.
def consumer(q, njobs, stats, next_jobs=None, resolved=None): if next_jobs is None: next_jobs = [] if resolved is None: resolved = set() nfinished = 0 while nfinished < njobs: info = None try: log.debug("waiting for next thread to finish...") thread = q.get(True) thread.join(timeout) if thread.isAlive(): raise MetadataException("thread timeout fetching '%s'" % thread.url) info = { 'Time Spent': thread.time() } if thread.ex is not None: raise thread.ex else: if thread.result is not None: info['Bytes'] = len(thread.result) else: raise MetadataException("empty response fetching '%s'" % thread.url) info['Cached'] = thread.cached info['Date'] = str(thread.date) info['Last-Modified'] = str(thread.last_modified) info['Tries'] = thread.tries xml = thread.result.strip() if thread.status is not None: info['Status'] = thread.status t = self.parse_metadata(StringIO(xml), key=thread.verify, base_url=thread.url) if t is None: self.fire(type=EVENT_IMPORT_FAIL, url=thread.url) raise MetadataException("no valid metadata found at '%s'" % thread.url) relt = root(t) if relt.tag in ('{%s}XRD' % NS['xrd'], '{%s}XRDS' % NS['xrd']): log.debug("%s looks like an xrd document" % thread.url) for xrd in t.xpath("//xrd:XRD", namespaces=NS): log.debug("xrd: %s" % xrd) for link in xrd.findall(".//{%s}Link[@rel='%s']" % (NS['xrd'], NS['md'])): url = link.get("href") certs = xmlsec.CertDict(link) fingerprints = certs.keys() fp = None if len(fingerprints) > 0: fp = fingerprints[0] log.debug("fingerprint: %s" % fp) next_jobs.append((url, fp, url, 0)) elif relt.tag in ('{%s}EntityDescriptor' % NS['md'], '{%s}EntitiesDescriptor' % NS['md']): cacheDuration = self.default_cache_duration if self.respect_cache_duration: cacheDuration = root(t).get('cacheDuration', self.default_cache_duration) offset = duration2timedelta(cacheDuration) if thread.cached: if thread.last_modified + offset < datetime.now() - duration2timedelta(self.min_cache_ttl): raise MetadataException("cached metadata expired") else: 
log.debug("found cached metadata for '%s' (last-modified: %s)" % (thread.url, thread.last_modified)) ne = self.import_metadata(t, url=thread.id) info['Number of Entities'] = ne else: log.debug("got fresh metadata for '%s' (date: %s)" % (thread.url, thread.date)) ne = self.import_metadata(t, url=thread.id) info['Number of Entities'] = ne info['Cache Expiration Time'] = str(thread.last_modified + offset) certs = xmlsec.CertDict(relt) cert = None if certs.values(): cert = certs.values()[0].strip() resolved.add((thread.url, cert)) else: raise MetadataException("unknown metadata type for '%s' (%s)" % (thread.url, relt.tag)) except Exception, ex: #traceback.print_exc(ex) log.warn("problem fetching '%s' (will retry): %s" % (thread.url, ex)) if info is not None: info['Exception'] = ex if thread.tries < self.retry_limit: next_jobs.append((thread.url, thread.verify, thread.id, thread.tries + 1)) else: #traceback.print_exc(ex) log.error("retry limit exceeded for %s (last error was: %s)" % (thread.url, ex)) finally:
def certreport(req, *opts):
    """
    Generate a report of the certificates (optionally limited by expiration
    time) found in the selection.

    :param req: The request
    :param opts: Options (not used)
    :return: always returns the unmodified working document

    **Examples**

    .. code-block:: yaml

        - certreport:
             error_seconds: 0
             warning_seconds: 864000

    Remember that you need a 'publish' or 'emit' call after certreport in
    your plumbing to get useful output. PyFF ships with a couple of xslt
    transforms that are useful for turning metadata with certreport
    annotation into HTML.
    """
    if req.t is None:
        raise PipeException("Your plumbing is missing a select statement.")

    if not req.args:
        req.args = {}

    if type(req.args) is not dict:
        raise PipeException("usage: certreport {warning: 864000, error: 0}")

    error_seconds = int(req.args.get('error', "0"))
    warning_seconds = int(req.args.get('warning', "864000"))

    seen = {}  # SHA1 fingerprints already reported - duplicates are skipped
    for eid in req.t.xpath("//md:EntityDescriptor/@entityID", namespaces=NS):
        for cd in req.t.xpath("md:EntityDescriptor[@entityID='%s']//ds:X509Certificate" % eid,
                              namespaces=NS):
            try:
                cert_pem = cd.text
                cert_der = base64.b64decode(cert_pem)
                m = hashlib.sha1()
                m.update(cert_der)
                fp = m.hexdigest()
                if not seen.get(fp, False):
                    seen[fp] = True
                    # Hoisted: the enclosing EntityDescriptor is five levels
                    # up from the ds:X509Certificate node; compute it once
                    # instead of repeating the chain in both branches.
                    entity_elt = cd.getparent().getparent().getparent().getparent().getparent()
                    cdict = xmlsec.b642cert(cert_pem)
                    cert = cdict['cert']
                    # NOTE(review): '%Y' expects a 4-digit year; a sibling
                    # certreport variant uses '%y'. Confirm which notAfter
                    # encoding b642cert returns before changing this.
                    et = datetime.strptime("%s" % cert.getNotAfter(), "%Y%m%d%H%M%SZ")
                    now = datetime.now()
                    dt = et - now
                    if total_seconds(dt) < error_seconds:
                        req.md.annotate(entity_elt, "certificate-error", "certificate has expired",
                                        "%s expired %s ago" % (cert.getSubject(), -dt))
                        log.error("%s expired %s ago" % (eid, -dt))
                    elif total_seconds(dt) < warning_seconds:
                        req.md.annotate(entity_elt, "certificate-warning", "certificate about to expire",
                                        "%s expires in %s" % (cert.getSubject(), dt))
                        log.warn("%s expires in %s" % (eid, dt))
            except Exception as ex:
                # Best-effort per certificate: a malformed cert must not
                # abort the whole report.
                log.error(ex)
def certreport(req, *opts):
    """
    Generate a report of the certificates (optionally limited by expiration
    time or key size) found in the selection.

    :param req: The request
    :param opts: Options (not used)
    :return: always returns the unmodified working document

    **Examples**

    .. code-block:: yaml

        - certreport:
             error_seconds: 0
             warning_seconds: 864000
             error_bits: 1024
             warning_bits: 2048

    For key size checking this will report keys with a size *less* than the
    size specified, defaulting to errors for keys smaller than 1024 bits and
    warnings for keys smaller than 2048 bits. It should be understood as the
    minimum key size for each report level, as such everything below will
    create report entries.

    Remember that you need a 'publish' or 'emit' call after certreport in
    your plumbing to get useful output. PyFF ships with a couple of xslt
    transforms that are useful for turning metadata with certreport
    annotation into HTML.
    """
    if req.t is None:
        raise PipeException("Your pipeline is missing a select statement.")

    if not req.args:
        req.args = {}

    if type(req.args) is not dict:
        raise PipeException("usage: certreport {warning: 864000, error: 0}")

    error_seconds = int(req.args.get('error_seconds', "0"))
    warning_seconds = int(req.args.get('warning_seconds', "864000"))
    error_bits = int(req.args.get('error_bits', "1024"))
    warning_bits = int(req.args.get('warning_bits', "2048"))

    # SHA1 fingerprints already reported - duplicates are skipped.
    seen = {}
    for eid in req.t.xpath("//md:EntityDescriptor/@entityID",
                           namespaces=NS, smart_strings=False):
        for cd in req.t.xpath(
                "md:EntityDescriptor[@entityID='%s']//ds:X509Certificate" % eid,
                namespaces=NS, smart_strings=False):
            try:
                cert_pem = cd.text
                cert_der = base64.b64decode(cert_pem)
                m = hashlib.sha1()
                m.update(cert_der)
                fp = m.hexdigest()
                if not seen.get(fp, False):
                    # The enclosing EntityDescriptor element sits five levels
                    # above the ds:X509Certificate node.
                    entity_elt = cd.getparent().getparent().getparent(
                    ).getparent().getparent()
                    seen[fp] = True
                    cdict = xmlsec.utils.b642cert(cert_pem)
                    keysize = cdict['modulus'].bit_length()
                    cert = cdict['cert']
                    if keysize < error_bits:
                        req.md.annotate(
                            entity_elt, "certificate-error", "keysize too small",
                            "%s has keysize of %s bits (less than %s)" % (cert.getSubject(), keysize, error_bits))
                        log.error("%s has keysize of %s" % (eid, keysize))
                    elif keysize < warning_bits:
                        req.md.annotate(
                            entity_elt, "certificate-warning", "keysize small",
                            "%s has keysize of %s bits (less than %s)" % (cert.getSubject(), keysize, warning_bits))
                        log.warn("%s has keysize of %s" % (eid, keysize))
                    notafter = cert.getNotAfter()
                    if notafter is None:
                        # No expiration at all is itself an error condition.
                        req.md.annotate(
                            entity_elt, "certificate-error",
                            "certificate has no expiration time",
                            "%s has no expiration time" % cert.getSubject())
                    else:
                        try:
                            # NOTE(review): '%y' parses a 2-digit year (ASN.1
                            # UTCTime); a sibling variant uses '%Y' - confirm.
                            et = datetime.strptime("%s" % notafter, "%y%m%d%H%M%SZ")
                            now = datetime.now()
                            dt = et - now
                            if total_seconds(dt) < error_seconds:
                                req.md.annotate(
                                    entity_elt, "certificate-error",
                                    "certificate has expired",
                                    "%s expired %s ago" % (cert.getSubject(), -dt))
                                log.error("%s expired %s ago" % (eid, -dt))
                            elif total_seconds(dt) < warning_seconds:
                                req.md.annotate(
                                    entity_elt, "certificate-warning",
                                    "certificate about to expire",
                                    "%s expires in %s" % (cert.getSubject(), dt))
                                log.warn("%s expires in %s" % (eid, dt))
                        except ValueError as ex:
                            # Unparseable notAfter - report rather than crash.
                            req.md.annotate(
                                entity_elt, "certificate-error",
                                "certificate has unknown expiration time",
                                "%s unknown expiration time %s" % (cert.getSubject(), notafter))
            except Exception as ex:
                # Best-effort per certificate: a malformed cert must not
                # abort the whole report.
                log.error(ex)
# NOTE(review): this chunk starts mid-definition (the atomic write-to-temp-
# file-then-rename helper whose 'def' line is not visible here) and ends in
# a truncated URLFetch.__init__, so it is preserved byte-for-byte. Between
# the fragments: site_dir/templates set up the Mako template directory and
# template() resolves a template by name. Complete both truncated
# definitions against the original file before restructuring.
fn = os.path.expanduser(fn) dirname, basename = os.path.split(fn) with tempfile.NamedTemporaryFile('w', delete=False, prefix=".%s" % basename, dir=dirname) as tmp: tmp.write(data) tmpn = tmp.name if os.path.exists(tmpn) and os.stat(tmpn).st_size > 0: os.rename(tmpn, fn) return True except Exception, ex: log.error(ex) finally: if tmpn is not None and os.path.exists(tmpn): try: os.unlink(tmpn) except Exception, ex: log.warn(ex) pass return False site_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "site") templates = TemplateLookup(directories=[os.path.join(site_dir, 'templates')]) def template(name): return templates.get_template(name) class URLFetch(threading.Thread): def __init__(self, url, verify, id=None, enable_cache=False, tries=0): self.url = url.strip()