def destroy():
    """Delete the id index file and remove its directory from the cache."""
    from planet import logger as log

    # Nothing to do when the index directory was never created.
    index_dir = os.path.join(config.cache_directory(), 'index')
    if not os.path.exists(index_dir):
        return None

    idindex = filename(index_dir, 'id')
    if os.path.exists(idindex):
        os.unlink(idindex)
    # removedirs also prunes now-empty parent directories.
    os.removedirs(index_dir)
    log.info(idindex + " deleted")
def destroy():
    """Delete the id index database file and remove the index directory.

    Returns None without touching anything when the index directory does
    not exist.  Fix: the "deleted" message is now logged only when the id
    file actually existed and was unlinked; previously it was emitted
    unconditionally, claiming a deletion that may never have happened.
    """
    from planet import logger as log
    cache = config.cache_directory()
    index = os.path.join(cache, 'index')
    if not os.path.exists(index):
        return None
    idindex = filename(index, 'id')
    if os.path.exists(idindex):
        os.unlink(idindex)
        log.info(idindex + " deleted")
    # NOTE: removedirs also removes empty parent directories (matches the
    # original behaviour); it raises OSError if the directory is non-empty.
    os.removedirs(index)
def open(): try: cache = config.cache_directory() index = os.path.join(cache, 'index') if not os.path.exists(index): return None import anydbm return anydbm.open(filename(index, 'id'), 'w') except Exception, e: if e.__class__.__name__ == 'DBError': e = e.args[-1] from planet import logger as log log.error(str(e))
def open(): try: cache = config.cache_directory() index=os.path.join(cache,'index') if not os.path.exists(index): return None import dbhash return dbhash.open(filename(index, 'id'),'w') except Exception, e: if e.__class__.__name__ == 'DBError': e = e.args[-1] from planet import logger as log log.error(str(e))
def open(): try: cache = config.cache_directory() index = os.path.join(cache, "index") if not os.path.exists(index): return None return dbhash.open(filename(index, "id"), "w") except Exception, e: if e.__class__.__name__ == "DBError": e = e.args[-1] from planet import logger as log log.error(str(e))
def create():
    """Build the entry-id -> source-feed-id index from the cached entries.

    Scans every file in the cache directory, extracts the Atom entry id
    and its source feed id (with libxml2 when available, falling back to
    minidom), and stores the mapping in a new 'id' dbm database.
    Returns the index reopened for writing via open().

    Fix: the minidom branch called doc.freeDoc(), which is libxml2 API —
    minidom documents have no such method, so every file parsed with
    minidom raised AttributeError (after indexing) and was spuriously
    logged as an error.  The correct minidom release call is unlink().
    """
    from planet import logger as log
    cache = config.cache_directory()
    index = os.path.join(cache, 'index')
    if not os.path.exists(index):
        os.makedirs(index)
    import anydbm
    index = anydbm.open(filename(index, 'id'), 'c')

    # Prefer libxml2 (faster); fall back to the stdlib minidom parser.
    try:
        import libxml2
    except ImportError:
        libxml2 = False
        from xml.dom import minidom

    for file in glob(cache + "/*"):
        if os.path.isdir(file):
            continue
        elif libxml2:
            try:
                doc = libxml2.parseFile(file)
                ctxt = doc.xpathNewContext()
                ctxt.xpathRegisterNs('atom', 'http://www.w3.org/2005/Atom')
                entry = ctxt.xpathEval('/atom:entry/atom:id')
                source = ctxt.xpathEval('/atom:entry/atom:source/atom:id')
                if entry and source:
                    index[filename('', entry[0].content)] = source[0].content
                doc.freeDoc()
            except:
                # best-effort: skip unparseable files, but note them
                log.error(file)
        else:
            try:
                doc = minidom.parse(file)
                doc.normalize()
                ids = doc.getElementsByTagName('id')
                entry = [e for e in ids if e.parentNode.nodeName == 'entry']
                source = [e for e in ids if e.parentNode.nodeName == 'source']
                if entry and source:
                    index[filename('', entry[0].childNodes[0].nodeValue)] = \
                        source[0].childNodes[0].nodeValue
                doc.unlink()  # was doc.freeDoc(): libxml2 API, not minidom
            except:
                log.error(file)

    log.info(str(len(index.keys())) + " entries indexed")
    index.close()
    return open()
def create():
    """Create the id index mapping entry ids to their source feed ids.

    Iterates over the cached entry files, pulling out each Atom entry's
    id and its source feed's id (libxml2 if importable, else minidom),
    and writes the pairs into a fresh 'id' dbhash database.  Returns the
    index reopened for writing via open().

    Fix: minidom documents are released with unlink(); the previous
    doc.freeDoc() call (libxml2 API) raised AttributeError inside the
    bare except, so every minidom-parsed file was logged as an error.
    """
    from planet import logger as log
    cache = config.cache_directory()
    index = os.path.join(cache, 'index')
    if not os.path.exists(index):
        os.makedirs(index)
    import dbhash
    index = dbhash.open(filename(index, 'id'), 'c')

    # libxml2 is optional; minidom is the stdlib fallback.
    try:
        import libxml2
    except ImportError:
        libxml2 = False
        from xml.dom import minidom

    for file in glob(cache + "/*"):
        if os.path.isdir(file):
            continue
        elif libxml2:
            try:
                doc = libxml2.parseFile(file)
                ctxt = doc.xpathNewContext()
                ctxt.xpathRegisterNs('atom', 'http://www.w3.org/2005/Atom')
                entry = ctxt.xpathEval('/atom:entry/atom:id')
                source = ctxt.xpathEval('/atom:entry/atom:source/atom:id')
                if entry and source:
                    index[filename('', entry[0].content)] = source[0].content
                doc.freeDoc()
            except:
                # best-effort: unparseable files are skipped but noted
                log.error(file)
        else:
            try:
                doc = minidom.parse(file)
                doc.normalize()
                ids = doc.getElementsByTagName('id')
                entry = [e for e in ids if e.parentNode.nodeName == 'entry']
                source = [e for e in ids if e.parentNode.nodeName == 'source']
                if entry and source:
                    index[filename('', entry[0].childNodes[0].nodeValue)] = \
                        source[0].childNodes[0].nodeValue
                doc.unlink()  # was doc.freeDoc(): libxml2 API, not minidom
            except:
                log.error(file)

    log.info(str(len(index.keys())) + " entries indexed")
    index.close()
    return open()
# parse query parameters form = cgi.FieldStorage() # Start HTML output at once print "Content-Type: text/html;charset=utf-8" # HTML is following print # blank line, end of headers print '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">' print '<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="sv"><head><meta http-equiv="Content-Type" content="text/html;charset=utf-8" /><title>Admin results</title></head><body>' print '<div>' # Cache and blacklist dirs cache = config.cache_directory() blacklist = config.cache_blacklist_directory() # Must have command parameter if not "command" in form: print "<p>Unknown command</p>" elif form['command'].value == "blacklist": # Create the blacklist dir if it does not exist if not os.path.exists(blacklist): os.mkdir(blacklist) print "<p>Created directory %s</p>" % blacklist # find list of urls, in the form bl[n]=url
config.load(CONFIG_FILE) # parse query parameters form = cgi.FieldStorage() # Start HTML output at once print "Content-Type: text/html;charset=utf-8" # HTML is following print # blank line, end of headers print '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">' print '<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="sv"><head><meta http-equiv="Content-Type" content="text/html;charset=utf-8" /><title>Admin results</title></head><body>' print '<div>' # Cache and blacklist dirs cache = config.cache_directory() blacklist = config.cache_blacklist_directory() # Must have command parameter if not "command" in form: print "<p>Unknown command</p>" elif form['command'].value == "blacklist": # Create the blacklist dir if it does not exist if not os.path.exists(blacklist): os.mkdir(blacklist) print "<p>Created directory %s</p>" % blacklist # find list of urls, in the form bl[n]=url