def initialize(self):
    """Bootstrap the store by creating the root '/type/type' record.

    Idempotent: does nothing if the database already contains the seed
    data. Creates the thing row, points its type at itself, and writes
    the matching version and data rows, all inside one transaction.
    """
    if not self.initialized():
        t = self.db.transaction()
        id = self.new_thing(key='/type/type')
        # NOTE(review): naive UTC timestamp; presumably the rest of the
        # schema stores naive datetimes too — confirm before changing.
        last_modified = datetime.datetime.utcnow()
        data = dict(
            key='/type/type',
            type={'key': '/type/type'},
            last_modified={'type': '/type/datetime', 'value': last_modified},
            created={'type': '/type/datetime', 'value': last_modified},
            revision=1,
            latest_revision=1,
            id=id,
        )
        # '/type/type' is its own type; the `$id` placeholder is resolved
        # from locals(), so the local name `id` must not be renamed.
        self.db.update('thing', type=id, where='id=$id', vars=locals())
        # seqname=False: these tables do not use an auto-increment sequence.
        self.db.insert('version', False, thing_id=id, revision=1)
        self.db.insert('data', False, thing_id=id, revision=1, data=simplejson.dumps(data))
        t.commit()
def GET(self, brief_or_full, idtype, idval):
    """Resolve a single identifier (e.g. isbn:123) and return the
    readlink result as a JSON string."""
    options = web.input()
    # Clear any headers accumulated so far; the response is raw JSON.
    web.ctx.headers = []
    bibkey = '%s:%s' % (idtype, idval)
    payload = readlinks.readlink_single(bibkey, options)
    return simplejson.dumps(payload)
def process(query):
    """Stream the rows of *query* as one JSON object, emitting
    '"key": <data>' pairs lazily to avoid buffering the whole result."""
    yield '{\n'
    first = True
    for row in self.db.query(query):
        if not first:
            # Separator goes before every entry except the first.
            yield ',\n'
        first = False
        yield simplejson.dumps(row.key)
        yield ": "
        yield process_json(row.key, row.data)
    yield '}'
def GET(self, brief_or_full, idtype, idval):
    """Resolve one identifier through the multi-key readlinks API and
    return just that key's entry (or [] if absent) as JSON."""
    options = web.input()
    # Clear any headers accumulated so far; the response is raw JSON.
    web.ctx.headers = []
    req = '%s:%s' % (idtype, idval)
    response = readlinks.readlinks(req, options)
    # The multi API keys its response by request string; missing -> [].
    return simplejson.dumps(response.get(req, []))
def GET(self, brief_or_full, bibkey_str):
    """Resolve multiple bibkeys (pipe-separated) and return the
    readlink_multiple result as a JSON string.

    The route params are re-derived from the raw URI below, so the
    arguments passed in by the dispatcher are effectively ignored.
    """
    i = web.input()
    # Work around issue with gunicorn where semicolon and after
    # get truncated. (web.input() still seems ok)
    # see https://github.com/benoitc/gunicorn/issues/215
    raw_uri = web.ctx.env.get("RAW_URI")
    raw_path = urlparse.urlsplit(raw_uri).path
    # handle e.g. '%7C' for '|'
    decoded_path = urllib2.unquote(raw_path)
    m = self.path_re.match(decoded_path)
    # Bug fix: re.match returns None when the path doesn't match, and the
    # old `len(m.groups())` check then raised AttributeError instead of
    # returning the empty-result JSON.
    if m is None or len(m.groups()) != 2:
        return simplejson.dumps({})
    (brief_or_full, bibkey_str) = m.groups()
    web.ctx.headers = []
    result = readlinks.readlink_multiple(bibkey_str, i)
    return simplejson.dumps(result)
def GET(self, brief_or_full, req):  # params aren't used, see below
    """Resolve a readlinks request string and return the result as JSON.

    The route params are re-derived from the raw URI below, so the
    arguments passed in by the dispatcher are effectively ignored.
    """
    i = web.input()
    # Work around issue with gunicorn where semicolon and after
    # get truncated. (web.input() still seems ok)
    # see https://github.com/benoitc/gunicorn/issues/215
    raw_uri = web.ctx.env.get("RAW_URI")
    raw_path = urlparse.urlsplit(raw_uri).path
    # handle e.g. '%7C' for '|'
    decoded_path = urllib2.unquote(raw_path)
    m = self.path_re.match(decoded_path)
    # Bug fix: re.match returns None when the path doesn't match, and the
    # old `len(m.groups())` check then raised AttributeError instead of
    # returning the empty-result JSON.
    if m is None or len(m.groups()) != 2:
        return simplejson.dumps({})
    (brief_or_full, req) = m.groups()
    web.ctx.headers = []
    result = readlinks.readlinks(req, i)
    return simplejson.dumps(result)
def GET(self):
    """Return book info for comma-separated bibkeys as JSONP.

    If a `callback` parameter is supplied and is a safe JavaScript
    identifier path, the JSON is wrapped in a call to it; otherwise the
    result is assigned to the `_OLBookInfo` variable.
    """
    import re  # local import: validation only needed on this endpoint
    i = web.input(bibkeys='', callback=None, details="false")
    details = (i.details.lower() == 'true')
    result = dynlinks.get_multi(i.bibkeys.split(','), details=details)
    result = simplejson.dumps(result, indent=4)
    web.ctx.headers = []
    web.header('Content-Type', 'text/javascript')
    # Security: the callback name is untrusted input echoed into an
    # executable JS response. Only honor plain identifier paths
    # (e.g. `cb`, `my.ns.cb`) to prevent JSONP response injection (XSS);
    # anything else falls back to the variable-assignment form.
    safe_callback = re.compile(r'^[A-Za-z_$][A-Za-z0-9_$]*(\.[A-Za-z_$][A-Za-z0-9_$]*)*$')
    if i.callback and safe_callback.match(i.callback):
        return '%s(%s);' % (i.callback, result)
    else:
        return 'var _OLBookInfo = %s;' % result
def save_many(self, docs, timestamp, comment, data, ip, author, action=None):
    """Save a batch of documents as a single changeset.

    Formats the docs, delegates the write to SaveImpl, then refreshes
    the per-request object cache with the saved revisions. Returns the
    changeset produced by the save.
    """
    doc_list = list(docs)
    action = action or "bulk_update"
    logger.debug(
        "saving %d docs - %s",
        len(doc_list),
        dict(
            timestamp=timestamp,
            comment=comment,
            data=data,
            ip=ip,
            author=author,
            action=action,
        ),
    )
    saver = SaveImpl(self.db, self.schema, self.indexer, self.property_manager)
    # Hack to allow processing of json data before using. Required for OL legacy.
    saver.process_json = process_json
    changeset = saver.save(
        common.format_data(doc_list),
        timestamp=timestamp,
        comment=comment,
        ip=ip,
        author=author,
        action=action,
        data=data,
    )
    # Update the cache from the result docs, since those carry the new
    # revision and last_modified fields.
    for saved_doc in changeset.get('docs', []):
        web.ctx.new_objects[saved_doc['key']] = simplejson.dumps(saved_doc)
    return changeset