def test_pre_simile_missing_id():
    obj = atomtools.ejsonize(filesource('rfc4287-1-1-2.atom').source)
    obj[0][u'type'] = u'atom:entry'
    # Remove the id field before prepping
    del obj[0][u'id']
    prepped = exhibit.prep(obj, schema=PIPELINES)
def test_pre_simile1():
    obj = atomtools.ejsonize(filesource('rfc4287-1-1-2.atom').source)
    obj[0][u'type'] = u'atom:entry'
    prepped = exhibit.prep(obj, schema=PIPELINES)
def test_pre_simile_missing_field():
    obj = atomtools.ejsonize(filesource('rfc4287-1-1-2.atom').source)
    obj[0][u'type'] = u'atom:entry'
    # Add a requirement for a "spam" field
    epipelines_plus_spam = {u'spam': (first_item, exhibit.REQUIRE)}
    epipelines_plus_spam.update(ENTRY_PIPELINE)
    # Don't bother with atom:feed; unused in this test
    new_pipelines = {u'atom:entry': epipelines_plus_spam}
    prepped = exhibit.prep(obj, schema=new_pipelines)
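# For orientation, a minimal sketch of the schema shape exhibit.prep consumes,
# inferred from the tests above. Only the (transform, flags) tuple form and
# the exhibit.REQUIRE flag appear in this file; the u'label' key and the 0
# flag below are hypothetical, and REQUIRE presumably marks a property that
# prep treats as mandatory:
#
# EXAMPLE_PIPELINES = {
#     u'atom:entry': {
#         u'label': (first_item, exhibit.REQUIRE),  # prep flags this if absent
#         u'spam': (first_item, 0),                 # optional property
#     },
# }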
def atom_json(url):
    '''
    Convert Atom syntax to Exhibit JSON
    (see: http://www.ibm.com/developerworks/web/library/wa-realweb6/ ;
    this is based on listing 3)

    Sample requests:
    * curl "http://localhost:8880/akara.atom.json?url=http://zepheira.com/feeds/news.atom"
    * curl "http://localhost:8880/akara.atom.json?url=http://picasaweb.google.com/data/feed/base/user/dysryi/albumid/5342439351589940049"
    * curl "http://localhost:8880/akara.atom.json?url=http://earthquake.usgs.gov/eqcenter/catalogs/7day-M2.5.xml"
    '''
    entries = atomtools.ejsonize(url)
    return json.dumps({'items': entries}, indent=4)
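# For illustration, the JSON emitted above has this overall shape. The item
# fields come from atomtools.ejsonize (label, title, updated, link, and id
# are the fields consumed elsewhere in this module); the values shown are
# hypothetical:
#
# {
#     "items": [
#         {
#             "id": "...",
#             "label": "...",
#             "title": "...",
#             "updated": "...",
#             "link": ["..."]
#         }
#     ]
# }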
def atom_moin(body, ctype, maxcount=None, folder=None, feed=None):
    # Sample query:
    # curl --request POST "http://localhost:8880/atom.moin?feed=http://bitworking.org/news/feed/&maxcount=10&folder=foo091023"
    # You can set ...&maxcount=100 or whatever number, if you like
    maxcount = int(maxcount if maxcount else DEFAULT_MAX)
    H = httplib2.Http('.cache')
    if USER:
        H.add_credentials(USER, PASSWD)

    # Prepare the envelope for the output (POST response)
    w = structencoder()
    output = w.cofeed(ROOT(E_CURSOR(u'updates', {u'feed': feed})))
    logger.debug('Feed: ' + feed)

    entries = atomtools.ejsonize(feed)
    for entry in islice(entries, 0, maxcount):
        try:
            logger.debug('ENTRY: ' + repr(entry))
            aid = entry[u'label']
            slug = atomtools.slug_from_title(aid)
            dest = folder + '/' + slug
            # Build the wiki page body as a list of MoinMoin-markup chunks
            chunks = [' title:: ' + entry[u'title']]
            chunks.append(' last changed:: ' + entry[u'updated'])
            chunks.append(' link:: ' + (first_item(entry[u'link']) or ''))
            if u'summary' in entry:
                chunks.append('= Summary =\n' + entry[u'summary'])
            if u'content_src' in entry:
                chunks.append('= Content =\n' + entry[u'content_src'])
            if u'content_text' in entry:
                chunks.append('= Content =\n' + entry[u'content_text'])
            if u'categories' in entry:
                chunks.append(u'= Categories =')
                for category in entry[u'categories']:
                    chunks.append(' * ' + category)
            chunks.append(' id:: ' + entry[u'id'])
            chunks.append('= akara:metadata =\n akara:type:: http://purl.org/com/zepheira/zen/resource/webfeed\n')

            # PUT the assembled page to the target wiki
            url = absolutize(dest, MOINBASE)
            headers = {'Content-Type': 'text/plain'}
            resp, content = H.request(url, "PUT", body='\n'.join(chunks).encode('utf-8'), headers=headers)
            logger.debug("Result: " + repr((resp, content)))
            output.send(E(u'update', {u'entry-id': entry[u'id'], u'page': url}))
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception as e:
            logger.info('Exception handling Entry page: ' + repr(e))
            output.send(E(u'failure', {u'entry-id': entry[u'id']}))
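# The POST response streamed through `output` above is an XML envelope of
# roughly this shape, with one update (or failure) element per processed
# entry; the attribute values here are hypothetical:
#
# <updates feed="http://bitworking.org/news/feed/">
#   <update entry-id="..." page="http://.../foo091023/some-slug"/>
#   <failure entry-id="..."/>
# </updates>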