Exemplo n.º 1
0
def get_sheet_id(name):
    """Resolve *name* — a numeric index or a sheet name — to a sheet id.

    Prints an error to stderr and exits with status 1 when no sheet matches.
    """
    data = request('sheets').get('data', [])
    slugs = slugify([sheet['name'] for sheet in data])
    try:
        if name.isdigit():
            position = int(name)
        else:
            position = slugs.index(slugify(name))
        return data[position]['id']
    except (IndexError, ValueError):
        # Covers both an out-of-range numeric index and a missing slug.
        print('Sheet not found.', file=sys.stderr)
        sys.exit(1)
Exemplo n.º 2
0
def sheets(name):
    """List all sheets, or dump the rows of the sheet selected by name[0]."""
    if not name:
        # No argument: print an enumerated, colored list of sheet names.
        all_sheets = request('sheets')
        slugs = slugify([entry['name'] for entry in all_sheets.get('data', [])])
        for line in get_colored_enumerated_list(slugs):
            click.echo(line)
    else:
        # Argument given: fetch that sheet and dump its rows as JSON.
        sheet_id = get_sheet_id(name[0])
        payload = request('sheets/{id_}'.format(id_=sheet_id))
        print(json.dumps(get_rows(payload), indent=2, sort_keys=True))
Exemplo n.º 3
0
# encoding: utf8

import urllib, urllib2
from redis import Redis
from slugs import slugify, unslugify
import json
import time

if __name__=="__main__":
    # Poll loop: every 10 minutes, fetch the Facebook comments-box count for
    # each known (book, slug) pair and cache the results in Redis.
    r = Redis()
    while True:
        # 'everything' is expected to be a JSON list of items, each with a
        # base.book field and a slug.
        # NOTE(review): assumes the key exists — json.loads(None) would raise.
        everything = r.get('everything')
        everything = json.loads(everything)
        everything = [ (x['base']['book'], x['slug']) for x in everything ]
        results = {}
        for book,slug in everything:
            # Public URL for this book/slug, as referenced by the FB comment box.
            url = "http://watch.gov.il/#!z=b:%s_s:%s" % (slugify(book),slug)
            # FQL query asking Facebook for the comments-box count of that URL.
            query = urllib.urlencode([("query","SELECT url,commentsbox_count FROM link_stat WHERE url='%s'" % url),
                                      ('format','json')])
            response = urllib2.urlopen('https://api.facebook.com/method/fql.query?%s' % query).read()
            print response
            try:
                response = json.loads(response)
            except:
                # Non-JSON reply (e.g. an API error page): abandon this pass
                # and store whatever was collected so far.
                break
            commentsbox_count = response[0]['commentsbox_count']
            results[slug] = commentsbox_count
            print slug,commentsbox_count
        # Persist the (possibly partial) counts, then wait 10 minutes.
        r.set('fbcomments',json.dumps(results,indent=0))
        time.sleep(600)
Exemplo n.º 4
0
    return redirect('/list')

if __name__=="__main__":
    # Startup: load data.json into Redis, seed per-slug records and profile
    # images, then launch the web server.
    r = Redis()
    everything = file('data.json').read()
    # Version the dataset by the data file's modification time.
    r.set('version',int(os.stat('data.json').st_mtime))
    r.set("everything",everything)
    data = json.loads(everything)
    for x in data:
        key = "slug:%s" % x["slug"]
        if r.exists(key):
            # Existing record: refresh its subscriber count from the
            # corresponding subscribe:<key> set.
            skey = "subscribe:%s" % key
            x["subscribers"] = r.scard(skey)
        else:
            # New record: default the subscriber count to zero.
            x.setdefault('subscribers',0)
        r.set(key,json.dumps(x,indent=0))
    # Cache raw profile-image bytes under profile:<slug> keys.
    # NOTE(review): 'profiles' is presumably a dict defined elsewhere in the
    # file — name -> image filename; confirm against the full module.
    for profile_name, profile_image in profiles.iteritems():
        r.set("profile:%s" % slugify(profile_name), file('static/img/%s' % profile_image).read())

    try:
        # Prefer the gevent WSGI server when gevent is importable.
        from gevent import monkey ; monkey.patch_all()
        from gevent.wsgi import WSGIServer

        http_server = WSGIServer(('', 5000), app)
        print "note: running with greenlet"
        http_server.serve_forever()

    except:
        # gevent unavailable (or failed to start): fall back to the app's
        # built-in development server.
        print "note: running without greenlet"
        app.run(host="0.0.0.0",debug=True)