def pdf_viewer(request, id):
    """Render the PDF viewer page for a text, or a JSON 404 if unavailable."""
    text = get_text_or_404_json(id)
    is_ready_pdf = text.type == 'pdf' and text.file and not text.uploading
    if not is_ready_pdf:
        # Not a fully uploaded PDF -> respond with a JSON error body.
        return render_to_json_response(json_response(status=404, text='file not found'))
    template_vars = {
        'editable': json.dumps(text.editable(request.user)),
        'embeds': json.dumps(text.embeds),
        'settings': settings,
        'url': text.get_absolute_pdf_url(),
    }
    return render_to_response('pdf/viewer.html', RequestContext(request, template_vars))
def pdf_viewer(request, id):
    """Serve the in-browser PDF viewer for text `id`; JSON 404 when no viewable file exists."""
    text = get_text_or_404_json(id)
    if text.type == 'pdf' and text.file and not text.uploading:
        ctx = RequestContext(request, {
            'editable': json.dumps(text.editable(request.user)),
            'embeds': json.dumps(text.embeds),
            'settings': settings,
            'url': text.get_absolute_pdf_url(),
        })
        return render_to_response('pdf/viewer.html', ctx)
    # Fall through: wrong type, no file, or the upload has not finished yet.
    return render_to_json_response(json_response(status=404, text='file not found'))
def redirect_url(request, url):
    """Redirect to `url`, preserving the incoming query string.

    When CONFIG['sendReferrer'] is false, a JavaScript redirect is used so
    the browser does not send a Referer header to the target site.
    """
    if request.META['QUERY_STRING']:
        url += "?" + request.META['QUERY_STRING']
    if settings.CONFIG.get('sendReferrer', False):
        return redirect(url)
    else:
        # SECURITY FIX: json.dumps does not escape '<' or '>', so a crafted
        # url containing "</script>" could break out of the script element
        # (XSS). Escape both to their \uXXXX forms, which are valid JSON
        # string escapes and inert inside HTML.
        safe_url = json.dumps(url).replace('<', '\\u003c').replace('>', '\\u003e')
        return HttpResponse('<script>document.location.href=%s;</script>' % safe_url)
def redirect_url(request, url):
    """Redirect to `url`, carrying over the request's query string.

    If CONFIG['sendReferrer'] is false, emit a JS redirect instead of an
    HTTP one so no Referer header reaches the destination.
    """
    if request.META['QUERY_STRING']:
        url += "?" + request.META['QUERY_STRING']
    if settings.CONFIG.get('sendReferrer', False):
        return redirect(url)
    else:
        # SECURITY FIX: escape '<'/'>' so "</script>" inside url cannot
        # terminate the script element (json.dumps alone leaves them raw).
        safe_url = json.dumps(url).replace('<', '\\u003c').replace('>', '\\u003e')
        return HttpResponse('<script>document.location.href=%s;</script>' % safe_url)
def get_db_prep_save(self, value, connection):
    """Convert our JSON object to a string before we save.

    Accepts a tuple, a list (coerced to tuple), or a string holding a
    Python literal. None now passes through unchanged, consistent with
    DictField.get_db_prep_save (previously None crashed on the assert).
    """
    if value is None:
        return value
    if isinstance(value, basestring):
        # SECURITY NOTE(review): eval() on a stored string executes arbitrary
        # code if the value can be attacker-controlled; json.loads would be
        # safer, but existing rows may hold non-JSON tuple reprs — confirm
        # stored formats before changing.
        value = eval(value)
    if isinstance(value, list):
        value = tuple(value)
    assert isinstance(value, tuple)
    value = json.dumps(value, default=to_json)
    return super(TupleField, self).get_db_prep_save(value, connection=connection)
def get_db_prep_save(self, value, connection):
    """Convert our JSON object to a string before we save.

    None passes through unchanged; strings are parsed back into a dict
    first, then the dict is serialized to JSON for storage.
    """
    # Idiom fix: identity comparison with None ("== None" -> "is None").
    if value is None:
        return value
    if isinstance(value, basestring):
        # SECURITY NOTE(review): eval() executes arbitrary code if the stored
        # string is untrusted; json.loads would be safer — confirm that all
        # stored values are valid JSON before switching.
        value = eval(value)
    assert isinstance(value, dict)
    value = json.dumps(value, default=to_json)
    return super(DictField, self).get_db_prep_save(value, connection=connection)
def oembed(request):
    """oEmbed endpoint: return embed metadata for an item URL as JSON or XML.

    GET params: url (required), format ('json' | 'xml'),
    maxwidth / maxheight (player bounds).
    """
    format = request.GET.get('format', 'json')
    # Defaults raised from the oEmbed-typical 640x480 to allow larger embeds.
    maxwidth = int(request.GET.get('maxwidth', 2000))
    maxheight = int(request.GET.get('maxheight', 1080))
    url = request.GET['url']
    parts = urlparse(url).path.split('/')
    itemId = parts[1]
    item = get_object_or_404_json(models.Item, itemId=itemId)
    embed_url = request.build_absolute_uri('/%s' % itemId)
    if url.startswith(embed_url):
        embed_url = url
    # Idiom fix: "'#embed' not in x" instead of "not '#embed' in x".
    if '#embed' not in embed_url:
        embed_url = '%s#embed' % embed_url
    # Named 'data' to avoid shadowing this function's own name.
    data = {}
    data['version'] = '1.0'
    data['type'] = 'video'
    data['provider_name'] = settings.SITENAME
    data['provider_url'] = request.build_absolute_uri('/')
    data['title'] = item.get('title')
    #data['author_name'] = item.get('director')
    #data['author_url'] = ??
    # Fit the player into maxwidth x maxheight while keeping the stream aspect.
    height = max(settings.CONFIG['video']['resolutions'])
    height = min(height, maxheight)
    width = int(round(height * item.stream_aspect))
    if width > maxwidth:
        width = maxwidth
        height = min(maxheight, int(width / item.stream_aspect))
    data['html'] = '<iframe width="%s" height="%s" src="%s" frameborder="0" allowfullscreen></iframe>' % (width, height, embed_url)
    data['width'] = width
    data['height'] = height
    thumbheight = 96
    thumbwidth = int(thumbheight * item.stream_aspect)
    thumbwidth -= thumbwidth % 2  # keep the thumbnail width even
    data['thumbnail_height'] = thumbheight
    data['thumbnail_width'] = thumbwidth
    data['thumbnail_url'] = request.build_absolute_uri('/%s/%sp.jpg' % (item.itemId, thumbheight))
    if format == 'xml':
        oxml = ET.Element('oembed')
        for key in data:
            e = ET.SubElement(oxml, key)
            e.text = unicode(data[key])
        return HttpResponse(
            '<?xml version="1.0" encoding="utf-8" standalone="yes"?>\n' + ET.tostring(oxml),
            'application/xml')
    return HttpResponse(json.dumps(data, indent=2), 'application/json')
def update(key, value):
    """Set `key` to `value` in the user's auth JSON file, creating it if absent.

    The file path comes from the oxAUTH environment variable, defaulting to
    ~/.ox/auth.json. Existing keys are preserved; the whole file is rewritten
    with indented JSON.
    """
    user_auth = os.environ.get('oxAUTH', os.path.expanduser('~/.ox/auth.json'))
    auth = {}
    if os.path.exists(user_auth):
        # Load existing settings so other keys survive the rewrite.
        # `with` guarantees the handle is closed even if parsing fails.
        with open(user_auth, "r") as f:
            auth = json.loads(f.read())
    auth[key] = value
    with open(user_auth, "w") as f:
        f.write(json.dumps(auth, indent=2))
def oembed(request):
    """oEmbed endpoint: return embed metadata for an item URL as JSON or XML.

    GET params: url (required), format ('json' | 'xml'),
    maxwidth / maxheight (player bounds, default 640x480).
    """
    format = request.GET.get('format', 'json')
    maxwidth = int(request.GET.get('maxwidth', 640))
    maxheight = int(request.GET.get('maxheight', 480))
    url = request.GET['url']
    parts = urlparse(url).path.split('/')
    itemId = parts[1]
    item = get_object_or_404_json(models.Item, itemId=itemId)
    # FIXME: embed should reflect the actual url. A previous
    # build_absolute_uri('/%s/embed') assignment was dead code (immediately
    # overwritten below) and has been removed.
    embed_url = '%s#?embed=true' % url
    oembed = {}
    oembed['version'] = '1.0'
    oembed['type'] = 'video'
    oembed['provider_name'] = settings.SITENAME
    oembed['provider_url'] = request.build_absolute_uri('/')
    oembed['title'] = item.get('title')
    #oembed['author_name'] = item.get('director')
    #oembed['author_url'] = ??
    # Fit the player into maxwidth x maxheight while keeping the stream aspect.
    height = max(settings.CONFIG['video']['resolutions'])
    height = min(height, maxheight)
    width = int(round(height * item.stream_aspect))
    if width > maxwidth:
        width = maxwidth
        height = min(maxheight, int(width / item.stream_aspect))
    oembed['html'] = '<iframe width="%s" height="%s" src="%s" frameborder="0" webkitAllowFullScreen mozallowfullscreen allowFullScreen></iframe>' % (width, height, embed_url)
    oembed['width'] = width
    oembed['height'] = height
    thumbheight = 96
    thumbwidth = int(thumbheight * item.stream_aspect)
    thumbwidth -= thumbwidth % 2  # keep the thumbnail width even
    oembed['thumbnail_height'] = thumbheight
    oembed['thumbnail_width'] = thumbwidth
    oembed['thumbnail_url'] = request.build_absolute_uri('/%s/%sp.jpg' % (item.itemId, thumbheight))
    if format == 'xml':
        oxml = ET.Element('oembed')
        for key in oembed:
            e = ET.SubElement(oxml, key)
            e.text = unicode(oembed[key])
        return HttpResponse(
            '<?xml version="1.0" encoding="utf-8" standalone="yes"?>\n' + ET.tostring(oxml),
            'application/xml'
        )
    return HttpResponse(json.dumps(oembed, indent=2), 'application/json')
def cache(filename, type='oshash'):
    # Read-through sqlite cache for per-file hashes/metadata.
    # type is one of 'oshash', 'sha1', 'info'; returns the cached value for
    # `filename` if its size and mtime still match, otherwise recomputes,
    # stores and returns it.
    conn = sqlite3.connect(_get_file_cache(), timeout=10)
    conn.text_factory = str
    conn.row_factory = sqlite3.Row
    # One-time schema setup, tracked via a function attribute
    # (cache.init is presumably initialized to False elsewhere in this module
    # — confirm).
    if not cache.init:
        c = conn.cursor()
        c.execute('CREATE TABLE IF NOT EXISTS cache (path varchar(1024) unique, oshash varchar(16), sha1 varchar(42), size int, mtime int, info text)')
        c.execute('CREATE INDEX IF NOT EXISTS cache_oshash ON cache (oshash)')
        c.execute('CREATE INDEX IF NOT EXISTS cache_sha1 ON cache (sha1)')
        conn.commit()
        cache.init = True
    c = conn.cursor()
    c.execute('SELECT oshash, sha1, info, size, mtime FROM cache WHERE path = ?', (filename, ))
    stat = os.stat(filename)
    row = None
    h = None
    sha1 = None
    info = ''
    for row in c:
        # Cache hit only when both size and mtime are unchanged.
        if stat.st_size == row['size'] and int(stat.st_mtime) == int(row['mtime']):
            value = row[type]
            if value:
                if type == 'info':
                    # info is stored as a JSON string; decode before returning.
                    value = json.loads(value)
                return value
        # Stale or partial row: keep the other columns so the upsert below
        # does not throw away hashes that were already computed.
        h = row['oshash']
        sha1 = row['sha1']
        info = row['info']
    # Cache miss: recompute the requested value (cached=False avoids
    # re-entering this cache).
    if type == 'oshash':
        value = h = oshash(filename, cached=False)
    elif type == 'sha1':
        value = sha1 = sha1sum(filename, cached=False)
    elif type == 'info':
        value = avinfo(filename, cached=False)
        info = json.dumps(value)
    t = (filename, h, sha1, stat.st_size, int(stat.st_mtime), info)
    # `with conn` commits the upsert (or rolls back on error).
    with conn:
        sql = u'INSERT OR REPLACE INTO cache values (?, ?, ?, ?, ?, ?)'
        c.execute(sql, t)
    return value
def write_json(file, data, ensure_ascii=True, indent=0, sort_keys=False, verbose=False):
    """Serialize `data` as JSON and write it to `file` via write_file.

    When ensure_ascii is False the JSON text is UTF-8 encoded before writing.
    """
    serialized = json.dumps(data, ensure_ascii=ensure_ascii, indent=indent, sort_keys=sort_keys)
    if not ensure_ascii:
        serialized = serialized.encode('utf-8')
    write_file(file, serialized, verbose=verbose)
data["directors"] = parse_cast(xml, "directors") data["format"] = find_re(xml, "Format:(.*?)<") data["genre"] = decode_html(find_re(xml, "Genre:(.*?)<")) data["plotSummary"] = decode_html( find_re(xml, 'PLOT SUMMARY</b>.*?<SetFontStyle normalStyle="textColor">(.*?)</SetFontStyle>') ) data["posterUrl"] = find_re(xml, 'reflection="." url="(.*?)"') data["producers"] = parse_cast(xml, "producers") data["rated"] = find_re(xml, "Rated(.*?)<") data["relatedMovies"] = parse_movies(xml, "related movies") data["releaseDate"] = find_re(xml, "Released(.*?)<") data["runTime"] = find_re(xml, "Run Time:(.*?)<") data["screenwriters"] = parse_cast(xml, "screenwriters") data["soundtrackId"] = find_re(xml, "viewAlbum\?id=(.*?)&") data["trailerUrl"] = find_re(xml, 'autoplay="." url="(.*?)"') return data if __name__ == "__main__": from ox.utils import json data = ItunesAlbum(title="So Red the Rose", artist="Arcadia").get_data() print json.dumps(data, sort_keys=True, indent=4) data = ItunesMovie(title="The Matrix", director="Wachowski").get_data() print json.dumps(data, sort_keys=True, indent=4) for v in data["relatedMovies"]: data = ItunesMovie(id=v["id"]).get_data() print json.dumps(data, sort_keys=True, indent=4) data = ItunesMovie(id="272960052").get_data() print json.dumps(data, sort_keys=True, indent=4)
def find(request):
    '''
    Example:
        find({query: {conditions: [{key: '*', value: 'paris', operator: '='}],
              operator: '&'},
              keys: ['title', 'id'], range: [0, 10],
              sort: [{key: 'title', operator: '+'}]})

    takes {
        'query': query, 'sort': array, 'range': array, clipsQuery: ...
    }
    query: query object, more on query syntax at
           https://wiki.0x2620.org/wiki/pandora/QuerySyntax
    sort:  array of key/operator dicts
           [{key: "year", operator: "-"}, {key: "director", operator: ""}]
    range: result range, array [from, to]
    keys:  array of keys to return
    group: group elements by country, genre, director...

    with keys, items is a list of dicts with the requested properties:
    returns {items: [objects]}

    Groups
    takes {'query': query, 'key': string, 'group': string, 'range': array, clips: {}}
    with keys, items contains a list of {'name': string, 'items': int}:
    returns {items: [objects]}
    without keys, returns the number of items for the query:
    returns {items: int}

    Positions
    takes {'query': query, 'positions': [], 'sort': array}
    positions: ids of items for which positions are required
    returns {positions: {id: position}}
    '''
    data = json.loads(request.POST['data'])
    if settings.JSON_DEBUG:
        print(json.dumps(data, indent=2))
    query = parse_query(data, request.user)
    response = json_response({})
    if 'group' in query:
        response['data']['items'] = []
        items = 'items'
        item_qs = query['qs']
        order_by = _order_by_group(query)
        qs = models.Facet.objects.filter(key=query['group']).filter(
            item__id__in=item_qs)
        qs = qs.values('value').annotate(items=Count('id')).order_by(*order_by)
        if 'positions' in query:
            response['data']['positions'] = {}
            ids = [j['value'] for j in qs]
            response['data']['positions'] = utils.get_positions(
                ids, query['positions'])
        elif 'range' in data:
            qs = qs[query['range'][0]:query['range'][1]]
            response['data']['items'] = [{
                'name': i['value'],
                'items': i[items]
            } for i in qs]
        else:
            response['data']['items'] = qs.count()
    elif 'position' in query:
        qs = _order_query(query['qs'], query['sort'])
        ids = [j['itemId'] for j in qs.values('itemId')]
        # FIX: the extra condition must be wrapped in a list — concatenating
        # a dict onto the conditions list raised TypeError.
        data['conditions'] = data['conditions'] + [{
            'value': query['position'],
            'key': query['sort'][0]['key'],
            'operator': '^'
        }]
        query = parse_query(data, request.user)
        qs = _order_query(query['qs'], query['sort'])
        if qs.count() > 0:
            response['data']['position'] = utils.get_positions(
                ids, [qs[0].itemId])[0]
    elif 'positions' in query:
        qs = _order_query(query['qs'], query['sort'])
        ids = [j['itemId'] for j in qs.values('itemId')]
        response['data']['positions'] = utils.get_positions(
            ids, query['positions'])
    elif 'keys' in query:
        response['data']['items'] = []
        qs = _order_query(query['qs'], query['sort'])
        _p = query['keys']

        def get_clips(qs):
            # Sample up to clip_items clips evenly across the result set.
            n = qs.count()
            if n > query['clip_items']:
                num = query['clip_items']
                clips = []
                step = int(n / (num + 1))
                i = step
                while i <= (n - step) and i < n and len(clips) < num:
                    clips.append(qs[i])
                    i += step
            else:
                clips = qs
            return [
                c.json(query['clip_keys'], query['clip_filter'])
                for c in clips
            ]

        def only_p_sums(m):
            # Build the result dict reading per-item stats from m.sort.
            r = {}
            for p in _p:
                if p == 'accessed':
                    r[p] = m.sort.accessed or ''
                elif p == 'modified':
                    r[p] = m.sort.modified
                elif p == 'timesaccessed':
                    r[p] = m.sort.timesaccessed
                else:
                    r[p] = m.json.get(p, '')
            if 'clip_qs' in query:
                r['clips'] = get_clips(query['clip_qs'].filter(item=m))
            return r

        def only_p(m):
            # Build the result dict from the cached json blob (no extra queries).
            r = {}
            if m:
                m = json.loads(m, object_hook=ox.django.fields.from_json)
                for p in _p:
                    r[p] = m.get(p, '')
            if 'clip_qs' in query:
                r['clips'] = get_clips(
                    query['clip_qs'].filter(item__itemId=m['id']))
            return r

        qs = qs[query['range'][0]:query['range'][1]]
        # any() instead of filter(...): on Python 3 filter() returns an
        # always-truthy iterator, so the old truthiness test would misfire.
        if any(p in ('accessed', 'modified', 'timesaccessed', 'viewed')
               for p in _p):
            qs = qs.select_related()
            response['data']['items'] = [only_p_sums(m) for m in qs]
        else:
            response['data']['items'] = [
                only_p(m['json']) for m in qs.values('json')
            ]
    else:  # otherwise stats
        items = query['qs']
        files = File.objects.filter(item__in=items).filter(size__gt=0)
        r = files.aggregate(Sum('duration'), Sum('pixels'), Sum('size'))
        totals = [i['id'] for i in settings.CONFIG['totals']]
        if 'duration' in totals:
            response['data']['duration'] = r['duration__sum']
        if 'files' in totals:
            response['data']['files'] = files.count()
        if 'items' in totals:
            response['data']['items'] = items.count()
        if 'pixels' in totals:
            response['data']['pixels'] = r['pixels__sum']
        if 'runtime' in totals:
            response['data']['runtime'] = items.aggregate(
                Sum('sort__runtime'))['sort__runtime__sum'] or 0
        if 'size' in totals:
            response['data']['size'] = r['size__sum']
        # Normalize missing aggregates to 0 ("is None" instead of "== None").
        for key in ('runtime', 'duration', 'pixels', 'size'):
            if key in totals and response['data'][key] is None:
                response['data'][key] = 0
    return render_to_json_response(response)
def archive_news():
    '''
    this is just an example of an archiving application
    '''
    import calendar
    import os
    from ox.utils import json
    import time
    count = {}
    colon = []
    archivePath = '/Volumes/Rolux Home/Desktop/Data/spiegel.de/Spiegel Online'
    # days[m] = number of days in month m (1-indexed; index 0 unused).
    days = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    localtime = time.localtime()
    year = int(time.strftime('%Y', localtime))
    month = int(time.strftime('%m', localtime))
    day = int(time.strftime('%d', localtime)) - 1
    # Walk backwards day by day from yesterday to 2000-01-01.
    for y in range(year, 1999, -1):
        if y == year:
            mMax = month
        else:
            mMax = 12
        for m in range(mMax, 0, -1):
            if y == year and m == month:
                dMax = day
            elif m == 2 and calendar.isleap(y):
                # FIX: the old test (y % 4 == 0 and y % 400 != 0) got century
                # years wrong (1900 counted as leap, 2000 as non-leap); use
                # the full Gregorian rule via calendar.isleap.
                dMax = days[m] + 1
            else:
                dMax = days[m]
            for d in range(dMax, 0, -1):
                print('getNews(%d, %d, %d)' % (y, m, d))
                news = getNews(y, m, d)
                for new in news:
                    # Archive layout: YYYY/MMDD/HHMM derived from the article date.
                    dirname = archivePath + '/' + new['date'][0:4] + '/' + new['date'][5:7] + new['date'][8:10] + '/' + new['date'][11:13] + new['date'][14:16]
                    if not os.path.exists(dirname):
                        os.makedirs(dirname)
                    if new['url'][-5:] == '.html':
                        filename = dirname + '/' + new['url'].split('/')[-1][:-5] + '.json'
                    else:
                        filename = dirname + '/' + new['url'] + '.json'
                    # NOTE: "or True" deliberately forces a rewrite every run.
                    if not os.path.exists(filename) or True:
                        data = json.dumps(new, ensure_ascii=False)
                        with open(filename, 'w') as f:
                            f.write(data)
                    filename = filename[:-5] + '.txt'
                    if not os.path.exists(filename) or True:
                        data = split_title(new['title'])
                        data.append(new['description'])
                        data = '\n'.join(data)
                        with open(filename, 'w') as f:
                            f.write(data)
                    filename = dirname + '/' + new['imageUrl'].split('/')[-1]
                    if not os.path.exists(filename):
                        data = ox.cache.read_url(new['imageUrl'])
                        with open(filename, 'w') as f:
                            f.write(data)
                    # Tally articles per section (dict.has_key -> "in").
                    strings = new['url'].split('/')
                    string = strings[3]
                    if len(strings) == 6:
                        string += '/' + strings[4]
                    if string not in count:
                        count[string] = {'count': 1, 'string': '%s %s http://www.spiegel.de/%s/0,1518,archiv-%d-%03d,00.html' % (new['date'], new['date'], new['section'].lower(), y, int(datetime(y, m, d).strftime('%j')))}
                    else:
                        count[string] = {'count': count[string]['count'] + 1, 'string': '%s %s' % (new['date'], count[string]['string'][17:])}
                    strings = split_title(new['title'])
                    if strings[0] != new['title1'] or strings[1] != new['title2']:
                        colon.append('%s %s %s: %s' % (new['date'], new['title'], new['title1'], new['title2']))
    for key in sorted(count):
        print('%6d %-24s %s' % (count[key]['count'], key, count[key]['string']))
    for value in colon:
        print(value)
def archive_issues():
    '''
    this is just an example of an archiving application
    '''
    p = {}
    import os
    from ox.utils import json
    import time
    archivePath = '/Volumes/Rolux Home/Desktop/Data/spiegel.de/Der Spiegel'
    localtime = time.localtime()
    year = int(time.strftime('%Y', localtime))
    week = int(time.strftime('%W', localtime))
    # Walk backwards week by week from the current week to 1994.
    for y in range(year, 1993, -1):
        if y == year:
            wMax = week + 1
        else:
            wMax = 53
        for w in range(wMax, 0, -1):
            print('get_issue(%d, %d)' % (y, w))
            issue = get_issue(y, w)
            if not issue:
                continue
            dirname = '%s/%d/%02d' % (archivePath, y, w)
            if not os.path.exists(dirname):
                os.makedirs(dirname)
            # Issue metadata as JSON ("with" closes files even on error).
            filename = '%s/Der Spiegel %d %02d.json' % (dirname, y, w)
            if not os.path.exists(filename):
                with open(filename, 'w') as f:
                    f.write(json.dumps(issue, ensure_ascii=False))
            # Plain-text table of contents.
            filename = '%s/Der Spiegel %d %02d.txt' % (dirname, y, w)
            if not os.path.exists(filename):
                data = ['%3d %s' % (item['page'], item['title'])
                        for item in issue['contents']]
                with open(filename, 'w') as f:
                    f.write('\n'.join(data))
            # Cover image.
            filename = '%s/Der Spiegel %d %02d.jpg' % (dirname, y, w)
            if not os.path.exists(filename):
                with open(filename, 'w') as f:
                    f.write(ox.cache.read_url(issue['coverUrl']))
            # Individual page scans.
            for page in issue['pageUrl']:
                url = issue['pageUrl'][page]
                if url:
                    filename = '%s/Der Spiegel %d %02d %03d.jpg' % (dirname, y, w, page)
                    if not os.path.exists(filename):
                        with open(filename, 'w') as f:
                            f.write(ox.cache.read_url(url))
            # Track min/avg/max page counts across the archived issues.
            if not p:
                p = {'num': 1, 'sum': issue['pages'], 'min': issue['pages'], 'max': issue['pages']}
            else:
                p['num'] += 1
                p['sum'] += issue['pages']
                p['min'] = min(p['min'], issue['pages'])
                p['max'] = max(p['max'], issue['pages'])
            print(p['min'], p['sum'] / p['num'], p['max'])
def find(request):
    '''
    Example:
        find({query: {conditions: [{key: '*', value: 'paris', operator: '='}],
              operator: '&'},
              keys: ['title', 'id'], range: [0, 10],
              sort: [{key: 'title', operator: '+'}]})

    takes {
        'query': query, 'sort': array, 'range': array, clipsQuery: ...
    }
    query: query object, more on query syntax at
           https://wiki.0x2620.org/wiki/pandora/QuerySyntax
    sort:  array of key/operator dicts
           [{key: "year", operator: "-"}, {key: "director", operator: ""}]
    range: result range, array [from, to]
    keys:  array of keys to return
    group: group elements by country, genre, director...

    with keys, items is a list of dicts with the requested properties:
    returns {items: [objects]}

    Groups
    takes {'query': query, 'key': string, 'group': string, 'range': array, clips: {}}
    with keys, items contains a list of {'name': string, 'items': int}:
    returns {items: [objects]}
    without keys, returns the number of items for the query:
    returns {items: int}

    Positions
    takes {'query': query, 'positions': [], 'sort': array}
    positions: ids of items for which positions are required
    returns {positions: {id: position}}
    '''
    data = json.loads(request.POST['data'])
    if settings.JSON_DEBUG:
        print(json.dumps(data, indent=2))
    query = parse_query(data, request.user)
    response = json_response({})
    if 'group' in query:
        response['data']['items'] = []
        items = 'items'
        item_qs = query['qs']
        order_by = _order_by_group(query)
        qs = models.Facet.objects.filter(key=query['group']).filter(item__id__in=item_qs)
        qs = qs.values('value').annotate(items=Count('id')).order_by(*order_by)
        if 'positions' in query:
            response['data']['positions'] = {}
            ids = [j['value'] for j in qs]
            response['data']['positions'] = utils.get_positions(ids, query['positions'])
        elif 'range' in data:
            qs = qs[query['range'][0]:query['range'][1]]
            response['data']['items'] = [{'name': i['value'], 'items': i[items]} for i in qs]
        else:
            response['data']['items'] = qs.count()
    elif 'position' in query:
        qs = _order_query(query['qs'], query['sort'])
        ids = [j['itemId'] for j in qs.values('itemId')]
        # FIX: the extra condition must be wrapped in a list — concatenating
        # a dict onto the conditions list raised TypeError.
        data['conditions'] = data['conditions'] + [{
            'value': query['position'],
            'key': query['sort'][0]['key'],
            'operator': '^'
        }]
        query = parse_query(data, request.user)
        qs = _order_query(query['qs'], query['sort'])
        if qs.count() > 0:
            response['data']['position'] = utils.get_positions(ids, [qs[0].itemId])[0]
    elif 'positions' in query:
        qs = _order_query(query['qs'], query['sort'])
        ids = [j['itemId'] for j in qs.values('itemId')]
        response['data']['positions'] = utils.get_positions(ids, query['positions'])
    elif 'keys' in query:
        response['data']['items'] = []
        qs = _order_query(query['qs'], query['sort'])
        _p = query['keys']

        def get_clips(qs):
            # Sample up to clip_items clips evenly across the result set.
            n = qs.count()
            if n > query['clip_items']:
                num = query['clip_items']
                clips = []
                step = int(n / (num + 1))
                i = step
                while i <= (n - step) and i < n and len(clips) < num:
                    clips.append(qs[i])
                    i += step
            else:
                clips = qs
            return [c.json(query['clip_keys'], query['clip_filter']) for c in clips]

        def only_p_sums(m):
            # Build the result dict reading per-item stats from m.sort.
            r = {}
            for p in _p:
                if p == 'accessed':
                    r[p] = m.sort.accessed or ''
                elif p == 'modified':
                    r[p] = m.sort.modified
                elif p == 'timesaccessed':
                    r[p] = m.sort.timesaccessed
                else:
                    r[p] = m.json.get(p, '')
            if 'clip_qs' in query:
                r['clips'] = get_clips(query['clip_qs'].filter(item=m))
            return r

        def only_p(m):
            # Build the result dict from the cached json blob (no extra queries).
            r = {}
            if m:
                m = json.loads(m, object_hook=ox.django.fields.from_json)
                for p in _p:
                    r[p] = m.get(p, '')
            if 'clip_qs' in query:
                r['clips'] = get_clips(query['clip_qs'].filter(item__itemId=m['id']))
            return r

        qs = qs[query['range'][0]:query['range'][1]]
        # any() instead of filter(...): on Python 3 filter() returns an
        # always-truthy iterator, so the old truthiness test would misfire.
        if any(p in ('accessed', 'modified', 'timesaccessed', 'viewed') for p in _p):
            qs = qs.select_related()
            response['data']['items'] = [only_p_sums(m) for m in qs]
        else:
            response['data']['items'] = [only_p(m['json']) for m in qs.values('json')]
    else:  # otherwise stats
        items = query['qs']
        files = File.objects.filter(item__in=items).filter(size__gt=0)
        r = files.aggregate(
            Sum('duration'),
            Sum('pixels'),
            Sum('size')
        )
        totals = [i['id'] for i in settings.CONFIG['totals']]
        if 'duration' in totals:
            response['data']['duration'] = r['duration__sum']
        if 'files' in totals:
            response['data']['files'] = files.count()
        if 'items' in totals:
            response['data']['items'] = items.count()
        if 'pixels' in totals:
            response['data']['pixels'] = r['pixels__sum']
        if 'runtime' in totals:
            response['data']['runtime'] = items.aggregate(Sum('sort__runtime'))['sort__runtime__sum'] or 0
        if 'size' in totals:
            response['data']['size'] = r['size__sum']
        # Normalize missing aggregates to 0 ("is None" instead of "== None").
        for key in ('runtime', 'duration', 'pixels', 'size'):
            if key in totals and response['data'][key] is None:
                response['data'][key] = 0
    return render_to_json_response(response)