def search(): q = request.args.get('q', '') page = request.args.get('page', '0') works = [] if q: base_url = "http://bibliographica.org/search.json?q=%s&page=%s" target_url = base_url % (q, page) data = urllib2.urlopen(target_url).read() solrdata = json.loads(data) response = solrdata['response'] data = response["docs"] works = [] for item in data: uri = item["uri"].replace("<", "").replace(">", "") d = urllib2.urlopen(uri + ".json").read(); item["work"] = json.loads(d) out = Bibliographica(item).data work = pdcalc.work.Work(out) work.uri = uri try: result = pdcalc.get_pd_status(work) work.pd_status = {'error': '', 'results': result} except Exception, inst: if app.debug: raise work.pd_status = { 'error': 'Failed to calculate status: %s' % inst, 'results': [] } works.append(work) count = response['numFound']
def test():
    """Exercise every registered calculator against the canned fixture.

    Each record in the fixture file must produce the expected public-domain
    flag for every jurisdiction, whether the calculator is called directly
    or routed through pdcalc.get_pd_status (with and without an explicit
    jurisdiction argument).
    """
    calcs = dict((juris, Calculator(juris)) for juris in calculators.keys())
    expected = {
        "uk": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        "fr": [0, 0, 0, 0, 1, 1, 1, 0, 1, 0],
    }
    records = load_from_file("pdcalc/test/data/01.json")
    for idx, record in enumerate(records):
        parsed = Bibliographica(record)
        work = Work(parsed.data)
        for juris, calculator in calcs.items():
            want = expected[juris][idx]
            assert calculator.get_status(work) == want
            assert pdcalc.get_pd_status(work, juris)[juris]['pd'] == want
            assert pdcalc.get_pd_status(work)[juris]['pd'] == want
def api_pd(): # TODO: proper validation (e.g. with colander) if not 'jurisdiction' in request.args or not 'work' in request.args: return jsonify({ 'error': 'Missing jurisdiction or work parameter' }) jurisdiction = request.args['jurisdiction'] workdata = json.loads(request.args['work']) if jurisdiction not in pdcalc.calculators: return jsonify({ 'error': 'No calculator for that jurisdiction' }) work = pdcalc.work.Work(workdata) try: result = pdcalc.get_pd_status(work) except Exception, inst: if app.debug: raise return jsonify({ 'error': '%s' % inst })