Code Example #1
File: metadata_list.py  Project: ffzg/PhiloLogic4
def metadata_list(environ,start_response):
    status = '200 OK'
    headers = [('Content-type', 'application/json; charset=UTF-8'),("Access-Control-Allow-Origin","*")]
    start_response(status,headers)
    environ["SCRIPT_FILENAME"] = environ["SCRIPT_FILENAME"].replace('scripts/metadata_list.py', '')
    db, path_components, q = parse_cgi(environ)
    cgi = urlparse.parse_qs(environ["QUERY_STRING"],keep_blank_values=True)
    metadata = cgi.get('term',[''])[0]
    field = cgi.get('field',[''])[0]
    yield autocomplete_metadata(metadata, field, db)
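All of these handlers share the same generator-style WSGI shape: parse the query string, call start_response with the status and CORS headers, then yield the response body. The stand-alone sketch below is not part of PhiloLogic4 (echo_term, the query parameter, and the port are illustrative); it only shows that common shape and can be served with the standard-library wsgiref server.

import json
try:
    from urllib.parse import parse_qs      # Python 3
except ImportError:
    from urlparse import parse_qs          # Python 2, as in the examples on this page

def echo_term(environ, start_response):
    # Same shape as the handlers above: send headers, then yield the body.
    status = '200 OK'
    headers = [('Content-type', 'application/json; charset=UTF-8'),
               ("Access-Control-Allow-Origin", "*")]
    start_response(status, headers)
    cgi = parse_qs(environ.get("QUERY_STRING", ""), keep_blank_values=True)
    term = cgi.get('term', [''])[0]
    # .encode() keeps Python 3's wsgiref happy; Python 2 byte strings pass through unchanged.
    yield json.dumps({"term": term}).encode("utf-8")

if __name__ == "__main__":
    from wsgiref.simple_server import make_server
    # Try http://127.0.0.1:8000/?term=montaigne in a browser for a quick manual test.
    make_server("127.0.0.1", 8000, echo_term).serve_forever()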
Code Example #2
def get_results_bibliography(environ, start_response):
    status = '200 OK'
    headers = [('Content-type', 'application/json; charset=UTF-8'),
               ("Access-Control-Allow-Origin", "*")]
    start_response(status, headers)
    environ["SCRIPT_FILENAME"] = environ["SCRIPT_FILENAME"].replace(
        'scripts/get_results_bibliography.py', '')
    cgi = urlparse.parse_qs(environ["QUERY_STRING"], keep_blank_values=True)
    philo_ids = cgi.get('id', [])
    db, path_components, q = parse_cgi(environ)
    obj_level = db.locals["default_object_level"]
    path = db.locals['db_path']
    path = path[:path.rfind("/data")]
    config = f.WebConfig()
    c = db.dbh.cursor()
    citations = []
    citation_counter = defaultdict(int)
    count = 0
    for philo_id in philo_ids:
        obj = ObjectWrapper(philo_id.split(',')[:7], db)
        obj.bytes = []
        citation = f.cite.biblio_citation(db, config, obj)
        if citation not in citation_counter:
            citations.append(citation)
        citation_counter[citation] += 1
        count += 1
    citations_with_count = []
    for cite in citations:
        count = citation_counter[cite]
        citations_with_count.append([cite, count])
    yield json.dumps(citations_with_count)
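The citations list and citation_counter defaultdict above deduplicate citations in first-seen order while counting repeats. A stand-alone equivalent with dummy data (not from PhiloLogic4), using collections.Counter:

from collections import Counter

citations = ["Montaigne, Essais", "Pascal, Pensees", "Montaigne, Essais"]  # dummy data
counts = Counter()
ordered = []
for cite in citations:
    if cite not in counts:            # keep first-seen order, like the loop above
        ordered.append(cite)
    counts[cite] += 1
citations_with_count = [[cite, counts[cite]] for cite in ordered]
# citations_with_count == [['Montaigne, Essais', 2], ['Pascal, Pensees', 1]]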
Code Example #3
def export_results(environ, start_response):
    status = '200 OK'
    headers = [('Content-type', 'text/plain; charset=UTF-8'),
               ("Access-Control-Allow-Origin", "*")]
    start_response(status, headers)
    environ["SCRIPT_FILENAME"] = environ["SCRIPT_FILENAME"].replace(
        'scripts/export_results.py', '')
    cgi = urlparse.parse_qs(environ["QUERY_STRING"], keep_blank_values=True)
    output_format = cgi.get('output_format', [''])[0]
    db, path_components, q = parse_cgi(environ)
    config = f.WebConfig()
    path = os.path.abspath(os.path.dirname(__file__)).replace('scripts', "")

    if q['report'] == "concordance" or q['report'] == None:
        results_string, flat_list = export_concordance(db, config, q, path)

    unique_filename = str(uuid.uuid4())
    if output_format == "json":
        write_json(path, unique_filename, results_string)
        link = config.db_url + "/data/exports/" + unique_filename + ".json"
    elif output_format == "csv":
        write_csv(path, unique_filename, flat_list)
        link = config.db_url + "/data/exports/" + unique_filename + '.csv'
    elif output_format == "tab":
        write_tab(path, unique_filename, flat_list)
        link = config.db_url + "/data/exports/" + unique_filename + '.tab'
    yield link
Code Example #4
def get_more_context(environ, start_response):
    status = '200 OK'
    headers = [('Content-type', 'application/json; charset=UTF-8'),
               ("Access-Control-Allow-Origin", "*")]
    start_response(status, headers)
    environ["SCRIPT_FILENAME"] = environ["SCRIPT_FILENAME"].replace(
        'scripts/get_more_context.py', '')
    cgi = urlparse.parse_qs(environ["QUERY_STRING"], keep_blank_values=True)
    hit_num = int(cgi.get('hit_num', [0])[0])
    db, path_components, q = parse_cgi(environ)
    config = f.WebConfig()
    if q['start'] == 0:
        start = 0
    else:
        start = q['start'] - 1
    end = (q['end'] or q['results_per_page']) + 1
    hit_range = range(start, end)
    hits = db.query(q["q"], q["method"], q["arg"], **q["metadata"])
    context_size = config['concordance_length'] * 3
    html_list = []
    for i in hit_range:
        try:
            html_list.append(
                r.fetch_concordance(hits[i], environ["SCRIPT_FILENAME"],
                                    context_size))
        except IndexError:
            break
    yield json.dumps(html_list)
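The start/end arithmetic above reaches back one hit before q['start'] and adds one to q['end'] so that range() still covers q['end'] itself. With hypothetical values:

q = {'start': 21, 'end': 40, 'results_per_page': 25}   # illustrative values only
start = 0 if q['start'] == 0 else q['start'] - 1        # 20: one hit before the requested page
end = (q['end'] or q['results_per_page']) + 1           # 41: exclusive bound, so hit 40 is included
hit_range = range(start, end)                           # indexes 20 through 40 inclusive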
Code Example #5
def get_frequency(environ,start_response):
    status = '200 OK'
    headers = [('Content-type', 'application/json; charset=UTF-8'),("Access-Control-Allow-Origin","*")]
    start_response(status,headers)
    environ["SCRIPT_FILENAME"] = environ["SCRIPT_FILENAME"].replace('scripts/get_frequency.py', '')
    cgi = urlparse.parse_qs(environ["QUERY_STRING"],keep_blank_values=True)
    frequency_field = cgi.get('frequency_field',[''])[0]
    db, path_components, q = parse_cgi(environ)
    q['field'] = frequency_field
    if q['q'] == '' and q["no_q"]:
        hits = db.get_all(db.locals['default_object_level'])
    else:
        hits = db.query(q["q"],q["method"],q["arg"],**q["metadata"])
    if q["format"] == "json":
        while not len(hits):
            time.sleep(0.5) ## this should be enough time to write all results to disk in most instances.... better fix later
        q["interval_start"] = 0
        q["interval_end"] = len(hits)
        bib_values = dict([(i, j) for i, j in q['metadata'].iteritems() if j])
        field, results = r.generate_frequency(hits, q, db)
        new_results = []
        for label, result in sorted(results.iteritems(), key=lambda (x, y): y["count"], reverse=True):
            if frequency_field == "title":
                author = get_author(label, db)
                if author:
                    label = label + " (%s)" % author.decode('utf-8', 'ignore')
            formatted_result = {"search_term": q['q'], "frequency_field": frequency_field, "results": label, "count": result["count"], "url": "dispatcher.py/" + result["url"].replace('./', ''),
                                "bib_values": bib_values}
            new_results.append(formatted_result)
        yield json.dumps(new_results)
    else:
        field, results = r.generate_frequency(hits, q, db)
        yield json.dumps(results,indent=2)
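The sorted() call in this example relies on two Python-2-only constructs: dict.iteritems() and a tuple-unpacking lambda parameter. The same descending-by-count sort, shown stand-alone with dummy data in a form that also runs on Python 3:

results = {"Essais": {"count": 42}, "Pensees": {"count": 17}}   # dummy frequency results
ranked = sorted(results.items(), key=lambda item: item[1]["count"], reverse=True)
# ranked == [('Essais', {'count': 42}), ('Pensees', {'count': 17})]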
Code Example #6
File: export_results.py  Project: ffzg/PhiloLogic4
def export_results(environ,start_response):
    status = '200 OK'
    headers = [('Content-type', 'text/plain; charset=UTF-8'),("Access-Control-Allow-Origin","*")]
    start_response(status,headers)
    environ["SCRIPT_FILENAME"] = environ["SCRIPT_FILENAME"].replace('scripts/export_results.py', '')
    cgi = urlparse.parse_qs(environ["QUERY_STRING"],keep_blank_values=True)
    output_format = cgi.get('output_format',[''])[0]
    db, path_components, q = parse_cgi(environ)
    config = f.WebConfig()
    path = os.path.abspath(os.path.dirname(__file__)).replace('scripts', "")
    
    if q['report'] == "concordance" or q['report'] == None:
        results_string, flat_list = export_concordance(db, config, q, path)

    unique_filename = str(uuid.uuid4())
    if output_format == "json":
        write_json(path, unique_filename, results_string)
        link = config.db_url + "/data/exports/" + unique_filename + ".json"
    elif output_format == "csv":
        write_csv(path, unique_filename, flat_list)
        link = config.db_url + "/data/exports/" + unique_filename + '.csv'
    elif output_format == "tab":
        write_tab(path, unique_filename, flat_list)
        link = config.db_url + "/data/exports/" + unique_filename + '.tab'
    yield link
Code Example #7
File: term_list.py  Project: pleonard212/PhiloLogic4
def term_list(environ, start_response):
    status = '200 OK'
    headers = [('Content-type', 'application/json; charset=UTF-8'),
               ("Access-Control-Allow-Origin", "*")]
    start_response(status, headers)
    environ["SCRIPT_FILENAME"] = environ["SCRIPT_FILENAME"].replace(
        'scripts/term_list.py', '')
    db, path_components, q = parse_cgi(environ)
    cgi = urlparse.parse_qs(environ["QUERY_STRING"], keep_blank_values=True)
    term = cgi.get('term', [''])[0]
    yield autocomplete_term(term, db)
Code Example #8
def landing_page_content(environ,start_response):
    status = '200 OK'
    headers = [('Content-type', 'application/json; charset=UTF-8'),("Access-Control-Allow-Origin","*")]
    start_response(status,headers)
    environ["SCRIPT_FILENAME"] = environ["SCRIPT_FILENAME"].replace('scripts/landing_page_content.py', '')
    cgi = urlparse.parse_qs(environ["QUERY_STRING"],keep_blank_values=True)
    content_type = cgi.get('landing_page_content_type',[])[0]
    q_range = cgi.get('range',[])[0].lower().split('-')
    if content_type != "year":
        letter_range = set([chr(i) for i in range(ord(q_range[0]),ord(q_range[1])+1)])
    db, path_components, q = parse_cgi(environ)
    config = f.WebConfig()
    c = db.dbh.cursor()
    content = ''
    if content_type == "author":
        content = generate_author_list(c, letter_range, db, config)
    elif content_type == "title":
        content = generate_title_list(c, letter_range, db, config)
    elif content_type == "year":
        content = generate_year_list(c, q_range, db, config)
    yield json.dumps(content)
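The letter_range expression expands a request such as range=A-D into the set of letters between the two endpoints (for content_type == "year" the raw q_range is passed through instead). A quick illustration with a hypothetical value:

q_range = "A-D".lower().split('-')                                               # ['a', 'd']
letter_range = set([chr(i) for i in range(ord(q_range[0]), ord(q_range[1]) + 1)])
# letter_range == {'a', 'b', 'c', 'd'}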
Code Example #9
def collocation_fetcher(environ,start_response):
    status = '200 OK'
    headers = [('Content-type', 'application/json; charset=UTF-8'),("Access-Control-Allow-Origin","*")]
    start_response(status,headers)
    environ["SCRIPT_FILENAME"] = environ["SCRIPT_FILENAME"].replace('scripts/collocation_fetcher.py', '')
    cgi = urlparse.parse_qs(environ["QUERY_STRING"],keep_blank_values=True)
    full_report = eval(cgi.get('full_report',['True'])[0])
    db, path_components, q = parse_cgi(environ)
    hits = db.query(q["q"],q["method"],q["arg"],**q["metadata"])
    if full_report:
        all_colloc, left_colloc, right_colloc = r.fetch_collocation(hits, environ["SCRIPT_FILENAME"], q, db)
        yield dumps([all_colloc, left_colloc, right_colloc])
    else:
        results = r.fetch_collocation(hits, environ["SCRIPT_FILENAME"], q, db, full_report=False)
        yield dumps(results)
Code Example #10
def collocation_fetcher(environ, start_response):
    status = '200 OK'
    headers = [('Content-type', 'application/json; charset=UTF-8'),
               ("Access-Control-Allow-Origin", "*")]
    start_response(status, headers)
    environ["SCRIPT_FILENAME"] = environ["SCRIPT_FILENAME"].replace(
        'scripts/collocation_fetcher.py', '')
    cgi = urlparse.parse_qs(environ["QUERY_STRING"], keep_blank_values=True)
    full_report = eval(cgi.get('full_report', ['True'])[0])
    db, path_components, q = parse_cgi(environ)
    hits = db.query(q["q"], q["method"], q["arg"], **q["metadata"])
    if full_report:
        all_colloc, left_colloc, right_colloc = r.fetch_collocation(
            hits, environ["SCRIPT_FILENAME"], q, db)
        yield dumps([all_colloc, left_colloc, right_colloc])
    else:
        results = r.fetch_collocation(hits,
                                      environ["SCRIPT_FILENAME"],
                                      q,
                                      db,
                                      full_report=False)
        yield dumps(results)
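Both collocation_fetcher variants convert the full_report parameter to a boolean with eval(), which executes whatever string arrives in the query string. A minimal sketch of a stricter parse (parse_bool is a hypothetical helper, not part of PhiloLogic4):

def parse_bool(value, default=True):
    # Accept a small whitelist of strings instead of eval()-ing client input.
    if value is None or value.strip() == '':
        return default
    return value.strip().lower() in ('true', '1', 'yes')

# parse_bool('True') -> True, parse_bool('false') -> False, parse_bool('') -> True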
Code Example #11
File: get_bibliography.py  Project: ffzg/PhiloLogic4
def get_bibliography(environ,start_response):
    status = '200 OK'
    headers = [('Content-type', 'application/json; charset=UTF-8'),("Access-Control-Allow-Origin","*")]
    start_response(status,headers)
    environ["SCRIPT_FILENAME"] = environ["SCRIPT_FILENAME"].replace('scripts/get_bibliography.py', '')
    cgi = urlparse.parse_qs(environ["QUERY_STRING"],keep_blank_values=True)
    object_level = cgi.get('object_level', '')[0]
    db, path_components, q = parse_cgi(environ)
    if object_level and object_level in object_levels:
        hits = db.get_all(object_level)
    else:
        hits = db.get_all(db.locals['default_object_level'])
    
    results = []
    for hit in hits:
        hit_object = {}
        for field in db.locals['metadata_fields']:
            hit_object[field] = hit[field] or ''
        hit_object['philo_id'] = hit.philo_id
        results.append(hit_object)
    
    yield json.dumps(results)
Code Example #12
File: get_more_context.py  Project: ffzg/PhiloLogic4
def get_more_context(environ,start_response):
    status = '200 OK'
    headers = [('Content-type', 'application/json; charset=UTF-8'),("Access-Control-Allow-Origin","*")]
    start_response(status,headers)
    environ["SCRIPT_FILENAME"] = environ["SCRIPT_FILENAME"].replace('scripts/get_more_context.py', '')
    cgi = urlparse.parse_qs(environ["QUERY_STRING"],keep_blank_values=True)
    hit_num = int(cgi.get('hit_num',[0])[0])
    db, path_components, q = parse_cgi(environ)
    config = f.WebConfig()
    if q['start'] == 0:
        start = 0
    else:
        start = q['start'] - 1
    end = (q['end'] or q['results_per_page']) + 1
    hit_range = range(start, end)
    hits = db.query(q["q"],q["method"],q["arg"],**q["metadata"])
    context_size = config['concordance_length'] * 3
    html_list = []
    for i in hit_range:
        try:
            html_list.append(r.fetch_concordance(hits[i], environ["SCRIPT_FILENAME"], context_size))
        except IndexError:
            break
    yield json.dumps(html_list)
Code Example #13
def get_bibliography(environ, start_response):
    status = '200 OK'
    headers = [('Content-type', 'application/json; charset=UTF-8'),
               ("Access-Control-Allow-Origin", "*")]
    start_response(status, headers)
    environ["SCRIPT_FILENAME"] = environ["SCRIPT_FILENAME"].replace(
        'scripts/get_bibliography.py', '')
    cgi = urlparse.parse_qs(environ["QUERY_STRING"], keep_blank_values=True)
    object_level = cgi.get('object_level', '')[0]
    db, path_components, q = parse_cgi(environ)
    if object_level and object_level in object_levels:
        hits = db.get_all(object_level)
    else:
        hits = db.get_all(db.locals['default_object_level'])

    results = []
    for hit in hits:
        hit_object = {}
        for field in db.locals['metadata_fields']:
            hit_object[field] = hit[field] or ''
        hit_object['philo_id'] = hit.philo_id
        results.append(hit_object)

    yield json.dumps(results)