Example #1
def getpostgresserverversion() -> str:
	"""

	what it says on the label...

	hipparchiaDB=# select version();
	                                                      version
	------------------------------------------------------------------------------------------------------------------
	 PostgreSQL 11.1 on x86_64-apple-darwin17.7.0, compiled by Apple LLVM version 10.0.0 (clang-1000.11.45.5), 64-bit
	(1 row)


	hipparchiaDB=# SHOW server_version;
	server_version
	----------------
	11.1
	(1 row)

	:return:
	"""

	dbconnection = ConnectionObject()
	cursor = dbconnection.cursor()

	q = 'SHOW server_version;'
	cursor.execute(q)
	v = cursor.fetchone()
	version = v[0]

	dbconnection.connectioncleanup()

	return version
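A minimal usage sketch, assuming the function above is in scope: SHOW server_version yields a string like '11.1', so a version gate is just a split and an int().

majorversion = int(getpostgresserverversion().split('.')[0])  # '11.1' -> 11
if majorversion < 11:
    print('this PostgreSQL may be too old')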
Example #2
def grablistoflines(table: str, uidlist: list, dbcursor=None) -> list:
    """

	fetch many lines at once

	select shortname from authors where universalid = ANY('{lt0860,gr1139}');

	:param table:
	:param uidlist:
	:param dbcursor:
	:return:
	"""

    dbconnection = None
    needscleanup = False

    if not dbcursor:
        dbconnection = ConnectionObject()
        dbcursor = dbconnection.cursor()
        needscleanup = True

    lines = [int(uid.split('_ln_')[1]) for uid in uidlist]

    qtemplate = 'SELECT {wtmpl} from {tb} WHERE index = ANY(%s)'

    q = qtemplate.format(wtmpl=worklinetemplate, tb=table)
    d = (lines, )
    dbcursor.execute(q, d)
    lines = dbcursor.fetchall()

    if needscleanup:
        dbconnection.connectioncleanup()

    lines = [dblineintolineobject(l) for l in lines]

    return lines
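A sketch of the expected call shape; the table name and line numbers are hypothetical, but the uids follow the 'xxnnnn_ln_NNN' pattern that the split above assumes.

uids = ['lt0474_ln_1000', 'lt0474_ln_1001', 'lt0474_ln_1002']
lineobjects = grablistoflines('lt0474', uids)  # one line object per index hit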
Example #3
def returnfirstwork(authorid: str, dbcursor=None) -> str:
	"""
	more exception handling
	this will produce bad results, but it will not kill the program
	:param authorid:
	:param dbcursor:
	:return:
	"""

	needscleanup = False
	if not dbcursor:
		dbconnection = ConnectionObject()
		dbcursor = dbconnection.cursor()
		needscleanup = True

	# print('panic and grab first work of',authorid)
	query = 'SELECT universalid FROM works WHERE universalid LIKE %s ORDER BY universalid'
	data = (authorid+'%',)
	dbcursor.execute(query, data)
	found = dbcursor.fetchone()
	try:
		found = found[0]
	except (TypeError, IndexError):
		# fetchone() returns None when there are no rows, and None[0] raises TypeError
		# yikes: an author we don't know about
		# perseus will send you gr1415, but he is not in the db
		# homer...
		found = returnfirstwork('gr0012w001', dbcursor)

	if needscleanup:
		dbconnection.connectioncleanup()

	return found
Example #4
def buildoptionchecking() -> dict:
	"""

	check what build options were set

	hipparchiaDB=# SELECT corpusname, buildoptions FROM builderversion;
	 corpusname |                                              buildoptions
	------------+---------------------------------------------------------------------------------------------------------
	 lt         | hideknownblemishes: y, htmlifydatabase: n, simplifybrackets: y, simplifyquotes: y, smartsinglequotes: y

	:return:
	"""
	dbconnection = ConnectionObject()
	dbcursor = dbconnection.cursor()

	q = 'SELECT corpusname, buildoptions FROM builderversion'
	try:
		dbcursor.execute(q)
		results = dbcursor.fetchall()
	except psycopg2.ProgrammingError:
		# psycopg2.errors.UndefinedColumn is a subclass of ProgrammingError, so this
		# also works on Windows builds whose psycopg2 lacks the 'errors' module
		results = None
	dbconnection.connectioncleanup()

	optiondict = dict()
	if results:
		for r in results:
			optiondict[r[0]] = r[1]
		for o in optiondict:
			optiondict[o] = optiondict[o].split(', ')
			# turn {'simplifyquotes: y', 'simplifybrackets: y', 'hideknownblemishes: y', 'smartsinglequotes: y', 'htmlifydatabase: n'}
			# into {'hideknownblemishes': 'y', 'htmlifydatabase': 'n', 'simplifybrackets': 'y', 'simplifyquotes': 'y', 'smartsinglequotes': 'y'}
			optiondict[o] = {a.split(': ')[0]: a.split(': ')[1] for a in optiondict[o]}
	return optiondict
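Consuming the result is then two dict lookups; a sketch keyed to the docstring sample (the 'lt' corpus):

buildoptions = buildoptionchecking()
# e.g. {'lt': {'hideknownblemishes': 'y', 'htmlifydatabase': 'n', ...}}
if buildoptions and buildoptions.get('lt', dict()).get('htmlifydatabase') == 'n':
    pass  # hypothetical: react to a database that was not pre-HTMLified at build time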
Example #5
def dbloadasingleworkobject(workuniversalid: str) -> dbOpus:
	"""

	if you get stranded down inside a series of function calls you have no way of regaining access to the master dictionary
	of work objects

	:param workuniversalid:
	:return:
	"""

	dbconnection = ConnectionObject()
	cursor = dbconnection.cursor()

	q = """
	SELECT universalid, title, language, publication_info, 
		levellabels_00, levellabels_01, levellabels_02, levellabels_03, levellabels_04, levellabels_05, 
		workgenre, transmission, worktype, provenance, recorded_date, converted_date, wordcount, 
		firstline, lastline, authentic FROM works WHERE universalid=%s
	"""

	d = (workuniversalid,)
	cursor.execute(q, d)
	r = cursor.fetchone()

	workobject = dbOpus(*r)

	dbconnection.connectioncleanup()

	return workobject
Example #6
def __init__(self, workerid, foundlineobjects: ListProxy,
             listofplacestosearch, searchobject: SearchObject,
             dbconnection, searchfunction):
    self.workerid = workerid
    self.commitcount = 0
    if dbconnection:
        self.dbconnection = dbconnection
        self.needconnectioncleanup = False
    else:
        # you are running Windows and can't pickle your connections
        self.dbconnection = ConnectionObject()
        self.needconnectioncleanup = True
    self.dbcursor = self.dbconnection.cursor()
    self.so = searchobject
    self.foundlineobjects = foundlineobjects
    self.listofplacestosearch = listofplacestosearch
    self.searchfunction = searchfunction
    self.searchfunctionparameters = None
    self.activepoll = self.so.poll
    self.parameterswapper = self.simpleparamswapper
    self.emptytest = self.listofplacestosearch
    try:
        self.getnetxitem = self.listofplacestosearch.pop
    except AttributeError:
        # this should get implemented momentarily after this GenericObject has been initialized
        self.getnetxitem = NotImplementedError
    self.remainder = self.listofplacestosearch
    self.emptyerror = IndexError
    self.remaindererror = TypeError
Example #7
def checkforstoredvector(so: SearchObject):
    """

	the stored vector might not reflect the current math rules

	return False if you are 'outdated'

	hipparchiaDB=# select ts,thumbprint,uidlist from storedvectors;
	         ts          | thumbprint |   uidlist
	---------------------+------------+--------------
	 2018-02-14 20:49:00 | json       | {lt0474w011}
	 2018-02-14 20:50:00 | json       | {lt0474w057}
	(2 rows)

	:param so:
	:return:
	"""

    currentvectorvalues = so.vectorvalues.getvectorvaluethumbprint()

    vectortype = so.vectorquerytype
    if vectortype == 'analogies':
        vectortype = 'nearestneighborsquery'

    uidlist = so.searchlistthumbprint
    # debugmessage('checkforstoredvector() checking for {u}'.format(u=uidlist))

    dbconnection = ConnectionObject()
    cursor = dbconnection.cursor()

    q = """
	SELECT calculatedvectorspace 
		FROM public.storedvectors 
		WHERE thumbprint=%s AND uidlist=%s AND vectortype=%s AND baggingmethod = %s
	"""
    d = (currentvectorvalues, uidlist, vectortype, so.session['baggingmethod'])

    try:
        cursor.execute(q, d)
        result = cursor.fetchone()
    except psycopg2.ProgrammingError:
        # e.g. psycopg2.ProgrammingError: relation "public.storedvectors" does not exist
        # (psycopg2.errors.UndefinedTable is a subclass of ProgrammingError,
        # so this clause also covers the missing-table case)
        createvectorstable()
        result = False

    dbconnection.connectioncleanup()

    if not result:
        # debugmessage('checkforstoredvector(): returning "False"')
        return False

    returnval = pickle.loads(result[0])
    # debugmessage('checkforstoredvector(): returning a model')

    return returnval
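The calculatedvectorspace column holds a pickled model in a bytea column, so the storage round trip is plain pickle; a self-contained sketch of that assumption:

import pickle

model = {'any': 'picklable object'}  # stand-in for a real vector model
blob = pickle.dumps(model)           # bytes: what gets written to the bytea column
assert pickle.loads(blob) == model   # what checkforstoredvector() does on the way back out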
Example #8
def loadallauthorsasobjects() -> dict:
    """

	return a dict of all possible author objects

	:return:
	"""

    print('loading all authors...', end='')

    dbconnection = ConnectionObject()
    cursor = dbconnection.cursor()

    q = 'SELECT * FROM authors'

    cursor.execute(q)
    results = resultiterator(cursor)

    authorsdict = {r[0]: dbAuthor(*r) for r in results}

    print('\t', len(authorsdict), 'authors loaded', end='')

    dbconnection.connectioncleanup()

    return authorsdict
Example #9
def loadallworksasobjects() -> dict:
    """

	return a dict of all possible work objects

	:return:
	"""

    print('loading all works...  ', end='')

    dbconnection = ConnectionObject()
    cursor = dbconnection.cursor()

    q = """
	SELECT universalid, title, language, publication_info, levellabels_00, levellabels_01, levellabels_02,
		levellabels_03, levellabels_04, levellabels_05, workgenre, transmission, worktype, provenance, 
		recorded_date, converted_date, wordcount, firstline, lastline, authentic FROM works
	"""

    cursor.execute(q)
    results = resultiterator(cursor)

    worksdict = {r[0]: dbOpus(*r) for r in results}

    print('\t', len(worksdict), 'works loaded', end='')

    dbconnection.connectioncleanup()

    return worksdict
Example #10
def probefordatabases() -> dict:
	"""

	figure out which non-author tables are actually installed

	:return:
	"""

	dbconnection = ConnectionObject()
	cursor = dbconnection.cursor()

	available = dict()

	possible = ['greek_dictionary', 'greek_lemmata', 'greek_morphology',
	            'latin_dictionary', 'latin_lemmata', 'latin_morphology',
	            'wordcounts_0']

	for p in possible:
		q = 'SELECT * FROM {table} LIMIT 1'.format(table=p)
		try:
			cursor.execute(q)
			results = cursor.fetchall()
		except psycopg2.ProgrammingError:
			# psycopg2.ProgrammingError: relation "greek_morphology" does not exist
			results = False
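			# caveat (assumption): if the connection is not in autocommit mode,
			# the failed query aborts the transaction and would poison the
			# remaining probes; ConnectionObject is presumed to recover here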

		available[p] = bool(results)

	dbconnection.connectioncleanup()

	return available
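Usage sketch: report which optional tables are absent.

available = probefordatabases()
missing = [t for t, installed in available.items() if not installed]
if missing:
    print('not installed:', ', '.join(missing))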
Example #11
def bulkfindwordcounts(listofwords: List[str]) -> List[dbWordCountObject]:
    """

	note that the lists of words should all start with the same letter since the wordcount tables are letter-keyed

	hipparchiaDB=# CREATE TEMP TABLE bulkcounter_51807f8bbe08 AS SELECT values AS  entriestocheck FROM unnest(ARRAY['κατακλειούϲηϲ', 'κατακλῇϲαι', 'κατακλεῖϲαι']) values;

	hipparchiaDB=# SELECT * FROM wordcounts_κ WHERE EXISTS (SELECT 1 FROM bulkcounter_51807f8bbe08 tocheck WHERE tocheck.entriestocheck = wordcounts_κ.entry_name);
	  entry_name   | total_count | gr_count | lt_count | dp_count | in_count | ch_count
	---------------+-------------+----------+----------+----------+----------+----------
	 κατακλεῖϲαι   |          31 |       30 |        0 |        0 |        1 |        0
	 κατακλειούϲηϲ |           3 |        3 |        0 |        0 |        0 |        0
	 κατακλῇϲαι    |           1 |        1 |        0 |        0 |        0 |        0
	(3 rows)

	:param listofwords:
	:return:
	"""

    dbconnection = ConnectionObject(readonlyconnection=False)
    dbcursor = dbconnection.cursor()

    try:
        firstletteroffirstword = stripaccents(listofwords[0][0])
    except IndexError:
        # the list was empty; close the connection before bailing out
        dbconnection.connectioncleanup()
        return list()

    if firstletteroffirstword not in 'abcdefghijklmnopqrstuvwxyzαβψδεφγηιξκλμνοπρϲτυωχθζ':
        firstletteroffirstword = '0'

    tqtemplate = """
	CREATE TEMP TABLE bulkcounter_{rnd} AS
		SELECT values AS 
			entriestocheck FROM unnest(ARRAY[%s]) values
	"""

    uniquename = assignuniquename(12)
    tempquery = tqtemplate.format(rnd=uniquename)
    data = (listofwords, )
    dbcursor.execute(tempquery, data)

    qtemplate = """
	SELECT * FROM wordcounts_{x} WHERE EXISTS 
		(SELECT 1 FROM bulkcounter_{rnd} tocheck WHERE tocheck.entriestocheck = wordcounts_{x}.entry_name)
	"""

    query = qtemplate.format(rnd=uniquename, x=firstletteroffirstword)
    try:
        dbcursor.execute(query)
        results = resultiterator(dbcursor)
    except psycopg2.ProgrammingError:
        # if you do not have the wordcounts installed:
        # psycopg2.ProgrammingError: relation "wordcounts_a" does not exist
        results = list()

    wordcountobjects = [dbWordCountObject(*r) for r in results]

    dbconnection.connectioncleanup()

    return wordcountobjects
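A hypothetical call echoing the docstring sample; all the words share the initial κ, as the letter-keyed tables require.

counts = bulkfindwordcounts(['κατακλεῖϲαι', 'κατακλειούϲηϲ', 'κατακλῇϲαι'])
print(len(counts), 'wordcount rows found')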
Example #12
def sampleworkcitation(authorid: str, workid: str) -> JSON_STR:
    """

	called by loadsamplecitation() in autocomplete.js

	we are using the manual input style on the web page
	so we need some hint on how to do things: check the end line for a sample citation

	"In Timarchum (w001)" yields...

	127.0.0.1 - - [04/Apr/2021 13:48:53] "GET /get/json/samplecitation/gr0026/001 HTTP/1.1" 200 -
	/get/json/samplecitation
		{"firstline": "1.1", "lastline": "196.7"}

	:param authorid:
	:param workid:
	:return:
	"""
    dbconnection = ConnectionObject()
    dbcursor = dbconnection.cursor()

    returnvals = dict()
    returnvals['firstline'] = str()
    returnvals['lastline'] = str()

    authorid = depunct(authorid)
    workid = depunct(workid)

    try:
        # grabbing ao validates the authorid; only wo is used below
        ao = authordict[authorid]
        wo = workdict[authorid + 'w' + workid]
    except KeyError:
        returnvals['firstline'] = 'no such author/work combination'
        dbconnection.connectioncleanup()
        return json.dumps(returnvals)

    toplevel = wo.availablelevels - 1
    firstlineindex = returnfirstorlastlinenumber(wo.universalid,
                                                 dbcursor,
                                                 disallowt=True,
                                                 disallowlevel=toplevel)
    flo = dblineintolineobject(
        grabonelinefromwork(authorid, firstlineindex, dbcursor))

    lastlineidx = returnfirstorlastlinenumber(wo.universalid,
                                              dbcursor,
                                              findlastline=True)
    llo = dblineintolineobject(
        grabonelinefromwork(authorid, lastlineidx, dbcursor))

    returnvals['firstline'] = flo.prolixlocus()
    returnvals['lastline'] = llo.prolixlocus()

    results = json.dumps(returnvals)

    dbconnection.connectioncleanup()

    return results
Example #13
def trimbypartofspeech(listofwords: List[str], partofspeech: str,
                       baggingmethod: str) -> set:
    """

	return only the verbs, e.g., in a list of words

	:param listofwords:
	:param partofspeech:
	:param baggingmethod:
	:return:
	"""

    # needs to match list in sessionfunctions.py less 'none'
    trimmingmethods = ['conjugated', 'declined']

    if partofspeech not in trimmingmethods:
        return set(listofwords)

    dbconnection = ConnectionObject()
    dbcursor = dbconnection.cursor()

    morphologyobjecdict = {
        w: lookformorphologymatches(w, dbcursor)
        for w in listofwords
    }
    dbconnection.connectioncleanup()

    # {'serius¹': None, 'solacium': <server.hipparchiaobjects.dbtextobjects.dbMorphologyObject object at 0x155362780>, ... }
    possibilitieslistdict = {
        m: morphologyobjecdict[m].getpossible()
        for m in morphologyobjecdict if morphologyobjecdict[m]
    }

    possible = set()
    if partofspeech == 'conjugated':
        possible = {m for m in possibilitieslistdict
                    if any(p.isconjugatedverb(bagging=baggingmethod)
                           for p in possibilitieslistdict[m])}

    if partofspeech == 'declined':
        possible = {m for m in possibilitieslistdict
                    if any(p.isnounoradjective(bagging=baggingmethod)
                           for p in possibilitieslistdict[m])}

    trimmedlist = {w for w in listofwords if w in possible}

    return trimmedlist

    return trimmedlist
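Sketch of a call; the baggingmethod value is a placeholder, since the valid names live in sessionfunctions.py.

verbsonly = trimbypartofspeech(['amo', 'amicitia', 'celeriter'], 'conjugated', 'somebaggingmethod')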
Example #14
def partialworkbetweenclausecontents(
        workobject: dbOpus,
        searchobject: SearchObject) -> Tuple[str, Dict[str, list]]:
    """

	example: Xenophon, Hellenica, Book 1 less Chapter 3

	endpoints ('gr0032w001', {'listofboundaries': [(1, 907)], 'listofomissions': [(257, 349)]})

	:param workobject:
	:param searchobject:
	:return:
	"""

    hasselections = [p[0:10] for p in searchobject.psgselections if p]

    dbconnection = ConnectionObject('autocommit')
    dbcursor = dbconnection.cursor()

    blist = list()
    olist = list()
    for sel in searchobject.psgselections:
        if workobject.universalid == sel[0:10]:
            boundariestuple = findselectionboundaries(workobject, sel,
                                                      dbcursor)
            blist.append(boundariestuple)
    for sel in searchobject.psgexclusions:
        if workobject.universalid == sel[0:10]:
            boundariestuple = findselectionboundaries(workobject, sel,
                                                      dbcursor)
            olist.append(boundariestuple)
            if workobject.universalid not in hasselections:
                # if you exclude a subsection, then you implicitly include the whole
                # unless you have only selected a higher level subsection
                # exclude x., mem. 3 means you want to search x., mem.
                # BUT exclude x., mem. 3.4 has a different force if you included x.,  mem. 3
                blist.append((workobject.starts, workobject.ends))

    blist = list(set(blist))
    olist = list(set(olist))

    endpoints = (workobject.universalid, {
        'listofboundaries': blist,
        'listofomissions': olist
    })

    dbconnection.connectioncleanup()

    return endpoints
Example #15
def bulklexicalgrab(listofwords: List[str], tabletouse: str, targetcolumn: str,
                    language: str) -> list:
    """

	grab a bunch of lex/morph entries by using a temp table

	e.g.,
		lexicalresults = bulklexicalgrab(listofwords, 'dictionary', 'entry_name', language)
		results = bulklexicalgrab(listofwords, 'morphology', 'observed_form', language)

	:param listofwords:
	:param tabletouse:
	:param targetcolumn:
	:param language:
	:return:
	"""

    dbconnection = ConnectionObject(readonlyconnection=False)
    dbcursor = dbconnection.cursor()

    tqtemplate = """
	CREATE TEMP TABLE bulklex_{rnd} AS
		SELECT values AS 
			entriestocheck FROM unnest(ARRAY[%s]) values
	"""

    uniquename = assignuniquename(12)
    tempquery = tqtemplate.format(rnd=uniquename)
    data = (listofwords, )
    dbcursor.execute(tempquery, data)

    qtemplate = """
	SELECT * FROM {lg}_{thetable} WHERE EXISTS 
		(SELECT 1 FROM bulklex_{rnd} tocheck WHERE tocheck.entriestocheck = {lg}_{thetable}.{target})
	"""

    query = qtemplate.format(rnd=uniquename,
                             thetable=tabletouse,
                             target=targetcolumn,
                             lg=language)

    try:
        dbcursor.execute(query)
        # materialize before the connection is cleaned up, in case resultiterator() is lazy
        results = list(resultiterator(dbcursor))
    except psycopg2.ProgrammingError:
        # if the requested lexical table is not installed:
        # psycopg2.ProgrammingError: relation does not exist
        results = list()

    dbconnection.connectioncleanup()

    return results
Example #16
def rankheadwordsbyprevalence(listofheadwords: list) -> dict:
    """

	"""

    # print('rankheadwordsbyprevalence() listofheadwords', listofheadwords)

    dbconnection = ConnectionObject(readonlyconnection=False)
    dbconnection.setautocommit()
    dbcursor = dbconnection.cursor()
    rnd = assignuniquename(6)

    tqtemplate = """
	CREATE TEMPORARY TABLE temporary_headwordlist_{rnd} AS
		SELECT headwords AS hw FROM unnest(ARRAY[{allwords}]) headwords
	"""

    qtemplate = """
	SELECT entry_name, total_count FROM {db} 
		WHERE EXISTS 
			(SELECT 1 FROM temporary_headwordlist_{rnd} temptable WHERE temptable.hw = {db}.entry_name)
	"""

    tempquery = tqtemplate.format(rnd=rnd, allwords=list(listofheadwords))
    dbcursor.execute(tempquery)

    query = qtemplate.format(rnd=rnd, db='dictionary_headword_wordcounts')
    dbcursor.execute(query)
    results = resultiterator(dbcursor)

    ranked = {r[0]: r[1] for r in results}

    # you have a problem: you just tossed a bunch of headwords that did not have good prevalence data
    # discovered when Ϲωκράτηϲ went missing from Plato

    r = set(ranked.keys())
    h = set(listofheadwords)
    delta = h - r

    nullranked = {d: 0 for d in delta}

    ranked = {**ranked, **nullranked}

    dbconnection.connectioncleanup()

    return ranked
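Usage sketch: the returned dict sorts straight into a most-common-first list.

ranked = rankheadwordsbyprevalence(['λόγοϲ', 'ἄκρατοϲ'])
mostcommonfirst = sorted(ranked, key=ranked.get, reverse=True)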
Example #17
def createvectorstable():
    """

	zap and reconstitute the storedvectors table

	:return:
	"""

    consolewarning('resetting the stored vectors table', color='green')

    dbconnection = ConnectionObject(ctype='rw')
    dbcursor = dbconnection.cursor()

    query = """
	DROP TABLE IF EXISTS public.storedvectors;

	CREATE TABLE public.storedvectors
	(
		ts timestamp without time zone,
		thumbprint character varying(32) COLLATE pg_catalog."default",
		uidlist character varying(32) COLLATE pg_catalog."default",
		vectortype character varying(24) COLLATE pg_catalog."default",
		baggingmethod character varying(24) COLLATE pg_catalog."default",
		calculatedvectorspace bytea
	)
	WITH (
		OIDS = FALSE
	)
	TABLESPACE pg_default;
	
	ALTER TABLE public.storedvectors
		OWNER to hippa_wr;
	
	GRANT SELECT ON TABLE public.storedvectors TO {reader};
	
	GRANT ALL ON TABLE public.storedvectors TO {writer};
	"""

    query = query.format(reader=hipparchia.config['DBUSER'],
                         writer=hipparchia.config['DBWRITEUSER'])

    dbcursor.execute(query)

    dbconnection.connectioncleanup()

    return
Example #18
def monobreaktextsintosentences(searchlist: list, searchobject) -> List[tuple]:
    """

	A wrapper for breaktextsintosentences() since Windows can't MP it...

	findsentences() results[0] ('line/gr0014w001/1', 'ἀντὶ πολλῶν ἄν ὦ ἄνδρεϲ ἀθηναῖοι χρημάτων ὑμᾶϲ ἑλέϲθαι νομίζω εἰ φανερὸν γένοιτο τὸ μέλλον ϲυνοίϲειν τῇ πόλει περὶ ὧν νυνὶ ϲκοπεῖτε')

	:param searchlist:
	:param searchobject:
	:return:
	"""
    foundsentences = list()
    dbconnection = ConnectionObject(readonlyconnection=False)
    foundsentences = breaktextsintosentences(foundsentences, searchlist,
                                             searchobject, dbconnection)
    dbconnection.connectioncleanup()
    fs = list(foundsentences)
    return fs
Example #19
def loadlemmataasobjects() -> dict:
    """

	return a dict of all possible lemmataobjects

	hipparchiaDB=# select * from greek_lemmata limit 1;
	 dictionary_entry | xref_number |    derivative_forms
	------------------+-------------+------------------------
	 ζῳοτροφία        |    49550639 | {ζῳοτροφίᾳ,ζῳοτροφίαϲ}

	:return:
	"""

    print('loading all lemmata...', end=str())
    dbconnection = ConnectionObject()
    cursor = dbconnection.cursor()

    q = """
	SELECT dictionary_entry, xref_number, derivative_forms FROM {lang}_lemmata
	"""

    lemmatadict = dict()

    languages = {1: 'greek', 2: 'latin'}

    for key in languages:
        cursor.execute(q.format(lang=languages[key]))
        results = resultiterator(cursor)
        lemmatadict = {
            **{r[0]: dbLemmaObject(*r)
               for r in results},
            **lemmatadict
        }

    print('\t', len(lemmatadict), 'lemmata loaded', end=str())
    # print('lemmatadict["molestus"]', lemmatadict['molestus'].formlist)
    # print('lemmatadict["Mausoleus"]', lemmatadict['Mausoleus'].formlist)
    # print('lemmatadict["λύω"]', lemmatadict['λύω'].formlist)
    # print('lemmatadict["Δημοϲθένηϲ"]', lemmatadict['Δημοϲθένηϲ'].formlist)

    dbconnection.connectioncleanup()

    return lemmatadict
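Usage matching the commented-out debug lines above: each value is a dbLemmaObject whose formlist carries the derivative forms (the attribute name is taken from those comments).

lemmatadict = loadlemmataasobjects()
print(lemmatadict['λύω'].formlist[:5])  # first five observed forms, assuming the entry exists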
Example #20
def findworkstructure(author, work, passage=None) -> JSON_STR:
    """
	request detailed info about how a work works
	this is fed back to the js boxes: which should be active, what the autocomplete values are, etc.

	127.0.0.1 - - [04/Apr/2021 13:36:16] "GET /get/json/workstructure/lt0474/037 HTTP/1.1" 200 -
	/get/json/workstructure
		{"totallevels": 3, "level": 2, "label": "book", "low": "1", "high": "3", "range": ["1", "2", "3"]}

	:param author:
	:param work:
	:param passage:
	:return:
	"""

    dbconnection = ConnectionObject()
    dbcursor = dbconnection.cursor()

    po = StructureInputParsingObject(author, work, passage)
    wo = po.workobject

    ws = dict()
    if wo:
        lowandhigh = findvalidlevelvalues(wo, po.getcitationtuple(), dbcursor)
        # example: (4, 3, 'Book', '1', '7', ['1', '2', '3', '4', '5', '6', '7'])
        ws['totallevels'] = lowandhigh.levelsavailable
        ws['level'] = lowandhigh.currentlevel
        ws['label'] = lowandhigh.levellabel
        ws['low'] = lowandhigh.low
        ws['high'] = lowandhigh.high
        ws['range'] = lowandhigh.valuerange
    else:
        # (2, 0, 'verse', '1', '100')
        ws['totallevels'] = 1
        ws['level'] = 0
        ws['label'] = 'Error: repick the work'
        ws['low'] = 'Error:'
        ws['high'] = 'again'
        ws['range'] = ['error', 'select', 'the', 'work', 'again']

    results = json.dumps(ws)

    dbconnection.connectioncleanup()

    return results
Example #21
def createstoredimagestable():
    """

	zap and reconstitute the storedimages table

	:return:
	"""

    consolewarning('resetting the stored images table', color='green')

    dbconnection = ConnectionObject(ctype='rw')
    dbcursor = dbconnection.cursor()

    query = """
	DROP TABLE IF EXISTS public.storedvectorimages;
	
	CREATE TABLE public.storedvectorimages
	(
		imagename character varying(12),
		imagedata bytea
	)
	WITH (
		OIDS = FALSE
	)
	TABLESPACE pg_default;
	
	ALTER TABLE public.storedvectorimages
		OWNER to hippa_wr;
	
	GRANT SELECT ON TABLE public.storedvectorimages TO {reader};
	
	GRANT ALL ON TABLE public.storedvectorimages TO {writer};
	"""

    query = query.format(reader=hipparchia.config['DBUSER'],
                         writer=hipparchia.config['DBWRITEUSER'])

    dbcursor.execute(query)

    dbconnection.connectioncleanup()

    return
Example #22
def breaktextsintosentences(foundsentences: ListProxy, searchlist: ListProxy,
                            so: SearchObject,
                            dbconnection: ConnectionObject) -> List[tuple]:
    """

	break a text into sentences that contain the term we are looking for

	that is, findsentences() both chunks and searches

	findsentences() results[0] ('line/gr0014w001/1', 'ἀντὶ πολλῶν ἄν ὦ ἄνδρεϲ ἀθηναῖοι χρημάτων ὑμᾶϲ ἑλέϲθαι νομίζω εἰ φανερὸν γένοιτο τὸ μέλλον ϲυνοίϲειν τῇ πόλει περὶ ὧν νυνὶ ϲκοπεῖτε')

	:param foundsentences:
	:param searchlist:
	:param so:
	:param dbconnection:
	:return:
	"""

    activepoll = so.poll

    dbcursor = dbconnection.cursor()

    commitcount = 0
    while searchlist:
        commitcount += 1
        try:
            authortable = searchlist.pop()
        except IndexError:
            authortable = None

        if authortable:
            foundsentences.extend(findsentences(authortable, so, dbcursor))

            dbconnection.checkneedtocommit(commitcount)

        try:
            activepoll.remain(len(searchlist))
        except TypeError:
            pass

    return foundsentences
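The pop()-until-IndexError loop is what lets several processes drain one managed list without duplicating work: each pop() is atomic. A standalone sketch of the same idiom with hypothetical table names:

from multiprocessing import Manager

shared = Manager().list(['gr0014', 'lt0474', 'gr0032'])
while shared:
    try:
        authortable = shared.pop()  # atomic: no two workers get the same table
    except IndexError:
        break  # another worker emptied the list between the check and the pop
    print('would search', authortable)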
Example #23
def findparserxref(wordobject) -> str:
    """

	used in LEXDEBUGMODE to find the parser xrefvalue for a headword

	:param wordobject:
	:return:
	"""

    dbconnection = ConnectionObject()
    dbcursor = dbconnection.cursor()

    if wordobject.isgreek():
        lang = 'greek'
    else:
        lang = 'latin'

    trimmedentry = re.sub(r'[¹²³⁴⁵⁶⁷⁸⁹]', '', wordobject.entry)

    q = 'SELECT * FROM {lang}_lemmata WHERE dictionary_entry=%s'.format(
        lang=lang)
    d = (wordobject.entry, )
    dbcursor.execute(q, d)
    results = dbcursor.fetchall()

    if not results:
        d = (trimmedentry, )
        dbcursor.execute(q, d)
        results = dbcursor.fetchall()

    # it is not clear that more than one item will ever be returned
    # but if that happened, you need to be ready to deal with it
    lemmaobjects = [dbLemmaObject(*r) for r in results]
    xrefs = [str(l.xref) for l in lemmaobjects]

    xrefvalues = ', '.join(xrefs)

    dbconnection.connectioncleanup()

    return xrefvalues
Example #24
def querytotalwordcounts(word: str, dbcursor=None) -> dbHeadwordObject:
    """

	use the dictionary_headword_wordcounts table

	[a] take a dictionary entry: ἄκρατοϲ
	[b] look it up

	return a countobject

	:param word:
	:param dbcursor:
	:return:
	"""

    dbconnection = None
    if not dbcursor:
        dbconnection = ConnectionObject()
        dbconnection.setautocommit()
        dbcursor = dbconnection.cursor()

    table = 'dictionary_headword_wordcounts'
    qtemplate = """
		SELECT 
			entry_name , total_count, gr_count, lt_count, dp_count, in_count, ch_count,
			frequency_classification, early_occurrences, middle_occurrences ,late_occurrences, 
			acta, agric, alchem, anthol, apocalyp, apocryph, apol, astrol, astron, biogr, bucol, caten, chronogr, comic, comm, 
			concil, coq, dialog, docu, doxogr, eccl, eleg, encom, epic, epigr, epist, evangel, exeget, fab, geogr, gnom, gramm, 
			hagiogr, hexametr, hist, homilet, hymn, hypoth, iamb, ignotum, invectiv, inscr, jurisprud, lexicogr, liturg, lyr, 
			magica, math, mech, med, metrolog, mim, mus, myth, narrfict, nathist, onir, orac, orat, paradox, parod, paroem, 
			perieg, phil, physiognom, poem, polyhist, prophet, pseudepigr, rhet, satura, satyr, schol, tact, test, theol, trag
		FROM {tbl} WHERE entry_name=%s
	"""

    q = qtemplate.format(tbl=table)
    d = (word, )
    try:
        dbcursor.execute(q, d)
        hw = dbcursor.fetchone()
    except psycopg2.ProgrammingError:
        # psycopg2.ProgrammingError: relation "dictionary_headword_wordcounts" does not exist
        # you have not installed the wordcounts (yet)
        hw = None

    try:
        hwcountobject = dbHeadwordObject(*hw)
    except TypeError:
        # hw was None: no such headword, or the wordcounts are not installed
        # print('failed to initialize dbHeadwordObject for', word)
        hwcountobject = None

    if dbconnection:
        dbconnection.connectioncleanup()

    return hwcountobject
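A defensive usage sketch, since the function deliberately returns None when the wordcounts are missing:

hwcountobject = querytotalwordcounts('ἄκρατοϲ')
if not hwcountobject:
    print('no headword counts available')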
Example #25
def findchronologicalweights(era: str, language: str) -> int:
	"""

	an initial call to dictionary_headword_wordcounts to figure out the relative weight of the different eras

	how many words are 'early' / 'middle' / 'late'

	:param era:
	:param language:
	:return:
	"""

	dbconnection = ConnectionObject('autocommit')
	cursor = dbconnection.cursor()

	eramap = {
		'early': 'early_occurrences',
		'middle': 'middle_occurrences',
		'late': 'late_occurrences'
	}

	try:
		theera = eramap[era]
	except KeyError:
		dbconnection.connectioncleanup()
		return -1

	q = 'SELECT SUM({e}) FROM dictionary_headword_wordcounts WHERE entry_name ~ %s'.format(e=theera)

	if language == 'G':
		d = ('^[^a-z]',)
	else:
		d = ('^[a-z]',)

	cursor.execute(q, d)
	thesum = cursor.fetchall()
	thesum = thesum[0][0]

	dbconnection.connectioncleanup()

	return thesum
Example #26
def fetchvectorgraph(imagename) -> bytes:
	"""

	grab a graph in the image table so that you can subsequently display it in the browser

	note that images get deleted after use

	also note that we hand the data to the db and then immediately grab it back out of the db because of
	constraints imposed by the way flask works

	:param imagename:
	:return:
	"""

	if hipparchia.config['RETAINFIGURES']:
		deletewhendone = False
	else:
		deletewhendone = True

	dbconnection = ConnectionObject(ctype='rw')
	dbconnection.setautocommit()
	cursor = dbconnection.cursor()

	q = 'SELECT imagedata FROM public.storedvectorimages WHERE imagename=%s'
	d = (imagename,)

	cursor.execute(q, d)

	imagedata = cursor.fetchone()
	# need to convert to bytes, otherwise:
	# AttributeError: 'memoryview' object has no attribute 'read'
	try:
		imagedata = bytes(imagedata[0])
	except TypeError:
		# TypeError: 'NoneType' object is not subscriptable
		# how did this happen...
		# if you right click and download a graph in Firefox it will try to pull via the URL
		# but that figure is almost certainly gone unless you are a debugger retaining figures...
		imagedata = b''
		consolewarning('fetchvectorgraph() failed to fetch image {i}'.format(i=imagename))

	# print('fetched {n} from vector image table'.format(n=randomid))

	# now we should delete the image because we are done with it

	if deletewhendone:
		q = 'DELETE FROM public.storedvectorimages WHERE imagename=%s'
		d = (imagename,)
		cursor.execute(q, d)

	dbconnection.connectioncleanup()

	return imagedata
Example #27
def precomposedsqlsearchmanager(so: SearchObject) -> List[dbWorkLine]:
    """

    quick and dirty dispatcher

    note that you need so.searchsqldict to be properly configured before you get here

    """

    activepoll = so.poll

    workers = setthreadcount()

    manager = Manager()
    foundlineobjects = manager.list()

    searchsqlbyauthor = [so.searchsqldict[k] for k in so.searchsqldict.keys()]
    searchsqlbyauthor = manager.list(searchsqlbyauthor)

    activepoll.allworkis(len(searchsqlbyauthor))
    activepoll.remain(len(searchsqlbyauthor))
    activepoll.sethits(0)

    argumentuple = [foundlineobjects, searchsqlbyauthor, so]

    if icanpickleconnections():
        oneconnectionperworker = {i: ConnectionObject() for i in range(workers)}
    else:
        oneconnectionperworker = {i: None for i in range(workers)}

    argumentswithconnections = [tuple([i] + list(argumentuple) + [oneconnectionperworker[i]]) for i in range(workers)]

    if platform.system() == 'Windows':
        # windows hates multiprocessing; but in practice windows should never be coming here: HipparchiaGoDBHelper...
        return workonprecomposedsqlsearch(*argumentswithconnections[0])

    jobs = [Process(target=workonprecomposedsqlsearch, args=argumentswithconnections[i]) for i in range(workers)]

    for j in jobs:
        j.start()
    for j in jobs:
        j.join()

    # generator needs to turn into a list
    foundlineobjects = list(foundlineobjects)

    for c in oneconnectionperworker:
        if oneconnectionperworker[c]:
            # the values are None when connections could not be pickled (Windows path)
            oneconnectionperworker[c].connectioncleanup()

    return foundlineobjects
Example #28
def vectorprepdispatcher(so: SearchObject) -> List[tuple]:
    """

	assign the vector prep to multiprocessing workers
		searchobject:
			<server.hipparchiaclasses.SearchObject object at 0x1102c15f8>
		activepoll:
			<server.hipparchiaclasses.ProgressPoll object at 0x1102c15f8>

	findsentences() results[0] ('line/gr0014w001/1', 'ἀντὶ πολλῶν ἄν ὦ ἄνδρεϲ ἀθηναῖοι χρημάτων ὑμᾶϲ ἑλέϲθαι νομίζω εἰ φανερὸν γένοιτο τὸ μέλλον ϲυνοίϲειν τῇ πόλει περὶ ὧν νυνὶ ϲκοπεῖτε')

	:param so:
	:return:
	"""

    if platform.system() == 'Windows':
        # otherwise: RecursionError: maximum recursion depth exceeded while calling a Python object
        searchlist = list(so.indexrestrictions.keys())
        return monobreaktextsintosentences(searchlist, so)

    manager = Manager()
    foundsentences = manager.list()
    listofitemstosearch = manager.list(so.indexrestrictions.keys())

    workers = setthreadcount()

    targetfunction = breaktextsintosentences

    connections = {
        i: ConnectionObject(readonlyconnection=False)
        for i in range(workers)
    }

    jobs = [
        Process(target=targetfunction,
                args=(foundsentences, listofitemstosearch, so, connections[i]))
        for i in range(workers)
    ]

    for j in jobs:
        j.start()
    for j in jobs:
        j.join()

    for c in connections:
        connections[c].connectioncleanup()

    fs = list(foundsentences)
    return fs
Example #29
def findcountsviawordcountstable(wordtocheck):
    """

	used to look up a list of specific observed forms
	(vs. dictionary headwords)

	:param wordtocheck:
	:return:
	"""

    dbconnection = ConnectionObject()
    dbcursor = dbconnection.cursor()

    initial = stripaccents(wordtocheck[0])
    # alternatives = re.sub(r'[uv]','[uv]',c)
    # alternatives = '^'+alternatives+'$'
    if initial in 'abcdefghijklmnopqrstuvwxyzαβψδεφγηιξκλμνοπρϲτυωχθζ':
        # note that we just lost "'φερον", "'φερεν", "'φέρεν", "'φερεϲ", "'φερε",...
        # but the punctuation killer probably zapped them long ago
        # this needs to be addressed in HipparchiaBuilder
        q = 'SELECT * FROM wordcounts_{i} WHERE entry_name = %s'.format(
            i=initial)
    else:
        q = 'SELECT * FROM wordcounts_0 WHERE entry_name = %s'

    d = (wordtocheck, )
    try:
        dbcursor.execute(q, d)
        result = dbcursor.fetchone()
    except psycopg2.ProgrammingError:
        # psycopg2.ProgrammingError: relation "wordcounts_ε" does not exist
        # you did not build the wordcounts at all?
        result = None

    dbconnection.connectioncleanup()

    return result
Example #30
def findcorpusweightviawordcounts(corpus: str) -> int:
	"""

	hipparchiaDB=> select * from wordcounts_θ where entry_name='θυγατριδοῦϲ';
	 entry_name  | total_count | gr_count | lt_count | dp_count | in_count | ch_count
	-------------+-------------+----------+----------+----------+----------+----------
	 θυγατριδοῦϲ |         128 |      120 |        0 |        3 |        5 |        0
	(1 row)

	:param corpus:
	:return:
	"""
	letters = {'G': 'αβψδεφγηιξκλμνοπρϲτυωχθζ',
	           'L': 'abcdefghijklmnopqrstuvxyz',
	           'A': '0abcdefghijklmnopqrstuvwxyzαβψδεφγηιξκλμνοπρϲτυωχθζ'}

	dbconnection = ConnectionObject('autocommit')
	cursor = dbconnection.cursor()

	myletters = letters['A']  # probe every table regardless of corpus; the 'G' and 'L' subsets are currently unused

	corpora = {'gr': 'gr_count', 'lt': 'lt_count', 'dp': 'dp_count', 'in': 'in_count', 'ch': 'ch_count'}
	c = corpora[corpus]

	mycount = 0
	for l in myletters:
		q = 'SELECT SUM({c}) FROM wordcounts_{l}'.format(c=c, l=l)
		cursor.execute(q)
		thesum = cursor.fetchall()
		try:
			mycount += thesum[0][0]
		except TypeError:
			# TypeError: unsupported operand type(s) for +=: 'int' and 'NoneType'
			pass

	dbconnection.connectioncleanup()

	return mycount