Example #1
File: set_cover.py Project: cceh/ntg
def init_app (app):
    """ Init the Flask app. """

    app.config.val = None
    app.config.set_cover_rg_id = None

    with app.config.dba.engine.begin () as conn:
        try:
            res = execute (conn, """
            SELECT bk_id
            FROM books
            WHERE book = :book
            """, { 'book' : app.config['BOOK'] })
            bk_id = res.fetchone ()[0]

            res = execute (conn, """
            SELECT rg_id
            FROM ranges
            WHERE bk_id = :bk_id AND range = 'All'
            """, { 'bk_id' : bk_id })
            rg_id = res.fetchone ()[0]

            app.config.set_cover_rg_id = rg_id
        except:
            pass # FIXME
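
This first example already shows the pattern shared by every snippet on this page: queries go through a project helper `execute (conn, sql, parameters)` (and its companion `executemany`) defined elsewhere in the ntg sources. As a rough orientation only, here is a minimal sketch of the calling convention, assuming a wrapper around SQLAlchemy's `text()` construct; the real helpers may differ in detail. `{name}` placeholders are interpolated into the SQL text (table names, SQL fragments), while `:name` placeholders are bound as parameters.

# Minimal sketch (an assumption, not the project's actual implementation) of
# the execute()/executemany() helpers used by the examples on this page.
from sqlalchemy.sql import text

def execute(conn, sql, parameters):
    """Interpolate {name} placeholders, then run the statement with :name binds."""
    # Note: tuple binds such as "IN :ids" would additionally need expanding
    # bindparams in stock SQLAlchemy; this sketch ignores that case.
    return conn.execute(text(sql.format(**parameters)), parameters)

def executemany(conn, sql, parameters, values):
    """Run the same statement once for every dict of binds in *values*."""
    return conn.execute(text(sql.format(**parameters)), values)
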
Example #2
    def __init__(self, conn, passage_or_id):
        """ Initialize from passage or passage id. """

        self.conn = conn
        self.pass_id, self.start, self.end, self.bk_id, self.chapter = 0, 0, 0, 0, 0
        start, end = self.fix(str(passage_or_id))

        if int(start) > 10000000:
            res = execute(
                conn, """
            SELECT pass_id, begadr, endadr, adr2bk_id (begadr), adr2chapter (begadr)
            FROM passages
            WHERE begadr = :begadr AND endadr = :endadr
            """, dict(parameters, begadr=start, endadr=end))
        else:
            res = execute(
                conn, """
            SELECT pass_id, begadr, endadr, adr2bk_id (begadr), adr2chapter (begadr)
            FROM passages
            WHERE pass_id = :pass_id
            """, dict(parameters, pass_id=start))

        row = res.fetchone()
        if row is not None:
            self.pass_id, self.start, self.end, self.bk_id, self.chapter = row
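
Two idioms in this constructor recur throughout the listing: the `int(start) > 10000000` test treats large numbers as packed begadr/endadr addresses and small ones as plain pass_id values, and `dict(parameters, begadr=start, endadr=end)` copies the shared `parameters` mapping and adds the per-query binds without mutating it. A standalone illustration of the dict idiom (the keys are invented for demonstration):

# dict(base, **overrides) builds a new mapping; the shared 'parameters' dict
# used by all queries is never modified by an individual call.
parameters = {'schema': 'ntg'}                 # invented shared settings

query_params = dict(parameters, pass_id=42)    # per-query copy with extra bind
print(query_params)                            # {'schema': 'ntg', 'pass_id': 42}
print(parameters)                              # {'schema': 'ntg'} -- unchanged
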
Example #3
File: import.py Project: cceh/ntg
def import_genealogical_fdw (dbsrc, dbdest, parameters):
    """Import genealogical tables from mysql.

    Import the (28 * 3) mysql tables to 3 tables in the postgres database.

    """

    if not config.get ('MYSQL_VG_DB'):
        return

    dbsrc_meta = sqlalchemy.schema.MetaData (bind = dbsrc.engine)
    dbsrc_meta.reflect ()

    with dbdest.engine.begin () as dest:
        if config.get ('MYSQL_LOCSTEM_TABLES'):
            log (logging.INFO, "  Importing mysql locstem tables ...")
            concat_tables_fdw (dest, dbsrc_meta, 'original_locstemed', 'var_fdw', config['MYSQL_LOCSTEM_TABLES'])

    with dbdest.engine.begin () as dest:
        if config.get ('MYSQL_RDG_TABLES'):
            log (logging.INFO, "  Importing mysql rdg tables ...")
            concat_tables_fdw (dest, dbsrc_meta, 'original_rdg',       'var_fdw', config['MYSQL_RDG_TABLES'])

    with dbdest.engine.begin () as dest:
        if config.get ('MYSQL_VAR_TABLES'):
            log (logging.INFO, "  Importing mysql var tables ...")
            concat_tables_fdw (dest, dbsrc_meta, 'original_var',       'var_fdw', config['MYSQL_VAR_TABLES'])

    with dbdest.engine.begin () as dest:
        if config.get ('MYSQL_MEMO_TABLE'):
            log (logging.INFO, "  Importing mysql memo table ...")
            copy_table_fdw    (dest,             'original_memo',      'var_fdw', config['MYSQL_MEMO_TABLE'])
            execute (dest, """
            ALTER TABLE original_memo RENAME COLUMN anfadr TO begadr;
            """, parameters)
Example #4
File: import.py Project: cceh/ntg
def import_att_fdw (dbsrc, dbdest, parameters):
    """Import att and lac tables from mysql.

    Import the (28 * 2) mysql tables to 2 tables in the postgres database.

    """

    log (logging.INFO, "  Importing mysql att tables ...")

    dbsrc_meta = sqlalchemy.schema.MetaData (bind = dbsrc.engine)
    dbsrc_meta.reflect ()

    with dbdest.engine.begin () as dest:
        concat_tables_fdw (dest, dbsrc_meta, 'original_att', 'app_fdw', config['MYSQL_ATT_TABLES'])

    with dbdest.engine.begin () as dest:
        if config.get ('MYSQL_LAC_TABLES'):
            log (logging.INFO, "  Importing mysql lac tables ...")
            concat_tables_fdw (dest, dbsrc_meta, 'original_lac', 'app_fdw', config['MYSQL_LAC_TABLES'])
        else:
            # no lacuna tables provided (eg. John)
            execute (dest, """
	        DROP TABLE IF EXISTS original_lac;
	        CREATE TABLE original_lac (LIKE original_att);
            """, parameters)

    with dbdest.engine.begin () as dest:
        execute (dest, """
        ALTER TABLE original_att RENAME COLUMN anfadr TO begadr;
        ALTER TABLE original_lac RENAME COLUMN anfadr TO begadr;
        """, parameters)
Example #5
def import_att_fdw(dbsrc, dbdest, parameters):
    """Import att and lac tables from mysql.

    Import the (28 * 2) mysql tables to 2 tables in the postgres database.

    """

    log(logging.INFO, "  Importing mysql att tables ...")

    dbsrc_meta = sqlalchemy.schema.MetaData(bind=dbsrc.engine)
    dbsrc_meta.reflect()

    with dbdest.engine.begin() as dest:
        concat_tables_fdw(dest, dbsrc_meta, 'original_att', 'app_fdw',
                          config['MYSQL_ATT_TABLES'])

    with dbdest.engine.begin() as dest:
        if config.get('MYSQL_LAC_TABLES'):
            log(logging.INFO, "  Importing mysql lac tables ...")
            concat_tables_fdw(dest, dbsrc_meta, 'original_lac', 'app_fdw',
                              config['MYSQL_LAC_TABLES'])
        else:
            # no lacuna tables provided (eg. John)
            execute(
                dest, """
	        DROP TABLE IF EXISTS original_lac;
	        CREATE TABLE original_lac (LIKE original_att);
            """, parameters)

    with dbdest.engine.begin() as dest:
        execute(
            dest, """
        ALTER TABLE original_att RENAME COLUMN anfadr TO begadr;
        ALTER TABLE original_lac RENAME COLUMN anfadr TO begadr;
        """, parameters)
Example #6
File: main.py Project: devolt5/ntg
def init_app(app):
    """ Initialize the flask app. """

    app.config.bk_id = None
    app.config.rg_id_all = None

    with app.config.dba.engine.begin() as conn:
        try:
            res = execute(
                conn, """
            SELECT bk_id
            FROM books
            WHERE book = :book
            """, {'book': app.config['BOOK']})
            app.config.bk_id = res.fetchone()[0]

            res = execute(
                conn, """
            SELECT rg_id
            FROM ranges
            WHERE bk_id = :bk_id AND range = 'All'
            """, {'bk_id': app.config.bk_id})
            rg_id = res.fetchone()[0]

            app.config.rg_id_all = rg_id
        except:
            pass  # FIXME
Example #7
File: editor.py Project: devolt5/ntg
def notes_txt(passage_or_id):
    """Read or write the editor notes for a passage

    """

    private_auth()

    with current_app.config.dba.engine.begin() as conn:
        passage = Passage(conn, passage_or_id)

        if request.method == 'PUT':

            edit_auth()
            json = request.get_json()

            res = execute(
                conn, """
            SET LOCAL ntg.user_id = :user_id;
            """, dict(parameters, user_id=flask_login.current_user.id))

            # check for edit conflicts
            res = execute(
                conn, """
            SELECT * FROM notes
            WHERE pass_id = :pass_id AND note != :old_note
            """,
                dict(parameters,
                     pass_id=passage.pass_id,
                     old_note=json['original']))
            for row in res:
                return make_json_response(
                    status=409,
                    message='Cannot save. The note was edited by another user.'
                )

            # save
            res = execute(
                conn, """
            INSERT INTO notes AS n (pass_id, note)
            VALUES (:pass_id, :note)
            ON CONFLICT (pass_id) DO
            UPDATE
            SET note = :note
            WHERE n.pass_id = EXCLUDED.pass_id
            """, dict(parameters, pass_id=passage.pass_id, note=json['note']))

            return make_json_response(message='Note saved.')

        res = execute(
            conn, """
        SELECT note
        FROM notes
        WHERE pass_id = :pass_id
        """, dict(parameters, pass_id=passage.pass_id))

        if res.rowcount > 0:
            return make_text_response(res.fetchone()[0])
        return make_text_response('')
Example #8
File: import.py Project: cceh/ntg
def copy_table_fdw (conn, dest_table, fdw, source_table):
    """Copy a table. """

    execute (conn, """
    DROP TABLE IF EXISTS {dest_table};
    """, dict (parameters, dest_table = dest_table))

    execute (conn, """
    SELECT * INTO {dest_table} FROM  {fdw}."{source_table}"
    """, dict (parameters, fdw = fdw, dest_table = dest_table, source_table = source_table))
Example #9
File: main.py Project: cceh/ntg
def manuscript_full_json (passage_or_id, hs_hsnr_id):
    """Endpoint.  Serve information about a manuscript.

    :param string hs_hsnr_id: The hs, hsnr or id of the manuscript.

    """

    auth ()

    hs_hsnr_id = request.args.get ('ms_id') or hs_hsnr_id
    chapter    = request.args.get ('range') or 'All'

    with current_app.config.dba.engine.begin () as conn:
        passage   = Passage (conn, passage_or_id)
        ms        = Manuscript (conn, hs_hsnr_id)
        rg_id     = passage.range_id (chapter)

        json = ms.to_json ()
        json['length'] = ms.get_length (passage, chapter)

        # Get the attestation(s) of the manuscript (may be uncertain eg. a/b/c)
        res = execute (conn, """
        SELECT labez, clique, labez_clique
        FROM apparatus_view_agg
        WHERE ms_id = :ms_id AND pass_id = :pass_id
        """, dict (parameters, ms_id = ms.ms_id, pass_id = passage.pass_id))
        json['labez'], json['clique'], json['labez_clique'] = res.fetchone ()

        # Get the affinity of the manuscript to all manuscripts
        res = execute (conn, """
        SELECT avg (a.affinity) as aa,
        percentile_cont(0.5) WITHIN GROUP (ORDER BY a.affinity) as ma
        FROM affinity a
        WHERE a.ms_id1 = :ms_id1 AND a.rg_id = :rg_id
        """, dict (parameters, ms_id1 = ms.ms_id, rg_id = rg_id))
        json['aa'], json['ma'] = res.fetchone ()

        # Get the affinity of the manuscript to MT
        #
        # For a description of mt and mtp see the comment in
        # ActsMsListValPh3.pl and
        # http://intf.uni-muenster.de/cbgm/actsPh3/guide_en.html#Ancestors

        json['mt'], json['mtp'] = 0.0, 0.0
        res = execute (conn, """
        SELECT a.affinity as mt, a.equal::float / c.length as mtp
        FROM affinity a
        JOIN ms_ranges c
          ON (a.ms_id1, a.rg_id) = (c.ms_id, c.rg_id)
        WHERE a.ms_id1 = :ms_id1 AND a.ms_id2 = 2 AND a.rg_id = :rg_id
        """, dict (parameters, ms_id1 = ms.ms_id, rg_id = rg_id))
        if res.rowcount > 0:
            json['mt'], json['mtp'] = res.fetchone ()

        return make_json_response (json)
Example #10
File: set_cover.py Project: devolt5/ntg
def init(db):
    """ Do some preparative calculations and cache the results. """

    val = CBGM_Params()

    with db.engine.begin() as conn:
        # get max number of different cliques in any one passage
        res = execute(
            conn, """
        SELECT MAX (c)
        FROM (
          SELECT COUNT ((labez, clique)) AS c
          FROM locstem
          WHERE labez !~ '^z'
          GROUP BY pass_id
        ) AS foo
        """, {})
        n_cliques = res.fetchone()[0]
        # see that the bitmask fits into uint64
        # one bit is reserved for 'unknown' derivation
        assert n_cliques < 64

        # load all attestations into one big numpy array
        create_labez_matrix(db, {}, val)

        # build a mask of all readings of all mss.
        # every labez_clique gets an id (in the range 1..63)

        # Matrix mss x passages containing the bitmask of all manuscripts readings
        val.mask_matrix = np.zeros((val.n_mss, val.n_passages),
                                   dtype=np.uint64)

        res = execute(
            conn, """
        WITH rn AS (
          {with}
        )
        SELECT msq.ms_id, msq.pass_id, rn1.rn
        FROM ms_cliques AS msq
        JOIN (select * from rn) as rn1
          USING (pass_id, labez, clique)
        """, {'with': WITH_SELECT})

        mask_row = collections.namedtuple('Mask_Row', 'ms_id, pass_id, shift')
        for r in res:
            mask = mask_row._make(r)
            val.mask_matrix[mask.ms_id - 1,
                            mask.pass_id - 1] = np.uint64(mask.shift)

    return val
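
The comments above describe the encoding used by the set-cover code: every clique of a passage gets one bit of a 64-bit word (hence the `n_cliques < 64` assertion), and the lowest bit is reserved to flag an unknown derivation. A toy illustration of that bitmask arithmetic, independent of the database (the values are invented):

import numpy as np

# Toy illustration of the uint64 bitmask encoding: clique number n (1..63)
# occupies bit n, while bit 0 is reserved for 'unknown derivation'.
def clique_bit(n):
    """Bit representing the n-th clique of a passage (1 <= n <= 63)."""
    return np.uint64(1) << np.uint64(n)

UNKNOWN = np.uint64(1)                          # bit 0

ms_reading     = clique_bit(3)                  # the manuscript reads clique 3
explaining_set = clique_bit(2) | clique_bit(3) | UNKNOWN

print(bool(ms_reading & explaining_set))        # True: the reading is covered
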
Example #11
def copy_table_fdw(conn, dest_table, fdw, source_table):
    """Copy a table. """

    execute(conn, """
    DROP TABLE IF EXISTS {dest_table};
    """, dict(parameters, dest_table=dest_table))

    execute(
        conn, """
    SELECT * INTO {dest_table} FROM  {fdw}."{source_table}"
    """,
        dict(parameters,
             fdw=fdw,
             dest_table=dest_table,
             source_table=source_table))
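
Unlike the `:name` binds, `{dest_table}`, `{fdw}` and `{source_table}` are SQL identifiers, which cannot be passed as bound parameters; the helper interpolates them into the statement text before execution. A minimal illustration of that substitution, assuming a `str.format`-based helper like the sketch near the top of this page (the table names here are only placeholders):

# Identifier vs. value substitution: table names go through {braces},
# data values go through :binds handled by the database driver.
sql = 'SELECT * INTO {dest_table} FROM {fdw}."{source_table}"'
params = {'dest_table': 'original_memo', 'fdw': 'var_fdw', 'source_table': 'Memo'}

print(sql.format(**params))
# SELECT * INTO original_memo FROM var_fdw."Memo"
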
Example #12
File: comparison.py Project: cceh/ntg
def comparison_detail ():
    """Output comparison of 2 witnesses, chapter detail.

    Outputs a detail of the differences between 2 manuscripts in one chapter.
    """

    with current_app.config.dba.engine.begin () as conn:
        ms1 = Manuscript (conn, request.args.get ('ms1') or 'A')
        ms2 = Manuscript (conn, request.args.get ('ms2') or 'A')
        range_ = request.args.get ('range') or 'All'

        res = execute (conn, """
        SELECT p.pass_id, p.begadr, p.endadr, v1.labez_clique, v1.lesart,
                                              v2.labez_clique, v2.lesart,
          is_p_older (p.pass_id, v1.labez, v1.clique, v2.labez, v2.clique) AS older,
          is_p_older (p.pass_id, v2.labez, v2.clique, v1.labez, v1.clique) AS newer,
          is_p_unclear (p.pass_id, v1.labez, v1.clique) OR
          is_p_unclear (p.pass_id, v2.labez, v2.clique) AS unclear
        FROM (SELECT * FROM ranges WHERE range = :range_) r
          JOIN passages p ON (r.passage @> p.passage )
          JOIN apparatus_cliques_view v1 USING (pass_id)
          JOIN apparatus_cliques_view v2 USING (pass_id)
        WHERE v1.ms_id = :ms1 AND v2.ms_id = :ms2
          AND v1.labez != v2.labez AND v1.labez !~ '^z' AND v2.labez !~ '^z'
          AND v1.cbgm AND v2.cbgm
        ORDER BY p.pass_id
        """, dict (parameters, ms1 = ms1.ms_id, ms2 = ms2.ms_id, range_ = range_))

        return list (map (_ComparisonDetailRowCalcFields._make, res))
Example #14
File: helpers.py Project: cceh/ntg
    def __init__ (self, conn, manuscript_id_or_hs_or_hsnr):
        """ Initialize from manuscript id or hs or hsnr. """

        self.conn = conn
        self.ms_id = self.hs = self.hsnr = None
        param = manuscript_id_or_hs_or_hsnr

        if Manuscript.RE_HSNR.search (param):
            where = 'hsnr = :param'
            param = int (param)
        elif Manuscript.RE_MSID.search (param):
            where = 'ms_id = :param'
            param = int (param[2:])
        elif Manuscript.RE_HS.search (param):
            where = 'hs = :param'
        else:
            return

        res = execute (conn, """
        SELECT ms_id, hs, hsnr
        FROM manuscripts
        WHERE {where}
        """, dict (parameters, where = where, param = param))

        self.ms_id, self.hs, self.hsnr = res.first ()
Example #15
    def __init__(self, conn, manuscript_id_or_hs_or_hsnr):
        """ Initialize from manuscript id or hs or hsnr. """

        self.conn = conn
        self.ms_id = self.hs = self.hsnr = None

        if Manuscript.RE_HSNR.search(manuscript_id_or_hs_or_hsnr):
            where = 'hsnr'
            param = int(manuscript_id_or_hs_or_hsnr)
        elif Manuscript.RE_MSID.search(manuscript_id_or_hs_or_hsnr):
            where = 'ms_id'
            param = int(manuscript_id_or_hs_or_hsnr[2:])
        elif Manuscript.RE_HS.search(manuscript_id_or_hs_or_hsnr):
            where = 'hs'
            param = manuscript_id_or_hs_or_hsnr
        else:
            return

        res = execute(
            conn, """
        SELECT ms_id, hs, hsnr
        FROM manuscripts
        WHERE {where} = :param
        """, dict(parameters, where=where, param=param))

        row = res.fetchone()
        if row is not None:
            self.ms_id, self.hs, self.hsnr = row
Example #16
File: helpers.py Project: cceh/ntg
    def readings (self, prefix = None, suffix = None, delete = None):
        # Get a list of all readings for this passage

        prefix = prefix or []
        suffix = suffix or []
        delete = delete or []

        res = execute (self.conn, """
        SELECT labez
        FROM readings
        WHERE pass_id = :pass_id AND labez != 'zz'
        ORDER BY labez
        """, dict (parameters, pass_id = self.pass_id))

        d = collections.OrderedDict ()
        for p in prefix:
            d[p] = p
        for row in res:
            d[row[0]] = row[0]
        for s in suffix:
            d[s] = s
        for dd in delete:
            if dd in d:
                del d[dd]
        for k in d.keys ():
            d[k] = LABEZ_I18N.get (d[k], d[k])

        Readings = collections.namedtuple ('Readings', 'labez labez_i18n')
        return [ Readings._make (r)._asdict () for r in d.items () ]
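
The prefix/suffix/delete arguments let callers prepend, append or suppress pseudo-readings around the rows coming from the database, and the OrderedDict preserves that order while allowing deletion by key. A toy run of the same bookkeeping without a database (the reading labels are invented):

import collections

# Toy run of the prefix / suffix / delete bookkeeping used by readings().
prefix, rows, suffix, delete = ['*'], ['a', 'b', 'zw'], ['om'], ['zw']

d = collections.OrderedDict()
for labez in prefix + rows + suffix:
    d[labez] = labez
for labez in delete:
    d.pop(labez, None)

print(list(d))                                 # ['*', 'a', 'b', 'om']
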
Example #17
File: main.py Project: cceh/ntg
def ranges_json (passage_or_id):
    """Endpoint.  Serve a list of ranges.

    Serves a list of the configured ranges that are contained inside a book in
    the NT.

    :param string passage_or_id: The passage id.
    :param integer bk_id:        The id of the book.

    """

    auth ()

    passage_or_id = request.args.get ('pass_id') or passage_or_id or '0'

    with current_app.config.dba.engine.begin () as conn:
        passage = Passage (conn, passage_or_id)
        bk_id   = request.args.get ('bk_id') or passage.bk_id

        res = execute (conn, """
        SELECT DISTINCT range, range, lower (ch.passage) as begadr, upper (ch.passage) as endadr
        FROM ranges ch
        WHERE bk_id = :bk_id
        ORDER BY begadr, endadr DESC
        """, dict (parameters, bk_id = bk_id))

        Ranges = collections.namedtuple ('Ranges', 'range, value, begadr, endadr')
        # ranges = list (map (Ranges._make, res))
        ranges = [ Ranges._make (r)._asdict () for r in res ]

        return make_json_response (ranges)
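
The `Ranges._make(r)._asdict()` pattern used here (and in most endpoints on this page) maps a database row onto a namedtuple and then onto an ordered mapping that serialises directly to JSON. A self-contained illustration with made-up rows:

import collections

# Self-contained illustration of the namedtuple row-mapping idiom:
# _make() consumes one iterable row, _asdict() yields a mapping keyed by field.
Ranges = collections.namedtuple('Ranges', 'range, value, begadr, endadr')

rows = [('All', 'All', 50101002, 52816024), ('1', '1', 50101002, 50126006)]
ranges = [Ranges._make(r)._asdict() for r in rows]

print(ranges[0]['range'], ranges[0]['begadr'])  # All 50101002
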
Example #18
File: main.py Project: cceh/ntg
def leitzeile_json (passage_or_id):
    """Endpoint.  Serve the leitzeile for the verse containing passage_or_id. """

    auth ()

    with current_app.config.dba.engine.begin () as conn:
        passage = Passage (conn, passage_or_id)
        verse_start = (passage.start // 1000) * 1000
        verse_end = verse_start + 999

        res = execute (conn, """
        SELECT l.begadr, l.endadr, l.lemma, ARRAY_AGG (p.pass_id)
        FROM nestle l
          LEFT JOIN passages p ON (p.passage @> l.passage)
        WHERE int4range (:start, :end + 1) @> l.passage
        GROUP BY l.begadr, l.endadr, l.lemma

        UNION -- get the insertions

        SELECT p.begadr, p.endadr, '', ARRAY_AGG (p.pass_id)
        FROM passages_view p
        WHERE int4range (:start, :end + 1) @> p.passage AND (begadr % 2) = 1
        GROUP BY p.begadr, p.endadr

        ORDER BY begadr, endadr DESC
        """, dict (parameters, start = verse_start, end = verse_end))

        Leitzeile = collections.namedtuple (
            'Leitzeile', 'begadr, endadr, lemma, pass_ids')
        leitzeile = [ Leitzeile._make (r)._asdict () for r in res ]

        return make_json_response (leitzeile)
Example #19
    def readings(self, prefix=None, suffix=None, delete=None):
        # Get a list of all readings for this passage

        prefix = prefix or []
        suffix = suffix or []
        delete = delete or []

        res = execute(
            self.conn, """
        SELECT labez
        FROM readings
        WHERE pass_id = :pass_id AND labez != 'zz'
        ORDER BY labez
        """, dict(parameters, pass_id=self.pass_id))

        d = collections.OrderedDict()
        for p in prefix:
            d[p] = p
        for row in res:
            d[row[0]] = row[0]
        for s in suffix:
            d[s] = s
        for dd in delete:
            if dd in d:
                del d[dd]
        for k in d.keys():
            d[k] = LABEZ_I18N.get(d[k], d[k])

        Readings = collections.namedtuple('Readings', 'labez labez_i18n')
        return [Readings._make(r)._asdict() for r in d.items()]
Example #20
File: main.py Project: devolt5/ntg
def leitzeile_json(passage_or_id):
    """Endpoint.  Serve the leitzeile for the verse containing passage_or_id. """

    auth()

    with current_app.config.dba.engine.begin() as conn:
        passage = Passage(conn, passage_or_id)
        verse_start = (passage.start // 1000) * 1000
        verse_end = verse_start + 999

        res = execute(
            conn, """
        SELECT l.begadr, l.endadr, l.lemma, ARRAY_AGG (p.pass_id)
        FROM nestle l
          LEFT JOIN passages p ON (p.passage @> l.passage)
        WHERE int4range (:start, :end + 1) @> l.passage
        GROUP BY l.begadr, l.endadr, l.lemma

        UNION -- get the insertions

        SELECT p.begadr, p.endadr, '', ARRAY_AGG (p.pass_id)
        FROM passages_view p
        WHERE int4range (:start, :end + 1) @> p.passage AND (begadr % 2) = 1
        GROUP BY p.begadr, p.endadr

        ORDER BY begadr, endadr DESC
        """, dict(parameters, start=verse_start, end=verse_end))

        Leitzeile = collections.namedtuple('Leitzeile',
                                           'begadr, endadr, lemma, pass_ids')
        leitzeile = [Leitzeile._make(r)._asdict() for r in res]

        return make_json_response(leitzeile)
Example #21
File: editor.py Project: devolt5/ntg
def notes_json(range_id):
    """Endpoint.  Get a list of all editor notes."""

    private_auth()

    with current_app.config.dba.engine.begin() as conn:
        res = execute(
            conn, """
        SELECT pass_id, begadr, endadr, note
        FROM passages_view p
        JOIN ranges rg
          ON (rg.passage @> p.passage)
        JOIN notes
          USING (pass_id)
        WHERE rg.rg_id = :range_id
        ORDER BY pass_id
        """, dict(parameters, range_id=range_id))

        Notes = collections.namedtuple('Notes',
                                       'pass_id, begadr, endadr, note')
        notes = []
        for r in res:
            note = Notes._make(r)._asdict()
            note['hr'] = Passage.static_to_hr(note['begadr'], note['endadr'])
            notes.append(note)

        return make_json_response(notes)
Example #22
File: set_cover.py Project: cceh/ntg
def init (db):
    """ Do some preparative calculations and cache the results. """

    val = CBGM_Params ()

    with db.engine.begin () as conn:
        # get max number of different cliques in any one passage
        res = execute (conn, """
        SELECT MAX (c)
        FROM (
          SELECT COUNT ((labez, clique)) AS c
          FROM locstem
          WHERE labez !~ '^z'
          GROUP BY pass_id
        ) AS foo
        """, {})
        n_cliques = res.fetchone ()[0]
        # see that the bitmask fits into uint64
        # one bit is reserved for 'unknown' derivation
        assert n_cliques < 64

        # load all attestations into one big numpy array
        create_labez_matrix (db, {}, val)

        # build a mask of all readings of all mss.
        # every labez_clique gets an id (in the range 1..63)

        # Matrix mss x passages containing the bitmask of all manuscripts readings
        val.mask_matrix = np.zeros ((val.n_mss, val.n_passages), dtype = np.uint64)

        res = execute (conn, """
        WITH rn AS (
          {with}
        )
        SELECT msq.ms_id, msq.pass_id, rn1.rn
        FROM ms_cliques AS msq
        JOIN (select * from rn) as rn1
          USING (pass_id, labez, clique)
        """, { 'with' : WITH_SELECT })

        mask_row = collections.namedtuple ('Mask_Row', 'ms_id, pass_id, shift')
        for r in res:
            mask = mask_row._make (r)
            val.mask_matrix[mask.ms_id - 1, mask.pass_id - 1] = np.uint64 (mask.shift)

    return val
Example #24
File: comparison.py Project: cceh/ntg
def comparison_summary ():
    """Output comparison of 2 witnesses, chapter summary.

    Outputs a summary of the differences between 2 manuscripts, one summary row
    per chapter.

    """

    with current_app.config.dba.engine.begin () as conn:
        ms1 = Manuscript (conn, request.args.get ('ms1') or 'A')
        ms2 = Manuscript (conn, request.args.get ('ms2') or 'A')

        res = execute (conn, """
        (WITH ranks AS (
          SELECT ms_id1, ms_id2, rg_id, rank () OVER (PARTITION BY rg_id ORDER BY affinity DESC) AS rank, affinity
          FROM affinity aff
          WHERE ms_id1 = :ms_id1
            AND {prefix}newer > {prefix}older
          ORDER BY affinity DESC
        )

        SELECT a.rg_id, a.range, a.common, a.equal,
               a.older, a.newer, a.unclear, a.affinity, r.rank, ms1_length, ms2_length
        FROM {view} a
        JOIN ranks r     USING (rg_id, ms_id1, ms_id2)
        WHERE a.ms_id1 = :ms_id1 AND a.ms_id2 = :ms_id2
        )

        UNION

        (WITH ranks2 AS (
          SELECT ms_id1, ms_id2, rg_id, rank () OVER (PARTITION BY rg_id ORDER BY affinity DESC) AS rank, affinity
          FROM affinity aff
          WHERE ms_id2 = :ms_id2
            AND {prefix}newer < {prefix}older
          ORDER BY affinity DESC
        )

        SELECT a.rg_id, a.range, a.common, a.equal,
               a.older, a.newer, a.unclear, a.affinity, r.rank, ms1_length, ms2_length
        FROM {view} a
        JOIN ranks2 r USING (rg_id, ms_id1, ms_id2)
        WHERE a.ms_id1 = :ms_id1 AND a.ms_id2 = :ms_id2
        )

        UNION

        SELECT a.rg_id, a.range, a.common, a.equal,
               a.older, a.newer, a.unclear, a.affinity, NULL, ms1_length, ms2_length
        FROM {view} a
        WHERE a.ms_id1 = :ms_id1 AND a.ms_id2 = :ms_id2 AND a.newer = a.older

        ORDER BY rg_id
        """, dict (parameters, ms_id1 = ms1.ms_id, ms_id2 = ms2.ms_id,
                   view = 'affinity_p_view', prefix = 'p_'))

        return list (map (_ComparisonRowCalcFields._make, res))
Example #25
def import_genealogical_fdw(dbsrc, dbdest, parameters):
    """Import genealogical tables from mysql.

    Import the (28 * 3) mysql tables to 3 tables in the postgres database.

    This function is relevant only for Acts, where we had to import genealogical
    data from a previous implementation of the CBGM.  It is not used for new
    projects.

    """

    if not config.get('MYSQL_VG_DB'):
        return

    dbsrc_meta = sqlalchemy.schema.MetaData(bind=dbsrc.engine)
    dbsrc_meta.reflect()

    with dbdest.engine.begin() as dest:
        if config.get('MYSQL_LOCSTEM_TABLES'):
            log(logging.INFO, "  Importing mysql locstem tables ...")
            concat_tables_fdw(dest, dbsrc_meta, 'original_locstemed',
                              'var_fdw', config['MYSQL_LOCSTEM_TABLES'])

    with dbdest.engine.begin() as dest:
        if config.get('MYSQL_RDG_TABLES'):
            log(logging.INFO, "  Importing mysql rdg tables ...")
            concat_tables_fdw(dest, dbsrc_meta, 'original_rdg', 'var_fdw',
                              config['MYSQL_RDG_TABLES'])

    with dbdest.engine.begin() as dest:
        if config.get('MYSQL_VAR_TABLES'):
            log(logging.INFO, "  Importing mysql var tables ...")
            concat_tables_fdw(dest, dbsrc_meta, 'original_var', 'var_fdw',
                              config['MYSQL_VAR_TABLES'])

    with dbdest.engine.begin() as dest:
        if config.get('MYSQL_MEMO_TABLE'):
            log(logging.INFO, "  Importing mysql memo table ...")
            copy_table_fdw(dest, 'original_memo', 'var_fdw',
                           config['MYSQL_MEMO_TABLE'])
            execute(
                dest, """
            ALTER TABLE original_memo RENAME COLUMN anfadr TO begadr;
            """, parameters)
Example #26
File: set_cover.py Project: devolt5/ntg
def build_explain_matrix(conn, val, ms_id):
    """Build the explain matrix.

    A matrix of 1 x n_passages containing the bitmask of all those readings that
    would explain the reading in the manuscript under scrutiny.

    Bit 1 means: the reading stems from an unknown source.
    Bit 2..64 are the bitmask of all cliques.

    """

    explain_matrix = np.zeros(val.n_passages, dtype=np.uint64)

    res = execute(
        conn, """
    WITH RECURSIVE
    rn AS (
      {with}
    ),
    lsrn AS (
      SELECT ls.pass_id, ls.labez, ls.clique,
        -- set 1 as flag for unknown derivation
        CASE WHEN ls.source_labez = '?' THEN rn1.rn | 1 ELSE rn1.rn END AS rn1,
        rn2.rn AS rn2
      FROM locstem ls
      JOIN rn as rn1
        USING (pass_id, labez, clique)
      LEFT JOIN rn as rn2
        ON (ls.pass_id, ls.source_labez, ls.source_clique) = (rn2.pass_id, rn2.labez, rn2.clique)
    ),
    lsrec (pass_id, rn1, rn2) AS (
      SELECT lsrn.pass_id, lsrn.rn1, lsrn.rn2
      FROM ms_cliques AS msq
        JOIN lsrn USING (pass_id, labez, clique)
      WHERE ms_id = :ms_id
    UNION
      SELECT lsrn.pass_id, lsrn.rn1, lsrn.rn2
      FROM lsrec
      JOIN lsrn
        ON (lsrn.pass_id = lsrec.pass_id AND (lsrn.rn1 & ~B'1'::integer) = lsrec.rn2)
    )
    SELECT pass_id, BIT_OR (rn1) AS rn
    FROM lsrec
    GROUP BY pass_id
    ORDER BY pass_id;
    """, {
            'with': WITH_SELECT,
            'ms_id': ms_id
        })

    explain_row = collections.namedtuple('Explain_Row', 'pass_id, mask')
    for r in res:
        mask = explain_row._make(r)
        explain_matrix[mask.pass_id - 1] = np.uint64(mask.mask)

    return explain_matrix
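
Given the explain matrix built here and the per-manuscript rows of `mask_matrix` built in `init`, checking whether a set of readings accounts for a passage reduces to a bitwise AND per passage. The following check is hypothetical (the function name and the test data are invented) and is shown only to make the bitmask semantics concrete:

import numpy as np

# Hypothetical helper: which passages of a manuscript are explained?
# mask_row       -- n_passages uint64 vector, the manuscript's own readings
# explain_matrix -- n_passages uint64 vector, readings that would explain them
def explained_passages(mask_row, explain_matrix):
    return (mask_row & explain_matrix) > 0

mask_row       = np.array([2, 4, 8], dtype=np.uint64)
explain_matrix = np.array([2 | 4, 2, 8 | 1], dtype=np.uint64)
print(explained_passages(mask_row, explain_matrix))   # [ True False  True]
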
Example #27
File: helpers.py Project: cceh/ntg
    def get_length (self, passage, range_ = '0'):
        # Get the length of the manuscript, ie. the no. of existing passages

        res = execute (self.conn, """
        SELECT length
        FROM ms_ranges_view
        WHERE ms_id = :ms_id AND bk_id = :bk_id AND range = :range_
        """, dict (parameters, ms_id = self.ms_id, bk_id = passage.bk_id, range_ = range_))

        return res.fetchone ()[0]
Example #28
    def get_length(self, rg_id):
        # Get the length of the manuscript, ie. the no. of existing passages

        res = execute(
            self.conn, """
        SELECT length
        FROM ms_ranges
        WHERE ms_id = :ms_id AND rg_id = :rg_id
        """, dict(parameters, ms_id=self.ms_id, rg_id=rg_id))

        return res.fetchone()[0]
Example #29
File: main.py Project: devolt5/ntg
def apparatus_json(passage_or_id):
    """ The contents of the apparatus table. """

    auth()

    with current_app.config.dba.engine.begin() as conn:
        passage = Passage(conn, passage_or_id)

        # list of labez => lesart
        res = execute(
            conn, """
        SELECT labez, reading (labez, lesart)
        FROM readings
        WHERE pass_id = :pass_id
        ORDER BY labez
        """, dict(parameters, pass_id=passage.pass_id))

        Readings = collections.namedtuple('Readings', 'labez lesart')
        readings = [Readings._make(r)._asdict() for r in res]

        # list of labez_clique => manuscripts
        res = execute(
            conn, """
        SELECT labez, clique, labez_clique, labezsuf, reading (labez, lesart), ms_id, hs, hsnr, certainty
        FROM apparatus_view_agg
        WHERE pass_id = :pass_id
        ORDER BY hsnr, labez, clique
        """, dict(parameters, pass_id=passage.pass_id))

        Manuscripts = collections.namedtuple(
            'Manuscripts',
            'labez clique labez_clique labezsuf lesart ms_id hs hsnr certainty'
        )
        manuscripts = [Manuscripts._make(r)._asdict() for r in res]

        return make_json_response({
            'readings': readings,
            'manuscripts': manuscripts,
        })

    return 'Error'
Example #30
File: helpers.py Project: cceh/ntg
    def range_id (self, range_ = None):
        """ Return the id of the range containing this passage. """

        range_ = range_ or self.chapter

        res = execute (self.conn, """
        SELECT rg_id
        FROM ranges_view
        WHERE bk_id = :bk_id AND range = :range_
        """, dict (parameters, bk_id = self.bk_id, range_ = range_))

        return res.fetchone ()[0]
Example #31
File: set_cover.py Project: cceh/ntg
def build_explain_matrix (conn, val, ms_id):
    """Build the explain matrix.

    A matrix of 1 x n_passages containing the bitmask of all those readings that
    would explain the reading in the manuscript under scrutiny.

    Bit 1 means: the reading stems from an unknown source.
    Bit 2..64 are the bitmask of all cliques.

    """

    explain_matrix = np.zeros (val.n_passages, dtype = np.uint64)

    res = execute (conn, """
    WITH RECURSIVE
    rn AS (
      {with}
    ),
    lsrn AS (
      SELECT ls.pass_id, ls.labez, ls.clique,
        -- set 1 as flag for unknown derivation
        CASE WHEN (ls.source_labez IS NULL) AND NOT ls.original THEN rn1.rn | 1 ELSE rn1.rn END AS rn1,
        rn2.rn AS rn2
      FROM locstem ls
      JOIN rn as rn1
        USING (pass_id, labez, clique)
      LEFT JOIN rn as rn2
        ON (ls.pass_id, ls.source_labez, ls.source_clique) = (rn2.pass_id, rn2.labez, rn2.clique)
    ),
    lsrec (pass_id, rn1, rn2) AS (
      SELECT lsrn.pass_id, lsrn.rn1, lsrn.rn2
      FROM ms_cliques AS msq
        JOIN lsrn USING (pass_id, labez, clique)
      WHERE ms_id = :ms_id
    UNION
      SELECT lsrn.pass_id, lsrn.rn1, lsrn.rn2
      FROM lsrec
      JOIN lsrn
        ON (lsrn.pass_id = lsrec.pass_id AND (lsrn.rn1 & ~B'1'::integer) = lsrec.rn2)
    )
    SELECT pass_id, BIT_OR (rn1) AS rn
    FROM lsrec
    GROUP BY pass_id
    ORDER BY pass_id;
    """, { 'with' : WITH_SELECT, 'ms_id' : ms_id })

    explain_row = collections.namedtuple ('Explain_Row', 'pass_id, mask')
    for r in res:
        mask = explain_row._make (r)
        explain_matrix[mask.pass_id - 1] = np.uint64 (mask.mask)

    return explain_matrix
Example #32
File: main.py Project: cceh/ntg
def apparatus_json (passage_or_id):
    """ The contents of the apparatus table. """

    auth ()

    with current_app.config.dba.engine.begin () as conn:
        passage = Passage (conn, passage_or_id)

        # list of labez => lesart
        res = execute (conn, """
        SELECT labez, reading (labez, lesart)
        FROM readings
        WHERE pass_id = :pass_id
        ORDER BY labez
        """, dict (parameters, pass_id = passage.pass_id))

        Readings = collections.namedtuple ('Readings', 'labez lesart')
        readings = [ Readings._make (r)._asdict () for r in res ]

        # list of labez_clique => manuscripts
        res = execute (conn, """
        SELECT labez, clique, labez_clique, labezsuf, reading (labez, lesart), ms_id, hs, hsnr, certainty
        FROM apparatus_cliques_view
        WHERE pass_id = :pass_id
        ORDER BY hsnr, labez, clique
        """, dict (parameters, pass_id = passage.pass_id))

        Manuscripts = collections.namedtuple (
            'Manuscripts',
            'labez clique labez_clique labezsuf lesart ms_id hs hsnr certainty'
        )
        manuscripts = [ Manuscripts._make (r)._asdict () for r in res ]

        return make_json_response ({
            'readings'    : readings,
            'manuscripts' : manuscripts,
        })

    return 'Error'
Example #33
File: helpers.py Project: cceh/ntg
    def __init__ (self, conn, passage_or_id):
        """ Initialize from passage or passage id. """

        self.conn = conn
        self.pass_id, self.start, self.end, self.bk_id, self.chapter = 0, 0, 0, 0, 0
        start, end =  self.fix (str (passage_or_id))

        if int (start) > 10000000:
            res = execute (conn, """
            SELECT pass_id, begadr, endadr, adr2bk_id (begadr), adr2chapter (begadr)
            FROM passages
            WHERE begadr = :begadr AND endadr = :endadr
            """, dict (parameters, begadr = start, endadr = end))
        else:
            res = execute (conn, """
            SELECT pass_id, begadr, endadr, adr2bk_id (begadr), adr2chapter (begadr)
            FROM passages
            WHERE pass_id = :pass_id
            """, dict (parameters, pass_id = start))

        res = res.first ()
        if res is not None:
            self.pass_id, self.start, self.end, self.bk_id, self.chapter = res
Example #34
    def range_id(self, range_=None):
        """ Return the id of the range containing this passage. """

        range_ = range_ or str(self.chapter)

        res = execute(
            self.conn, """
        SELECT rg_id
        FROM ranges_view
        WHERE bk_id = :bk_id AND range = :range_
        """, dict(parameters, bk_id=self.bk_id, range_=range_))

        row = res.fetchone()
        return row[0] if row is not None else None
Example #35
File: editor.py Project: cceh/ntg
def notes_txt (passage_or_id):
    """Get the editor notes for a passage

    """

    if not flask_login.current_user.has_role ('editor'):
        raise PrivilegeError ('You don\'t have editor privilege.')

    with current_app.config.dba.engine.begin () as conn:
        passage = Passage (conn, passage_or_id)

        if request.method == 'PUT':
            res = execute (conn, """
            SET LOCAL ntg.user_id = :user_id;
            """, dict (parameters, user_id = flask_login.current_user.id))

            res = execute (conn, """
            INSERT INTO notes AS n (pass_id, note)
            VALUES (:pass_id, :note)
            ON CONFLICT (pass_id) DO
            UPDATE
            SET note = :note
            WHERE n.pass_id = EXCLUDED.pass_id
            """, dict (parameters,
                       pass_id = passage.pass_id,
                       note = request.get_json ()['remarks']))

            return make_json_response (message = 'Notes saved.')
        res = execute (conn, """
        SELECT note
        FROM notes
        WHERE pass_id = :pass_id
        """, dict (parameters, pass_id = passage.pass_id))

        if res.rowcount > 0:
            return make_text_response (res.fetchone ()[0])
        return make_text_response ('')
Example #36
File: helpers.py Project: cceh/ntg
    def cliques (self, prefix = None, suffix = None, delete = None):
        # Get a list of all cliques for this passage

        prefix = prefix or []
        suffix = suffix or []
        delete = delete or []

        res = execute (self.conn, """
        SELECT labez, clique, labez_clique (labez, clique) AS labez_clique
        FROM cliques
        WHERE pass_id = :pass_id
        ORDER BY labez, clique
        """, dict (parameters, pass_id = self.pass_id))

        Cliques = collections.namedtuple ('Cliques', 'labez clique labez_clique')
        return [ Cliques._make (r)._asdict ()
                 for r in prefix + list (res.fetchall ()) + suffix if r not in delete ]
Example #37
File: set_cover.py Project: devolt5/ntg
def get_ancestors(conn, rg_id, ms_id):
    """ Get all ancestors of ms. """

    mode = 'sim'
    view = 'affinity_view' if mode == 'rec' else 'affinity_p_view'

    res = execute(
        conn, """
    SELECT aff.ms_id2 as ms_id
    FROM
      {view} aff
    WHERE aff.ms_id1 = :ms_id1 AND aff.rg_id = :rg_id
          AND aff.common > 0 AND aff.older < aff.newer
    ORDER BY affinity DESC, newer DESC, older DESC
    """, dict(ms_id1=ms_id, rg_id=rg_id, view=view))

    return frozenset([r[0] for r in res])
Example #38
File: main.py Project: cceh/ntg
def attesting_csv (passage_or_id, labez):
    """ Serve all relatives of all mss. attesting labez at passage. """

    auth ()

    with current_app.config.dba.engine.begin () as conn:
        passage = Passage (conn, passage_or_id)

        res = execute (conn, """
        SELECT ms_id, hs, hsnr
        FROM apparatus_view
        WHERE pass_id = :pass_id AND labez = :labez
        ORDER BY hsnr
        """, dict (parameters, pass_id = passage.pass_id, labez = labez))

        Attesting = collections.namedtuple ('Attesting', 'ms_id hs hsnr')

        return csvify (Attesting._fields, list (map (Attesting._make, res)))
Example #39
File: set_cover.py Project: cceh/ntg
def get_ancestors (conn, rg_id, ms_id):
    """ Get all ancestors of ms. """

    mode = 'sim'
    view = 'affinity_view' if mode == 'rec' else 'affinity_p_view'

    res = execute (conn, """
    SELECT aff.ms_id2 as ms_id
    FROM
      {view} aff
    WHERE aff.ms_id1 = :ms_id1 AND aff.rg_id = :rg_id
          AND aff.common > 0 AND aff.older < aff.newer
    ORDER BY affinity DESC, newer DESC, older DESC
    """, dict (ms_id1  = ms_id,
               rg_id   = rg_id,
               view    = view))

    return frozenset ([r[0] for r in res])
Example #40
File: set_cover.py Project: devolt5/ntg
def optimal_substemma_detail_csv():
    """Report details about one combination of ancestors.
    """

    if current_app.config.val is None:
        current_app.config.val = init(current_app.config.dba)
    val = current_app.config.val

    with current_app.config.dba.engine.begin() as conn:
        # the manuscript to explain
        ms = Manuscript(conn, request.args.get('ms'))

        # get the selected set of ancestors
        selected = [
            Manuscript(conn, anc_id)
            for anc_id in (request.args.get('selection') or '').split()
        ]

        combinations = [Combination(selected, 0)]
        explain_matrix = build_explain_matrix(conn, val, ms.ms_id)
        _optimal_substemma(ms.ms_id,
                           explain_matrix,
                           combinations,
                           mode='detail')

        res = execute(
            conn, """
        SELECT 'unknown' as type, p.pass_id, p.begadr, p.endadr, v.labez_clique, v.lesart
        FROM passages p
          JOIN apparatus_cliques_view v USING (pass_id)
        WHERE v.ms_id = :ms_id AND pass_id IN :unknown_pass_ids
        UNION
        SELECT 'open' as type, p.pass_id, p.begadr, p.endadr, v.labez_clique, v.lesart
        FROM passages p
          JOIN apparatus_cliques_view v USING (pass_id)
        WHERE v.ms_id = :ms_id AND pass_id IN :open_pass_ids
        """,
            dict(ms_id=ms.ms_id,
                 unknown_pass_ids=combinations[0].unknown_indices or (-1, ),
                 open_pass_ids=combinations[0].open_indices or (-1, )))

        return csvify(
            _OptimalSubstemmaDetailRowCalcFields._fields,
            list(map(_OptimalSubstemmaDetailRowCalcFields._make, res)))
Example #41
File: main.py Project: devolt5/ntg
def attesting_csv(passage_or_id, labez):
    """ Serve all relatives of all mss. attesting labez at passage. """

    auth()

    with current_app.config.dba.engine.begin() as conn:
        passage = Passage(conn, passage_or_id)

        res = execute(
            conn, """
        SELECT ms_id, hs, hsnr
        FROM apparatus_view
        WHERE pass_id = :pass_id AND labez = :labez
        ORDER BY hsnr
        """, dict(parameters, pass_id=passage.pass_id, labez=labez))

        Attesting = collections.namedtuple('Attesting', 'ms_id hs hsnr')

        return csvify(Attesting._fields, list(map(Attesting._make, res)))
Example #42
    def cliques(self, prefix=None, suffix=None, delete=None):
        # Get a list of all cliques for this passage

        prefix = prefix or []
        suffix = suffix or []
        delete = delete or []

        res = execute(
            self.conn, """
        SELECT labez, clique, labez_clique (labez, clique) AS labez_clique
        FROM cliques
        WHERE pass_id = :pass_id
        ORDER BY labez, clique
        """, dict(parameters, pass_id=self.pass_id))

        Cliques = collections.namedtuple('Cliques',
                                         'labez clique labez_clique')
        return [
            Cliques._make(r)._asdict()
            for r in prefix + list(res.fetchall()) + suffix if r not in delete
        ]
Example #43
File: helpers.py Project: cceh/ntg
def get_excluded_ms_ids (conn, include):
    """Get the ms_ids of manuscripts to exclude.

    Helps to implement the buttons "A", "MT", and "F" in the toolbar.

    """

    exclude = set (EXCLUDE_REGEX_MAP.keys ()) - set (include)
    if not exclude:
        return tuple ([-1]) # a non-existing id to avoid an SQL syntax error
    exclude = [ EXCLUDE_REGEX_MAP[x] for x in exclude]

    # get ids of nodes to exclude
    res = execute (conn, """
    SELECT ms_id
    FROM manuscripts
    WHERE hs ~ '^({exclude})$'
    ORDER BY ms_id
    """, dict (parameters, exclude = '|'.join (exclude)))

    return tuple ([ row[0] for row in res ] or [ -1 ])
Example #44
def get_excluded_ms_ids(conn, include):
    """Get the ms_ids of manuscripts to exclude.

    Helps to implement the buttons "A", "MT", and "F" in the toolbar.

    """

    exclude = set(EXCLUDE_REGEX_MAP.keys()) - set(include)
    if not exclude:
        return tuple([-1])  # a non-existing id to avoid an SQL syntax error
    exclude = [EXCLUDE_REGEX_MAP[x] for x in exclude]

    # get ids of nodes to exclude
    res = execute(
        conn, """
    SELECT ms_id
    FROM manuscripts
    WHERE hs ~ '^({exclude})$'
    ORDER BY ms_id
    """, dict(parameters, exclude='|'.join(exclude)))

    return tuple([row[0] for row in res] or [-1])
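
The helper ORs the configured patterns together into a single anchored regular expression for the SQL `~` match, and falls back to the impossible id `-1` so that an empty `IN ()` list never reaches the database. A standalone illustration of the pattern building; the map contents below are invented stand-ins, the real `EXCLUDE_REGEX_MAP` lives in the project's helpers:

import re

# Invented stand-in for EXCLUDE_REGEX_MAP, only to show the alternation that
# get_excluded_ms_ids() hands to the SQL regex match (hs ~ '^(...)$').
EXCLUDE_REGEX_MAP = {'A': 'A', 'MT': 'MT', 'F': 'F1|F2'}

include = {'A'}
exclude = [EXCLUDE_REGEX_MAP[x] for x in set(EXCLUDE_REGEX_MAP) - include]
pattern = '^({exclude})$'.format(exclude='|'.join(exclude))

print(pattern)                          # e.g. ^(MT|F1|F2)$  (set order varies)
print(bool(re.match(pattern, 'MT')))    # True
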
Example #45
File: editor.py Project: cceh/ntg
def notes_json ():
    """Endpoint.  Get a list of all editor notes."""

    if not flask_login.current_user.has_role ('editor'):
        raise PrivilegeError ('You don\'t have editor privilege.')

    with current_app.config.dba.engine.begin () as conn:
        res = execute (conn, """
        SELECT pass_id, begadr, endadr, note
        FROM passages_view
        JOIN notes
          USING (pass_id)
        """, dict (parameters))

        Notes = collections.namedtuple ('Notes', 'pass_id, begadr, endadr, note')
        notes = []
        for r in res:
            note = Notes._make (r)._asdict ()
            note['hr'] = Passage.static_to_hr (note['begadr'], note['endadr'])
            notes.append (note)

        return make_json_response (notes)
Example #46
File: main.py Project: cceh/ntg
def attestation_json (passage_or_id):

    auth ()

    with current_app.config.dba.engine.begin () as conn:
        passage = Passage (conn, passage_or_id)

        res = execute (conn, """
        SELECT ms_id, labez
        FROM apparatus
        WHERE pass_id = :pass_id
        ORDER BY ms_id
        """, dict (parameters, pass_id = passage.pass_id))

        attestations = {}
        for row in res:
            ms_id, labez = row
            attestations[str (ms_id)] = labez

        return make_json_response ({
            'attestations': attestations
        })
Example #47
File: set_cover.py Project: cceh/ntg
def optimal_substemma_detail_csv ():
    """Report details about one combination of ancestors.
    """

    if current_app.config.val is None:
        current_app.config.val = init (current_app.config.dba)
    val = current_app.config.val

    with current_app.config.dba.engine.begin () as conn:
        # the manuscript to explain
        ms = Manuscript (conn, request.args.get ('ms'))

        # get the selected set of ancestors
        selected = [ Manuscript (conn, anc_id)
                     for anc_id in (request.args.get ('selection') or '').split () ]

        combinations   = [Combination (selected, 0)]
        explain_matrix = build_explain_matrix (conn, val, ms.ms_id)
        _optimal_substemma (ms.ms_id, explain_matrix, combinations, mode = 'detail')

        res = execute (conn, """
        SELECT 'unknown' as type, p.pass_id, p.begadr, p.endadr, v.labez_clique, v.lesart
        FROM passages p
          JOIN apparatus_cliques_view v USING (pass_id)
        WHERE v.ms_id = :ms_id AND pass_id IN :unknown_pass_ids
        UNION
        SELECT 'open' as type, p.pass_id, p.begadr, p.endadr, v.labez_clique, v.lesart
        FROM passages p
          JOIN apparatus_cliques_view v USING (pass_id)
        WHERE v.ms_id = :ms_id AND pass_id IN :open_pass_ids
        """, dict (
            ms_id = ms.ms_id,
            unknown_pass_ids = combinations[0].unknown_indices,
            open_pass_ids    = combinations[0].open_indices
        ))

        return csvify (_OptimalSubstemmaDetailRowCalcFields._fields,
                       list (map (_OptimalSubstemmaDetailRowCalcFields._make, res)))
Example #48
File: main.py Project: devolt5/ntg
def attestation_json(passage_or_id):
    """Answer with a list of the attestations of all manuscripts at one specified
    passage."""

    auth()

    with current_app.config.dba.engine.begin() as conn:
        passage = Passage(conn, passage_or_id)

        res = execute(
            conn, """
        SELECT ms_id, labez
        FROM apparatus
        WHERE pass_id = :pass_id
        ORDER BY ms_id
        """, dict(parameters, pass_id=passage.pass_id))

        attestations = {}
        for row in res:
            ms_id, labez = row
            attestations[str(ms_id)] = labez

        return make_json_response({'attestations': attestations})
Example #49
File: main.py Project: devolt5/ntg
def ranges_json():
    """Endpoint.  Serve a list of ranges.

    Serves a list of the configured ranges that are contained inside a book in
    the NT.

    """

    conf = current_app.config
    with conf.dba.engine.begin() as conn:
        res = execute(
            conn, """
        SELECT DISTINCT bk_id, book, rg_id, range, lower (rg.passage) as begadr, upper (rg.passage) as endadr
        FROM ranges_view rg
        WHERE bk_id = :bk_id
        ORDER BY begadr, endadr DESC
        """, dict(parameters, bk_id=conf.bk_id))

        Ranges = collections.namedtuple(
            'Ranges', 'bk_id, book, rg_id, range, begadr, endadr')
        ranges = [Ranges._make(r)._asdict() for r in res]

        return cache(make_json_response(ranges))
Example #50
File: cbgm.py Project: SCDH/intf-cbgm
def build_A_text(dba, parameters):
    """Build the 'A' text

    The editors' reconstruction of the archetype is recorded in the locstem
    table. This function generates a virtual manuscript 'A' from those choices.

    The designation of a passage as 'Fehlvers' is an editorial decision that the
    verse is not original, so we set 'zu'.

    If the editors came to no final decision, no 'original' reading will be
    found in locstem.  In this case we set 'A' to 'zz' and there will be a gap
    in the reconstructed text.

    The Lesart of 'A' is always NULL, because it is a virtual manuscript.

    """

    with dba.engine.begin() as conn:

        execute(
            conn, """
        DELETE FROM ms_cliques     WHERE ms_id = :ms_id;
        DELETE FROM ms_cliques_tts WHERE ms_id = :ms_id;
        DELETE FROM apparatus      WHERE ms_id = :ms_id;
        """, dict(parameters, ms_id=MS_ID_A))

        # Fill with the original reading in locstem or 'zz' if none found
        execute(
            conn, """
        INSERT INTO apparatus_cliques_view (ms_id, pass_id, labez, clique, cbgm, origin, lesart)
          SELECT :ms_id, p.pass_id, COALESCE (l.labez, 'zz'), COALESCE (l.clique, '1'), true, 'LOC', NULL
          FROM passages p
          LEFT JOIN locstem l ON (l.pass_id, l.source_labez) = (p.pass_id, '*')
          WHERE NOT p.fehlvers
        """, dict(parameters, ms_id=MS_ID_A))

        # Fill Fehlverse with labez 'zu'
        execute(
            conn, """
        INSERT INTO apparatus_cliques_view (ms_id, pass_id, labez, clique, cbgm, origin, lesart)
          SELECT :ms_id, p.pass_id, 'zu', '1', true, 'LOC', NULL
          FROM passages p
          WHERE p.fehlvers
        """, dict(parameters, ms_id=MS_ID_A))
Example #51
File: cbgm.py Project: cceh/ntg
def build_A_text (dba, parameters):
    """Build the 'A' text

    The editors' reconstruction of the archetype is recorded in the locstem
    table. This function generates a virtual manuscript 'A' from those choices.

    The designation of a passage as 'Fehlvers' is an editorial decision that the
    verse is not original, so we set 'zu'.

    If the editors came to no final decision, no 'original' reading will be
    found in locstem.  In this case we set 'A' to 'zz' and there will be a gap
    in the reconstructed text.

    The Lesart of 'A' is always NULL, because it is a virtual manuscript.

    """

    with dba.engine.begin () as conn:

        execute (conn, """
        DELETE FROM ms_cliques     WHERE ms_id = :ms_id;
        DELETE FROM ms_cliques_tts WHERE ms_id = :ms_id;
        DELETE FROM apparatus      WHERE ms_id = :ms_id;
        """, dict (parameters, ms_id = MS_ID_A))

        # Fill with the original reading in locstem or 'zz' if none found
        execute (conn, """
        INSERT INTO apparatus_cliques_view (ms_id, pass_id, labez, clique, cbgm, origin, lesart)
          SELECT :ms_id, p.pass_id, COALESCE (l.labez, 'zz'), COALESCE (l.clique, '1'), true, 'LOC', NULL
          FROM passages p
          LEFT JOIN locstem l ON (l.pass_id, l.original) = (p.pass_id, true)
          WHERE NOT p.fehlvers
        """, dict (parameters, ms_id = MS_ID_A))

        # Fill Fehlverse with labez 'zu'
        execute (conn, """
        INSERT INTO apparatus_cliques_view (ms_id, pass_id, labez, clique, cbgm, origin, lesart)
          SELECT :ms_id, p.pass_id, 'zu', '1', true, 'LOC', NULL
          FROM passages p
          WHERE p.fehlvers
        """, dict (parameters, ms_id = MS_ID_A))
Example #52
File: load_edits.py Project: devolt5/ntg
    db = db_tools.PostgreSQLEngine (**config)

    tree = lxml.etree.parse (args.input if args.input != '-' else sys.stdin)

    with db.engine.begin () as conn:
        db_tools.truncate_editor_tables (conn)

    log (logging.INFO, "Loading cliques ...")

    with db.engine.begin () as conn:
        values = []
        for row in tree.xpath ('/sql/export_cliques/row'):
            values.append ({ e.tag : e.text for e in row })

        execute (conn, """
        TRUNCATE import_cliques;
        """, parameters)

        executemany (conn, """
        INSERT INTO import_cliques (passage, labez, clique,
                                    sys_period, user_id_start, user_id_stop)
        VALUES (:passage, :labez, :clique,
                :sys_period, :user_id_start, :user_id_stop)
        """, parameters, values)

        execute (conn, """
        UPDATE import_cliques u
        SET pass_id = r.pass_id
        FROM readings_view r
        WHERE (u.passage, u.labez) = (r.passage, r.labez)
        """, parameters)
Example #53
        db_tools.init_default_cliques(conn)
        log(logging.INFO, "Build default ms_cliques ...")
        db_tools.init_default_ms_cliques(conn)
        log(logging.INFO, "Build default locstem ...")
        db_tools.init_default_locstem(conn)
        # default notes is an empty table

    log(logging.INFO, "Loading cliques ...")

    with db.engine.begin() as conn:
        values = []
        for row in tree.xpath('/sql/export_cliques/row'):
            values.append({e.tag: e.text for e in row})

        execute(conn, """
        TRUNCATE import_cliques;
        """, parameters)

        executemany(
            conn, """
        INSERT INTO import_cliques (passage, labez, clique,
                                    sys_period, user_id_start, user_id_stop)
        VALUES (:passage, :labez, :clique,
                :sys_period, :user_id_start, :user_id_stop)
        """, parameters, values)

        execute(
            conn, """
        UPDATE import_cliques u
        SET pass_id = r.pass_id
        FROM readings_view r
        WHERE (u.passage, u.labez) = (r.passage, r.labez)
        """, parameters)
Example #54
    init_logging(
        args,
        logging.StreamHandler(),  # stderr
        logging.FileHandler('mk_users.log'))

    dba = db_tools.PostgreSQLEngine(**config)

    db.Base3.metadata.drop_all(dba.engine)
    db.Base3.metadata.create_all(dba.engine)

    pwd_context = CryptContext(schemes=[config['USER_PASSWORD_HASH']])

    with dba.engine.begin() as src:
        # create the basic roles
        execute(
            src,
            "INSERT INTO role (id, name, description) VALUES (1, 'admin',  'Administrator')",
            {})
        execute(
            src,
            "INSERT INTO role (id, name, description) VALUES (2, 'editor', 'Editor')",
            {})

        # create the admin user
        if args.email:
            params = {
                "username": args.username,
                "email": args.email,
                "password":
                pwd_context.hash(args.password) if args.password else '',
                "active": True,
                "confirmed_at": datetime.datetime.now()
Example #55
File: main.py Project: devolt5/ntg
def relatives_csv(passage_or_id, hs_hsnr_id):
    """Output a table of the nearest relatives of a manuscript.

    Output a table of the nearest relatives/ancestors/descendants of a
    manuscript and what they attest.

    """

    auth()

    type_ = request.args.get('type') or 'rel'
    limit = int(request.args.get('limit') or 0)
    labez = request.args.get('labez') or 'all'
    mode = request.args.get('mode') or 'sim'
    include = request.args.getlist('include[]') or []
    fragments = request.args.getlist('fragments[]') or []

    view = 'affinity_view' if mode == 'rec' else 'affinity_p_view'

    where = ''
    if type_ == 'anc':
        where = ' AND older < newer'
    if type_ == 'des':
        where = ' AND older >= newer'

    if labez == 'all':
        where += " AND labez !~ '^z'"
    elif labez == 'all+lac':
        pass
    else:
        where += " AND labez = '%s'" % labez

    if 'fragments' in fragments:
        frag_where = ''
    else:
        frag_where = 'AND aff.common > aff.ms1_length / 2'

    limit = '' if limit == 0 else ' LIMIT %d' % limit

    with current_app.config.dba.engine.begin() as conn:

        passage = Passage(conn, passage_or_id)
        ms = Manuscript(conn, hs_hsnr_id)
        rg_id = passage.request_rg_id(request)

        exclude = get_excluded_ms_ids(conn, include)

        # Get the X most similar manuscripts and their attestations
        res = execute(
            conn, """
        /* get the LIMIT closest ancestors for this node */
        WITH ranks AS (
          SELECT ms_id1, ms_id2,
            rank () OVER (ORDER BY affinity DESC, common, older, newer DESC, ms_id2) AS rank,
            affinity
          FROM {view} aff
          WHERE ms_id1 = :ms_id1 AND aff.rg_id = :rg_id AND ms_id2 NOT IN :exclude
            AND newer > older {frag_where}
          ORDER BY affinity DESC
        )

        SELECT r.rank,
               aff.ms_id2 as ms_id,
               ms.hs,
               ms.hsnr,
               aff.ms2_length,
               aff.common,
               aff.equal,
               aff.older,
               aff.newer,
               aff.unclear,
               aff.common - aff.equal - aff.older - aff.newer - aff.unclear as norel,
               CASE WHEN aff.newer < aff.older THEN ''
                    WHEN aff.newer = aff.older THEN '-'
                    ELSE '>'
               END as direction,
               aff.affinity,
               a.labez,
               a.certainty
        FROM
          {view} aff
        JOIN apparatus_view_agg a
          ON aff.ms_id2 = a.ms_id
        JOIN manuscripts ms
          ON aff.ms_id2 = ms.ms_id
        LEFT JOIN ranks r
          ON r.ms_id2 = aff.ms_id2
        WHERE aff.ms_id2 NOT IN :exclude AND aff.ms_id1 = :ms_id1
              AND aff.rg_id = :rg_id AND aff.common > 0
              AND a.pass_id = :pass_id {where} {frag_where}
        ORDER BY affinity DESC, r.rank, newer DESC, older DESC, hsnr
        {limit}
        """,
            dict(parameters,
                 where=where,
                 frag_where=frag_where,
                 ms_id1=ms.ms_id,
                 hsnr=ms.hsnr,
                 pass_id=passage.pass_id,
                 rg_id=rg_id,
                 limit=limit,
                 view=view,
                 exclude=exclude))

        Relatives = collections.namedtuple(
            'Relatives',
            'rank ms_id hs hsnr length common equal older newer unclear norel direction affinity labez certainty'
        )
        return csvify(Relatives._fields, list(map(Relatives._make, res)))
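
csvify is a project helper whose exact signature is not shown in this snippet; a plausible minimal stand-in, assuming it simply serializes the field names and the result rows as CSV text:

import collections
import csv
import io

def csvify_sketch(fields, rows):
    """Hypothetical equivalent: render a header row plus data rows as CSV text."""
    buf = io.StringIO()
    writer = csv.writer(buf)
    writer.writerow(fields)
    writer.writerows(rows)          # namedtuples are iterable, so they write directly
    return buf.getvalue()

Relatives = collections.namedtuple('Relatives', 'rank ms_id hs affinity')
rows = [Relatives(1, 42, 'P45', 0.91), Relatives(2, 7, '03', 0.88)]
print(csvify_sketch(Relatives._fields, rows))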
Example #56
File: load_edits.py Project: cceh/ntg
        db_tools.init_default_cliques (conn)
        log (logging.INFO, "Build default ms_cliques ...")
        db_tools.init_default_ms_cliques (conn)
        log (logging.INFO, "Build default locstem ...")
        db_tools.init_default_locstem (conn)
        # default notes is an empty table

    log (logging.INFO, "Loading cliques ...")

    with db.engine.begin () as conn:
        values = []
        for row in tree.xpath ('/sql/export_cliques/row'):
            values.append ({ e.tag : e.text for e in row })

        execute (conn, """
        TRUNCATE import_cliques;
        """, parameters)

        executemany (conn, """
        INSERT INTO import_cliques (passage, labez, clique,
                                    sys_period, user_id_start, user_id_stop)
        VALUES (:passage, :labez, :clique,
                :sys_period, :user_id_start, :user_id_stop)
        """, parameters, values)

        execute (conn, """
        UPDATE import_cliques u
        SET pass_id = r.pass_id
        FROM readings_view r
        WHERE (u.passage, u.labez) = (r.passage, r.labez)
        """, parameters)
Example #57
File: import.py Project: cceh/ntg
def concat_tables_fdw (conn, meta, dest_table, fdw, table_mask):
    """Concatenate multiple tables into one."""

    table_mask = re.compile ('^%s$' % table_mask)

    # find the set of fields common to all input tables.  check types also.  it
    # is ridiculous that we have to do this but the table structures are highly
    # inconsistent even between chapters of the same book.
    source_table = None
    column_set = collections.OrderedDict ()
    for t in sorted (meta.tables.keys ()):
        if table_mask.match (t):
            source_model = sqlalchemy.Table (t, meta, autoload = True)
            if source_table is None:
                source_table = t
                for c in source_model.columns:
                    column_set[c.name] = c.type.python_type
            else:
                col_set = { c.name : c.type.python_type for c in source_model.columns }
                for name, type_ in list (column_set.items ()):
                    if col_set.get (name, '') != type_:
                        del column_set[name]

    # create a table with those fields common to all input tables, lowercase the
    # field names
    execute (conn, """
    DROP TABLE IF EXISTS {dest_table}
    """, dict (parameters, dest_table = dest_table))

    execute (conn, """
    CREATE TABLE {dest_table} ( LIKE {fdw}."{source_table}" )
    """, dict (parameters, dest_table = dest_table, source_table = source_table, fdw = fdw))

    source_model = sqlalchemy.Table (source_table, meta, autoload = True)
    cols = [column.name for column in source_model.columns]

    for column in cols:
        if column in column_set:
            if column != column.lower ():
                execute (conn, 'ALTER TABLE {dest_table} RENAME COLUMN "{source_column}" TO "{dest_column}"',
                         dict (parameters, dest_table = dest_table, source_column = column, dest_column = column.lower ()))
        else:
            execute (conn, 'ALTER TABLE {dest_table} DROP COLUMN "{source_column}"',
                     dict (parameters, dest_table = dest_table, source_column = column, dest_column = column.lower ()))

    execute (conn, """COMMIT""", parameters);

    # concat the input tables
    for source_table in sorted (meta.tables.keys ()):
        if not table_mask.match (source_table):
            continue
        log (logging.DEBUG, "    Copying table %s" % source_table)

        source_columns = ['"' + column + '"'          for column in column_set.keys ()]
        dest_columns   = ['"' + column.lower () + '"' for column in column_set.keys ()]

        execute (conn, """
        INSERT INTO {dest_table} ({dest_columns})
        SELECT {source_columns}
        FROM {fdw}."{source_table}"
        """, dict (parameters, source_table = source_table, dest_table = dest_table, fdw = fdw,
                   source_columns = ', '.join (source_columns),
                   dest_columns = ', '.join (dest_columns)))
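
The core of concat_tables_fdw is finding the column set shared by all chapter tables, with matching types. Stripped of SQLAlchemy reflection, the intersection step looks roughly like this sketch with toy column maps in place of reflected tables:

import collections

# name -> python_type maps, as would be reflected from three hypothetical chapter tables
tables = [
    {'ANFADR': int, 'ENDADR': int, 'LABEZ': str, 'SUFFIX': str},
    {'ANFADR': int, 'ENDADR': int, 'LABEZ': str},                 # SUFFIX missing
    {'ANFADR': int, 'ENDADR': str, 'LABEZ': str, 'SUFFIX': str},  # ENDADR has a different type
]

column_set = collections.OrderedDict(tables[0])
for cols in tables[1:]:
    for name, type_ in list(column_set.items()):
        if cols.get(name, '') != type_:    # drop columns that are missing or type-mismatched
            del column_set[name]

print(list(column_set))   # ['ANFADR', 'LABEZ']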
Example #58
File: main.py Project: cceh/ntg
def suggest_json ():
    """Endpoint.  The suggestion drop-downs in the navigator.

    Serves a list of books, chapters, verses, or words that the user can enter
    in the navigation gadget.  It suggests only entities that are actually in
    the database.

    """

    auth ()

    # the name of the current field
    field   = request.args.get ('currentfield')

    # the term the user entered in the current field
    term    = request.args.get ('term') or ''
    term    = '^' + re.escape (term.split ('-')[0])

    # terms entered in previous fields
    siglum  = request.args.get ('siglum')  or ''
    chapter = request.args.get ('chapter') or 'All'
    verse   = request.args.get ('verse')   or '1'

    Words = collections.namedtuple (
        'Words', 'kapanf, versanf, wortanf, kapend, versend, wortend, lemma')

    res = []
    with current_app.config.dba.engine.begin () as conn:

        if field == 'siglum':
            # only show those books that actually are in the database
            res = execute (conn, """
            SELECT DISTINCT siglum, siglum, book, bk_id
            FROM passages_view b
            WHERE siglum ~ :term OR book ~ :term
            ORDER BY bk_id
            """, dict (parameters, term = term))
            return flask.json.jsonify (
                [ { 'value' : r[0], 'label' : r[1], 'description' : r[2] } for r in res ])

        if field == 'chapter':
            res = execute (conn, """
            SELECT range, range
            FROM ranges_view
            WHERE siglum = :siglum AND range ~ '[1-9][0-9]*' AND range ~ :term
            ORDER BY range::integer
            """, dict (parameters, siglum = siglum, term = term))
            return flask.json.jsonify ([ { 'value' : r[0], 'label' : r[1] } for r in res ])

        if field == 'verse':
            res = execute (conn, """
            SELECT DISTINCT verse, verse
            FROM passages_view
            WHERE variant AND siglum = :siglum AND chapter = :chapter AND verse::varchar ~ :term
            ORDER BY verse
            """, dict (parameters, siglum = siglum, chapter = chapter, term = term))
            return flask.json.jsonify ([ { 'value' : r[0], 'label' : r[1] } for r in res ])

        if field == 'word':
            res = execute (conn, """
            SELECT chapter, verse, word,
                            adr2chapter (p.endadr), adr2verse (p.endadr), adr2word (p.endadr),
                            COALESCE (string_agg (n.lemma, ' ' ORDER BY n.begadr), '') as lemma
            FROM passages_view p
            LEFT JOIN nestle n
              ON (p.passage @> n.passage)
            WHERE variant AND siglum = :siglum AND chapter = :chapter AND verse = :verse AND word::varchar ~ :term
            GROUP BY chapter, verse, word, p.endadr
            ORDER BY word, adr2verse (p.endadr), adr2word (p.endadr)
            """, dict (parameters, siglum = siglum, chapter = chapter, verse = verse, term = term))
            res = map (Words._make, res)
            res = map (_f_map_word, res)
            return flask.json.jsonify (
                [ { 'value' : r[0], 'label' : r[1], 'description' : r[2] } for r in res ])

    return flask.json.jsonify ([])
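
The term sent to the database is escaped and anchored ('^' + re.escape (term.split ('-')[0])), so only prefixes of the part before a '-' can match. PostgreSQL's ~ operator uses POSIX regular expressions, which behave like Python's re module for this simple pattern; a quick illustration:

import re

def anchor(term):
    return '^' + re.escape(term.split('-')[0])

pattern = anchor('1.2-3')                  # user typed a range; only '1.2' is used
print(pattern)                             # ^1\.2
print(bool(re.search(pattern, '1.2')))     # True  - prefix match
print(bool(re.search(pattern, '21.2')))    # False - anchored at the start
print(bool(re.search(pattern, '122')))     # False - the dot is escaped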
Example #59
    db = db_tools.PostgreSQLEngine (**config)

    log (logging.INFO, "Saving changes ...")

    if args.output == '-':
        fp = sys.stdout
    else:
        fp = open (args.output, 'w', encoding='utf-8')

    with db.engine.begin () as conn:
        fp.write ('<?xml version="1.0" encoding="utf-8" ?>\n\n')

        fp.write ('<sql profile="%s">\n' % args.profile)

        res = execute (conn, """
        SELECT (table_to_xml ('export_cliques', true, false, ''))
        """, parameters)

        fp.write (res.fetchone ()[0])
        fp.write ('\n')

        res = execute (conn, """
        SELECT (table_to_xml ('export_ms_cliques', true, false, ''))
        """, parameters)

        fp.write (res.fetchone ()[0])
        fp.write ('\n')

        res = execute (conn, """
        SELECT (table_to_xml ('export_locstem', true, false, ''))
        """, parameters)
Example #60
File: main.py Project: cceh/ntg
def relatives_csv (passage_or_id, hs_hsnr_id):
    """Output a table of the nearest relatives of a manuscript.

    Output a table of the nearest relatives/ancestors/descendants of a
    manuscript and what they attest.

    """

    auth ()

    type_     = request.args.get ('type') or 'rel'
    chapter   = request.args.get ('range') or 'All'
    limit     = int (request.args.get ('limit') or 0)
    labez     = request.args.get ('labez') or 'all'
    mode      = request.args.get ('mode') or 'sim'
    include   = request.args.getlist ('include[]') or []
    fragments = request.args.getlist ('fragments[]') or []

    view = 'affinity_view' if mode == 'rec' else 'affinity_p_view'

    where = ''
    if type_ == 'anc':
        where =  ' AND older < newer'
    if type_ == 'des':
        where =  ' AND older >= newer'

    if labez == 'all':
        where += " AND labez !~ '^z'"
    elif labez == 'all+lac':
        pass
    else:
        where += " AND labez = '%s'" % labez

    if 'fragments' in fragments:
        frag_where = ''
    else:
        frag_where = 'AND aff.common > aff.ms1_length / 2'

    limit = '' if limit == 0 else ' LIMIT %d' % limit

    with current_app.config.dba.engine.begin () as conn:

        passage   = Passage (conn, passage_or_id)
        ms        = Manuscript (conn, hs_hsnr_id)
        rg_id     = passage.range_id (chapter)

        exclude = get_excluded_ms_ids (conn, include)

        # Get the X most similar manuscripts and their attestations
        res = execute (conn, """
        /* get the LIMIT closest ancestors for this node */
        WITH ranks AS (
          SELECT ms_id1, ms_id2,
            rank () OVER (ORDER BY affinity DESC, common, older, newer DESC, ms_id2) AS rank,
            affinity
          FROM {view} aff
          WHERE ms_id1 = :ms_id1 AND aff.rg_id = :rg_id AND ms_id2 NOT IN :exclude
            AND newer > older {frag_where}
          ORDER BY affinity DESC
        )

        SELECT r.rank,
               aff.ms_id2 as ms_id,
               ms.hs,
               ms.hsnr,
               aff.ms2_length,
               aff.common,
               aff.equal,
               aff.older,
               aff.newer,
               aff.unclear,
               aff.common - aff.equal - aff.older - aff.newer - aff.unclear as norel,
               CASE WHEN aff.newer < aff.older THEN ''
                    WHEN aff.newer = aff.older THEN '-'
                    ELSE '>'
               END as direction,
               aff.affinity,
               a.labez
        FROM
          {view} aff
        JOIN apparatus_view_agg a
          ON aff.ms_id2 = a.ms_id
        JOIN manuscripts ms
          ON aff.ms_id2 = ms.ms_id
        LEFT JOIN ranks r
          ON r.ms_id2 = aff.ms_id2
        WHERE aff.ms_id2 NOT IN :exclude AND aff.ms_id1 = :ms_id1
              AND aff.rg_id = :rg_id AND aff.common > 0
              AND a.pass_id = :pass_id {where} {frag_where}
        ORDER BY affinity DESC, r.rank, newer DESC, older DESC, hsnr
        {limit}
        """, dict (parameters, where = where, frag_where = frag_where,
                   ms_id1 = ms.ms_id, hsnr = ms.hsnr,
                   pass_id = passage.pass_id, rg_id = rg_id, limit = limit,
                   view = view, exclude = exclude))

        Relatives = collections.namedtuple (
            'Relatives',
            'rank ms_id hs hsnr length common equal older newer unclear norel direction affinity labez'
        )
        return csvify (Relatives._fields, list (map (Relatives._make, res)))
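
The direction marker and the norel count computed in the SQL above can be restated in plain Python; the row values here are made up purely for illustration:

def direction(older, newer):
    # mirrors: CASE WHEN newer < older THEN '' WHEN newer = older THEN '-' ELSE '>' END
    if newer < older:
        return ''
    if newer == older:
        return '-'
    return '>'

# hypothetical affinity row
common, equal, older, newer, unclear = 2800, 2500, 120, 90, 40
norel = common - equal - older - newer - unclear   # passages with no genealogical relation
print(direction(older, newer), norel)              # '' 50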