Example #1
0
    def GET(self, name):
        """Return the work matching a given UUID, or list all works.

        With a UUID query parameter, looks up that single work; otherwise
        lists every work, optionally filtered and sorted by title.
        """
        query = web.input()
        work_id = query.get('uuid') or query.get('UUID')

        sort = ""
        if work_id:
            results = Work.get_from_work_id(work_id)
        else:
            filters = query.get('filter')
            sort = query.get('sort')
            order = query.get('order', 'asc')
            clause, params = build_parms(filters)
            if sort:
                validate_sorting_or_fail(["title"], sort, order)
            results = Work.get_all(clause, params)

        if not results:
            raise Error(NORESULT)

        # relatives (parents/children) are loaded only for a single-work lookup
        data = results_to_works(results, work_id is not None)

        if sort:
            # alphabetical ordering on each work's (first) title
            return sort_alphabetically(data, sort, order)
        return data
def addtable():
    """Create a work from the submitted form field and redirect home."""
    title = request.form['work']
    new_work = Work(title=title, creator_id=flask_login.current_user.get_id())
    connection = get_db()
    new_work.insert(connection.cursor())
    connection.commit()
    return redirect(url_for('home'))
def posts_table():
    """Create a work from a JSON payload and persist it.

    Expects a JSON body with a "content" key used as the work's title;
    the current logged-in user is recorded as the creator.

    Returns:
        ("ok", 201) on success.
    """
    content = request.json["content"]
    work = Work(title=content, creator_id=flask_login.current_user.get_id())
    # bug fix: removed stray debug `print(content)` that leaked request data
    # to stdout on every call
    db = get_db()
    cur = db.cursor()
    work.insert(cur)
    db.commit()

    return "ok", 201
    def test_clear_datastore(self):
        """Tests if the database is being cleared and considers non-empty lists
           of instances for all the models used by the application.
        """

        # assertNotEquals/assertEquals are deprecated aliases (removed in
        # Python 3.12); use the canonical names
        self.assertNotEqual(Word.query().fetch(), [])
        self.assertNotEqual(Work.query().fetch(), [])
        self.assertNotEqual(Character.query().fetch(), [])
        self.assertNotEqual(list(FileMetadata.all().run()), [])

        # hitting '/' is what triggers the clearing handler
        self.testapp.get('/')

        self.assertEqual(Word.query().fetch(), [])
        self.assertEqual(Work.query().fetch(), [])
        self.assertEqual(Character.query().fetch(), [])
        self.assertEqual(list(FileMetadata.all().run()), [])
def _get_word_mentions_by_char(word_name, work_title, char_name):
    """Get the mentions of a word spoken by one character of a certain work.

    Args:
        word_name: the string of the word being searched (lowercase).
        work_title: the title of the work in which the character appears
            (titlecase).
        char_name: the name of the character (titlecase).

    Returns:
        A (mentions, count) pair: a dictionary indexed by the work and the
        character (redundant, to comply with the data pattern) and the
        character's occurrence count. ({}, 0) when any entity is missing.
    """

    word = Word.get_by_id(word_name)
    if word is None:
        return {}, 0
    work = Work.get_by_id(work_title, parent=word.key)
    if work is None:
        return {}, 0
    char = Character.get_by_id(char_name, parent=work.key)
    if char is None:
        return {}, 0
    highlighted = _bold_mentions(word_name, char.get_string_mentions())
    return {work_title: {char_name: highlighted}}, char.count
def _get_word_mentions_in_work(word_name, work_title):
    """Get all mentions of a word that appear in a certain work.

    Args:
        word_name: the string of the word being searched (lowercase).
        work_title: the title of the work (titlecase).

    Returns:
        A (mentions, count) pair: a dictionary first indexed by work and then
        by character (the work level exists to comply with the data pattern)
        and the work's occurrence count. ({}, 0) when any entity is missing.
    """

    word = Word.get_by_id(word_name)
    if word is None:
        return {}, 0
    work = Work.get_by_id(work_title, parent=word.key)
    if work is None:
        return {}, 0
    by_char = {
        char.name: _bold_mentions(word_name, char.get_string_mentions())
        for char in Character.query(ancestor=work.key).fetch()
    }
    return {work_title: by_char}, work.count
Example #7
0
 def _parse_work(soup):
     """Build a Work from a soup node, normalising the price to USD.

     Reads the name from the h3 tag and currency/price from the two span
     tags. Assumes any non-USD price is GBP — TODO confirm upstream.
     """
     name = soup.h3.text
     currency, price = (tag.text for tag in soup.find_all('span'))
     if currency != 'USD':
         price = gbp_to_usd(price)
         currency = 'USD'
     return Work(name, currency, price)
 def get(self):
     """Clears the datastore, then returns to the admin page."""
     # ndb models are cleared via keys-only queries; FileMetadata is an
     # old-style db model and is deleted through its own API
     for model in (Word, Work, Character, Line):
         ndb.delete_multi(model.query().fetch(keys_only=True))
     db.delete(FileMetadata.all(keys_only=True).run())
     self.redirect('/admin')
 def _parse_work(soup):
     """Build a Work from a soup node whose second div holds "CURRENCY PRICE".

     Reads the name from the h3 tag. Assumes any non-USD price is GBP —
     TODO confirm upstream.
     """
     name = soup.h3.text
     currency, price = soup.find_all('div')[1].text.split(' ')
     if currency != 'USD':
         price = gbp_to_usd(price)
         currency = 'USD'
     return Work(name, currency, price)
Example #10
0
    def DELETE(self, name):
        """Delete the work matching the supplied UUID."""
        query = web.input()
        logger.debug("Data: %s" % (query))

        work_id = query.get('UUID') or query.get('uuid')

        require_params_or_fail([work_id], 'a (work) UUID')

        # find_or_fail raises if the UUID is unknown, so delete is safe here
        Work.find_or_fail(work_id).delete()
        return []
Example #11
0
    def POST(self, name):
        """Create a parent/child relation between two existing works."""
        payload = json.loads(web.data().decode('utf-8'))
        parent_uuid = payload.get('parent_UUID') or payload.get('parent_uuid')
        child_uuid = payload.get('child_UUID') or payload.get('child_uuid')

        require_params_or_fail([parent_uuid, child_uuid],
                               'a parent and a child UUID')

        parent = Work.find_or_fail(parent_uuid)
        child = Work.find_or_fail(child_uuid)

        parent.set_children([child.UUID])
        parent.save()

        # refresh every relation before serialising the parent
        for load in (parent.load_titles, parent.load_identifiers,
                     parent.load_children, parent.load_parents):
            load()

        return [parent.__dict__]
Example #12
0
    def DELETE(self, name):
        """Remove a title from the work matching the supplied UUID."""
        query = web.input()
        work_id = query.get('UUID') or query.get('uuid')
        title = query.get('title')

        require_params_or_fail([title, work_id], "(work) UUID and title")

        # the work is looked up together with the title to be removed
        work = Work.find_or_fail(work_id, titles=[title])
        work.delete_titles()
        # reload so the serialised response reflects the deletion
        work.load_titles()
        work.load_identifiers()

        return [work.__dict__]
def index_reduce(key, values):
    """Index reduce function.

    Args:
        key: a string in the format <word>_SEP<work>_SEP<character>
        values: the lines in which <word> appears in <work> in a speak of
            <character>

    The word is either added to the database or updated with its new
    occurrence, adding info about the work in which it was found, which
    character pronounced it (if applicable), a count of occurrences and a
    reference to the line in which it was found.
    """
    word_value, work_value, char_value = key.split(_SEP)
    occurrences = len(values)  # hoisted: used as the count for every entity
    work_titlecase = titlecase(work_value)

    word = Word.get_by_id(word_value)
    if word:
        word.count += occurrences
        work = Work.get_by_id(work_titlecase, parent=word.key)
    else:
        word = Word(id=word_value, name=word_value, count=occurrences)
        work = None  # a brand-new word cannot have an existing work child

    # de-duplicated: both "new word" and "unknown work" paths used to build
    # the same Work(...) literal in two places
    if work:
        work.count += occurrences
    else:
        work = Work(parent=word.key, id=work_titlecase,
                    title=work_titlecase, count=occurrences)

    character_titlecase = titlecase(char_value)
    char = Character(parent=work.key, id=character_titlecase,
                     name=character_titlecase, count=occurrences)
    # NOTE(review): pickle.loads on mapreduce intermediate values — safe only
    # while the pipeline is trusted; never feed untrusted data through here
    for line in set(values):
        char.mentions.append(pickle.loads(line))

    word.put()
    work.put()
    char.put()
def _get_word_works(word_name):
    """Retrieves all the works in which a word occurs.

    Args:
        word_name: the word (lowercase).

    Returns:
        A list with the titles of the works; empty if the word is unknown.
    """

    word_db = Word.get_by_id(word_name)
    if word_db is None:
        return []
    return [work.title for work in Work.query(ancestor=word_db.key).fetch()]
    def test_filter_entities_using_query_works(self):
        '''We can search for all the entities starting from a word.'''
        word = Word.get_by_id("death")
        self.assertEqual('death', word.name)
        self.assertEqual(2, word.count)

        # exactly one work hangs off the seeded word
        works = Work.query(ancestor=self.word.key).fetch()
        self.assertEqual(1, len(works))

        # and exactly one character with a single recorded mention
        chars = Character.query(ancestor=works[0].key).fetch()
        self.assertEqual(1, len(chars))
        self.assertEqual(1, len(chars[0].mentions))
        self.assertEqual("Though yet of Hamlet our dear brother's death",
                         chars[0].mentions[0].get().line)
Example #16
0
    def POST(self, name):
        """Attach one or more titles to an existing work."""
        payload = json.loads(web.data().decode('utf-8'))
        work_id = payload.get('UUID') or payload.get('uuid')
        # a single title string is normalised to a list
        titles = strtolist(payload.get('title'))

        require_params_or_fail([work_id], "a (work) UUID")
        require_params_or_fail([titles], "at least a title")

        work = Work.find_or_fail(work_id, titles=titles)
        work.save()
        # reload so the serialised response includes the new titles
        work.load_titles()
        work.load_identifiers()

        return [work.__dict__]
Example #17
0
    def DELETE(self, name):
        """Delete an identifier (URI) from the work matching the UUID."""
        work_id = web.input().get('UUID') or web.input().get('uuid')
        uri = web.input().get('URI') or web.input().get('uri')

        require_params_or_fail([uri, work_id], "a (work) UUID and a URI")

        try:
            # split_uri both validates the URI and extracts its scheme
            scheme, value = Identifier.split_uri(uri)
            uris = [{'URI': uri}]
        except Exception:
            raise Error(BADPARAMS, msg="Invalid URI '%s'" % (uri))

        # bug fix: pass uris by keyword — every sibling handler calls
        # find_or_fail(work_id, titles=...) / find_or_fail(work_id, uris=...),
        # so a bare positional here would bind to the titles parameter
        work = Work.find_or_fail(work_id, uris=uris)

        work.delete_uris()
        # reload so the serialised response reflects the deletion
        work.load_identifiers()

        return [work.__dict__]
def _get_work_characters(word_name, work_title):
    """Retrieves all the characters that mention a word in a given work.

    Args:
        word_name: the string of the word which the characters mention
            (lowercase).
        work_title: the title of the work of interest (titlecase).

    Returns:
        A list with the names of the characters; empty when the word or the
        work is unknown.
    """

    word_db = Word.get_by_id(word_name)
    if word_db is None:
        return []
    work_db = Work.get_by_id(work_title, parent=word_db.key)
    if work_db is None:
        return []
    return [char.name
            for char in Character.query(ancestor=work_db.key).fetch()]
    def setUp(self):
        '''Creates a Testbed instance, activates the datastore stub, and
        seeds the database with one word -> work -> character chain.'''
        self.testbed = testbed.Testbed()
        self.testbed.activate()
        self.testbed.init_datastore_v3_stub()

        self.word = Word(id="death", name="death", count=2)
        self.work = Work(parent=self.word.key, id="Hamlet",
                         title="Hamlet", count=1)
        self.character = Character(parent=self.work.key, id="Claudius",
                                   name="Claudius", count=1)
        # the Line is stored first so its key can be referenced as a mention
        mention_key = Line(
            line='Though yet of Hamlet our dear brother\'s death').put()
        self.character.mentions = [mention_key]

        self.word_key = self.word.put()
        self.work_key = self.work.put()
        self.character_key = self.character.put()
    def get(self):
        """Serves the data for the treemap visualization.

        Writes a JSON object whose "array" member is a list of rows of the
        form:

            [name, parent's name, value, color value]

        where name and parent's name are strings and value is an integer
        proportional to the size of the resulting rectangle on the treemap.
        The data is built from _get_all_word_mentions for the searched word;
        nothing is written when the search term is empty or has no mentions.
        """
        searched_value = cgi.escape(self.request.get('searched_word').lower())
        if not searched_value:
            return

        all_mentions, count = _get_all_word_mentions(searched_value)
        if not count:
            return

        rows = [['Location', 'Parent', 'Word Occurrences'],
                ['Shakespeare\'s Corpus', None, count]]

        word_db = Word.get_by_id(searched_value)
        for work_title in all_mentions:
            work_db = Work.get_by_id(work_title, parent=word_db.key)
            rows.append([work_title, 'Shakespeare\'s Corpus', work_db.count])
            for char_name in all_mentions[work_title]:
                if not char_name:
                    continue
                char_db = Character.get_by_id(char_name, parent=work_db.key)
                rows.append([{'v': work_title + '+' + char_name,
                              'f': char_name}, work_title, char_db.count])

        self.response.headers['Content-Type'] = 'text/json'
        self.response.out.write(json.encode({"array": rows}))
Example #21
0
    def POST(self, name):
        """Attach an identifier (URI) to an existing work."""
        payload = json.loads(web.data().decode('utf-8'))
        uri = payload.get('URI') or payload.get('uri')
        canonical = payload.get('canonical') in (True, "true", "True")
        work_id = payload.get('UUID') or payload.get('uuid')

        require_params_or_fail([uri, work_id], "a (work) UUID and a URI")

        try:
            # split_uri both validates the URI and extracts its scheme
            scheme, value = Identifier.split_uri(uri)
            uris = [{'URI': uri, 'canonical': canonical}]
        except Exception:
            raise Error(BADPARAMS, msg="Invalid URI '%s'" % (uri))

        # the URI scheme must already be registered in the database
        UriScheme.find_or_fail(scheme)

        work = Work.find_or_fail(work_id, uris=uris)
        work.save()
        # reload so the serialised response includes the new identifier
        work.load_identifiers()

        return [work.__dict__]
def _get_all_word_mentions(word_name):
    """Get all the mentions of a word, grouped first by work, then by
       character.

    Args:
        word_name: the string representation of the word.

    Returns:
        A (mentions, count) pair: a dictionary of dictionaries, keyed first
        by work title and then by character name, and the word's total
        occurrence count. ({}, 0) when the word is unknown.
    """
    word = Word.get_by_id(word_name)
    if word is None:
        return {}, 0
    all_mentions = {}
    for work in Work.query(ancestor=word.key):
        all_mentions[work.title] = {
            char.name: _bold_mentions(word.name, char.get_string_mentions())
            for char in Character.query(ancestor=work.key)
        }
    return all_mentions, word.count
Example #23
0
    def POST(self, name):
        """Create a work from type, titles and URIs, with optional relatives."""
        data = json.loads(web.data().decode('utf-8'))
        wtype = data.get('type', '')
        title = data.get('title')
        uri = data.get('URI') or data.get('uri')
        parent = data.get('parent')
        child = data.get('child')

        titles = strtolist(title)
        uris = strtolist(uri)
        require_params_or_fail([wtype], 'a (work) type')
        require_params_or_fail(titles, 'at least one title')
        require_params_or_fail(uris, 'at least one URI')
        WorkType.find_or_fail(wtype)

        for i in uris:
            # attempt to get scheme from URI
            # bug fix: initialise ident so the except handler below cannot
            # NameError when i.get() itself raises (e.g. i is not a dict)
            ident = None
            try:
                ident = i.get('URI') or i.get('uri')
                scheme, value = Identifier.split_uri(ident)
                try:
                    i['canonical'] = i['canonical'] in (True, "true", "True")
                except Exception:
                    # missing 'canonical' key defaults to non-canonical
                    i['canonical'] = False
            except Exception:
                identifier = ident if ident else ''
                raise Error(BADPARAMS, msg="Invalid URI '%s'" % (identifier))
            # check whether the URI scheme exists in the database
            UriScheme.find_or_fail(scheme)

        # instantiate a new work with the input data
        uuid = generate_uuid()
        work = Work(uuid, wtype, titles, uris)

        # check relatives and associate them with the work
        work.check_and_set_relatives(parent, child)
        work.save()

        return [work.__dict__]
Example #24
0

def make_dicts(cursor, row):
    """SQLite row factory: map a result row to {column_name: value}.

    `cursor.description[idx][0]` is the column name for position idx.
    """
    return {cursor.description[idx][0]: value
            for idx, value in enumerate(row)}


# Bootstrap: open the SQLite database, install the dict row factory,
# create the schema, and define seed data (emails appear redacted in this
# copy as "*****@*****.**").
db = sqlite3.connect('.data/db.sqlite')
db.row_factory = make_dicts

cur = db.cursor()

# one table per model used by the application
User.create_table(cur)
Task.create_table(cur)
Column.create_table(cur)
Work.create_table(cur)

# seed users: (name, email, password) — credentials are placeholders
users = [
    User("Rida", "*****@*****.**", "12345"),
    User("Pavel", "*****@*****.**", "12345"),
    User("Toto", "*****@*****.**", "12345"),
]

# seed tasks spread over three columns; author_id presumably references a
# user's email — verify against the User model
tasks = [
    Task(content="Conception", author_id="*****@*****.**", column_id=1),
    Task(content="Analysis", author_id="*****@*****.**", column_id=2),
    Task(content="Test", author_id="*****@*****.**", column_id=1),
    Task(content="Repport", author_id="*****@*****.**", column_id=1),
    Task(content="Algorithms", author_id="*****@*****.**", column_id=3),
]
class DatastoreTest(unittest.TestCase):
    def setUp(self):
        '''Creates a Testbed instance, activates the datastore stub, and
        seeds the database with one word -> work -> character chain.'''
        self.testbed = testbed.Testbed()
        self.testbed.activate()
        self.testbed.init_datastore_v3_stub()

        self.word = Word(id="death", name="death", count=2)
        self.work = Work(parent=self.word.key, id="Hamlet",
                         title="Hamlet", count=1)
        self.character = Character(parent=self.work.key, id="Claudius",
                                   name="Claudius", count=1)
        # the Line is stored first so its key can be referenced as a mention
        mention_key = Line(
            line='Though yet of Hamlet our dear brother\'s death').put()
        self.character.mentions = [mention_key]

        self.word_key = self.word.put()
        self.work_key = self.work.put()
        self.character_key = self.character.put()

    def tearDown(self):
        '''Deletes the seeded entities and deactivates the testbed.

        This restores the original stubs so that tests do not interfere
        with each other.'''
        for key in (self.word_key, self.work_key, self.character_key):
            key.delete()

        self.testbed.deactivate()

    def test_insert_entities(self):
        '''Ensures that the entities are saved in the database.

        If we can retrieve them, they are correctly stored.'''
        retrieved_word = self.word_key.get()
        # bug fix: the count assertion was duplicated; check the name too,
        # mirroring the sibling query test
        self.assertEqual('death', retrieved_word.name)
        self.assertEqual(2, retrieved_word.count)

        retrieved_work = self.work_key.get()
        self.assertEqual('Hamlet', retrieved_work.title)

        retrieved_character = self.character_key.get()
        self.assertEqual('Claudius', retrieved_character.name)
        self.assertEqual(1, len(retrieved_character.mentions))
        self.assertEqual('Though yet of Hamlet our dear brother\'s death', 
            retrieved_character.mentions[0].get().line)

    def test_searching_a_non_existing_word(self):
        '''Ensure nothing fails if we search a word that doesn't exist.'''
        self.assertEqual(Word.get_by_id("sdfgfdgdgf"), None)

    def test_filter_entities_using_query_works(self):
        '''We can search for all the entities starting from a word.'''
        word = Word.get_by_id("death")
        self.assertEqual('death', word.name)
        self.assertEqual(2, word.count)

        # exactly one work hangs off the seeded word
        works = Work.query(ancestor=self.word.key).fetch()
        self.assertEqual(1, len(works))

        # and exactly one character with a single recorded mention
        chars = Character.query(ancestor=works[0].key).fetch()
        self.assertEqual(1, len(chars))
        self.assertEqual(1, len(chars[0].mentions))
        self.assertEqual("Though yet of Hamlet our dear brother's death",
                         chars[0].mentions[0].get().line)