def test_quote_readable_matching(self):
        volley = [
            ('1000', CRONY_TITLE, CRONY_AUTHOR, "CRONY BELIEFS (SIMLER)", CRONY_QUOTE),
            ('1001', "Thinking in Systems: A Primer", "Donna H. Meadows", "THINKING IN SYSTEMS A PRIMER (MEADOWS)", "XXX."),
        ]

        for v in volley:
            source_id, title, author, exp_slug, content = v
            r = Readable.CreateOrUpdate(self.u, source_id, title=title, author=author, source="test")
            r.put()
            Readable.put_sd_batch([r])

            self.assertEqual(r.slug, exp_slug)

            author_names = author.split(' ')
            source = "%s (%s, %s)" % (title, author_names[-1], author_names[0])
            q = Quote.Create(self.u, source, content)
            q.put()
            self.assertIsNotNone(q.readable)
            self.assertEqual(q.readable, r.key)
            self.assertEqual(q.source_slug(), exp_slug)
            r = Readable.GetByTitleAuthor(self.u, author, title)
            self.assertIsNotNone(r)
            self.assertEqual(r.source_id, source_id)

        # Create another quote with no readable to link to
        q = Quote.Create(self.u, "xxx", "content...")
        q.put()

        self.assertIsNone(q.readable)

        # Fetch quotes for readable
        quotes = Quote.Fetch(self.u, readable_id=r.key.id())
        self.assertEqual(len(quotes), 1)
        self.assertEqual(quotes[0].source, source)
Example #2
 def search(self, d):
     term = self.request.get('term')
     self.success, self.message, readables = Readable.Search(self.user, term)
     data = {
         'readables': [r.json() for r in readables if r]
     }
     self.set_response(data)
Example #3
 def update(self, d):
     id = self.request.get('id')
     params = tools.gets(self,
         integers=['type'],
         strings=['notes', 'title', 'url', 'author', 'source'],
         booleans=['read', 'favorite'],
         lists=['tags'])
     if id:
         r = self.user.get(Readable, id=id)
     else:
         # New
         r = Readable.CreateOrUpdate(self.user, None, **params)
     if r:
         r.Update(**params)
         if r.source == 'pocket':
             access_token = self.user.get_integration_prop('pocket_access_token')
             if access_token:
                 from services import pocket
                 if params.get('favorite') == 1:
                     pocket.update_article(access_token, r.source_id, action='favorite')
                 if params.get('read') == 1:
                     pocket.update_article(access_token, r.source_id, action='archive')
         r.put()
         self.success = True
     self.set_response({
         'readable': r.json() if r else None
     })
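
A hypothetical request body the update handler above would accept. The field names mirror the tools.gets() call; the id and values are illustrative only:

# Sketch of a POST payload for the update handler (values are made up)
params = {
    'id': '12345',           # existing Readable id; omit to create a new one
    'title': 'An Article',
    'author': 'Andy Clark',
    'notes': 'Notes...',
    'read': 1,               # booleans arrive as 1/0 form values
    'favorite': 0,
    'tags': ['ai', 'books'],
}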
Example #4
def get_books_on_shelf(user, shelf='currently-reading'):
    '''
    Fetch the user's books on the given Goodreads shelf.

    Returns a (success, readables) tuple.
    '''
    user_id = user.get_integration_prop('goodreads_user_id')
    readables = []
    success = False
    if user_id:
        params = urllib.urlencode({
            'shelf': shelf,
            'key': GR_API_KEY,
            'v': 2
        })
        url = "https://www.goodreads.com/review/list/%s.xml?%s" % (user_id, params)
        logging.debug("Fetching %s for %s" % (url, user))
        res = urlfetch.fetch(
            url=url,
            method=urlfetch.GET,
            validate_certificate=True)
        logging.debug(res.status_code)
        if res.status_code == 200:
            xml = res.content
            data = etree.parse(StringIO(xml))
            for review in data.getroot().find('reviews').findall('review'):
                book = review.find('book')
                isbn = book.find('isbn13').text
                image_url = book.find('image_url').text
                title = book.find('title').text
                authors = book.find('authors')
                link = book.find('link').text
                author = None  # Reset per book so a prior book's author doesn't leak in
                first_author = authors.find('author')
                if first_author is not None:
                    name = first_author.find('name')
                    if name is not None:
                        author = name.text
                r = Readable.CreateOrUpdate(user, isbn, title=title,
                                            url=link, source='goodreads',
                                            image_url=image_url, author=author,
                                            type=READABLE.BOOK,
                                            read=False)
                readables.append(r)
            success = True
        logging.debug("Putting %d readable(s)" % len(readables))
        ndb.put_multi(readables)
        Readable.put_sd_batch(readables)
    return (success, readables)
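
A minimal usage sketch for get_books_on_shelf() above, assuming `user` is a User entity whose 'goodreads_user_id' integration prop is set:

# Sync the currently-reading shelf and log the count (sketch only)
success, readables = get_books_on_shelf(user, shelf='currently-reading')
if success:
    logging.debug("Synced %d Goodreads book(s)" % len(readables))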
Example #5
    def test_quote_readable_matching(self):
        volley = [
            ('1000', "Crony Beliefs", "Kevin Simler", "CRONY BELIEFS (SIMLER)",
             "I contend that the best way to understand all the crazy beliefs out there — aliens, conspiracies, and all the rest — is to analyze them as crony beliefs. Beliefs that have been \"hired\" not for the legitimate purpose of accurately modeling the world, but rather for social and political kickbacks."
             ),
            ('1001', "Thinking in Systems: A Primer", "Donna H. Meadows",
             "THINKING IN SYSTEMS A PRIMER (MEADOWS)", "XXX."),
        ]

        for v in volley:
            source_id, title, author, exp_slug, content = v
            r = Readable.CreateOrUpdate(self.u,
                                        source_id,
                                        title=title,
                                        author=author,
                                        source="test")
            r.put()
            Readable.put_sd_batch([r])

            self.assertEqual(r.slug, exp_slug)

            author_names = author.split(' ')
            source = "%s (%s, %s)" % (title, author_names[-1], author_names[0])
            q = Quote.Create(self.u, source, content)
            q.put()
            self.assertIsNotNone(q.readable)
            self.assertEqual(q.readable, r.key)
            self.assertEqual(q.source_slug(), exp_slug)
            r = Readable.GetByTitleAuthor(self.u, author, title)
            self.assertIsNotNone(r)
            self.assertEqual(r.source_id, source_id)

        # Create another quote with no readable to link to
        q = Quote.Create(self.u, "xxx", "content...")
        q.put()

        self.assertIsNone(q.readable)

        # Fetch quotes for readable
        quotes = Quote.Fetch(self.u, readable_id=r.key.id())
        self.assertEqual(len(quotes), 1)
        self.assertEqual(quotes[0].source, source)
Example #6
    def get(self, d):
        hack_id = self.request.get('hack_id')
        res = {}
        if hack_id == 'index_quotes_readables':
            page = self.request.get_range('page')
            PAGE_SIZE = 50
            index_lookup = {}  # index_name -> (index, list of items)
            for q in Quote.query().fetch(limit=PAGE_SIZE, offset=page * PAGE_SIZE):
                sd, index = q.update_sd(index_put=False)
                if index:  # update_sd() may return no index; guard before grouping
                    if index.name not in index_lookup:
                        index_lookup[index.name] = (index, [sd])
                    else:
                        index_lookup[index.name][1].append(sd)
            for r in Readable.query().fetch(limit=PAGE_SIZE, offset=page * PAGE_SIZE):
                sd, index = r.update_sd(index_put=False)
                if index:
                    if index.name not in index_lookup:
                        index_lookup[index.name] = (index, [sd])
                    else:
                        index_lookup[index.name][1].append(sd)
            if index_lookup:
                n = 0
                for index, items in index_lookup.values():
                    index.put(items)
                    n += len(items)
                res['result'] = "Put %d items in %d indexes" % (n, len(index_lookup))
                res['page'] = page

        elif hack_id == 'normalize_key_props':
            dbp = []
            for hd in HabitDay.query().iter():
                habit_key = hd.habit
                if habit_key.parent() is None:
                    # Need to update
                    hd.habit = ndb.Key('User', hd.key.parent().id(), 'Habit', int(habit_key.id()))
                    dbp.append(hd)
            res['habitdays'] = len(dbp)
            ndb.put_multi(dbp)
            dbp = []
            for jrnl in MiniJournal.query().iter():
                changes = False
                for i, tag_key in enumerate(jrnl.tags):
                    if tag_key.parent() is None:
                        # Need to update
                        jrnl.tags[i] = ndb.Key('User', jrnl.key.parent().id(), 'JournalTag', tag_key.id())
                        changes = True
                if changes:
                    dbp.append(jrnl)
            res['journals'] = len(dbp)
            ndb.put_multi(dbp)

        else:
            res['result'] = 'hack_id not found'
        self.json_out(res)
Example #7
 def random_batch(self, d):
     '''
     Return a random batch (up to 50) of readables that have notes
     '''
     BATCH_SIZE = 50
     sample_keys = Readable.Fetch(self.user, with_notes=True, limit=500, keys_only=True)
     if len(sample_keys) > BATCH_SIZE:
         sample_keys = random.sample(sample_keys, BATCH_SIZE)
     readables = ndb.get_multi(sample_keys)
     self.set_response({
         'readables': [r.json() for r in readables]
         }, success=True)
Example #8
 def list(self, d):
     page, max, offset = tools.paging_params(self.request)
     favorites = self.request.get_range('favorites') == 1
     with_notes = self.request.get_range('with_notes') == 1
     unread = self.request.get_range('unread') == 1
     read = self.request.get_range('read') == 1
     since = self.request.get('since')  # ISO
     readables = Readable.Fetch(self.user, favorites=favorites,
                                unread=unread, read=read,
                                with_notes=with_notes, since=since,
                                limit=max, offset=offset)
     self.set_response({
         'readables': [r.json() for r in readables]
     }, success=True)
Example #9
 def batch_create(self, d):
     readings = json.loads(self.request.get('readings'))
     source = self.request.get('source', default_value='form')
     dbp = []
     for reading in readings:
         type_string = reading.get('type')
         if type_string:
             reading['type'] = READABLE.LOOKUP.get(type_string.lower())
         r = Readable.CreateOrUpdate(self.user, None, source=source,
                                     read=True, **reading)
         dbp.append(r)
     if dbp:
         ndb.put_multi(dbp)
         self.success = True
         self.message = "Putting %d readable(s)" % len(dbp)
     self.set_response()
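
A hypothetical 'readings' payload for batch_create() above. Each dict is forwarded to Readable.CreateOrUpdate as keyword arguments, with the 'type' string mapped through READABLE.LOOKUP; the lookup keys shown here ('book', 'article') are assumptions:

import json

# POSTed as the 'readings' form field (illustrative values)
readings = json.dumps([
    {'title': 'Thinking in Systems: A Primer',
     'author': 'Donna H. Meadows', 'type': 'book'},
    {'title': 'Crony Beliefs',
     'author': 'Kevin Simler', 'type': 'article'},
])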
Example #10
    def test_readable_calls(self):
        # Create
        r = Readable.CreateOrUpdate(self.u,
                                    '1234',
                                    title="An Article",
                                    source='x',
                                    url="http://www.nytimes.com/1")
        r.put()

        self.assertEqual(r.title, "An Article")
        self.assertEqual(r.url, "http://www.nytimes.com/1")

        # List
        response = self.get_json("/api/readable", {}, headers=self.api_headers)
        r = response.get('readables')[0]
        self.assertEqual(r.get('title'), "An Article")

        # Update
        params = {
            'id': r.get('id'),
            'title': 'New Article Name',
            'author': "Andy Clark",
            'source': "New Source",
            'excerpt': "Excerpt...",
            'notes': "Notes...",
            'word_count': 1850,
            'url': 'http://www.example.com'
        }
        response = self.post_json("/api/readable",
                                  params,
                                  headers=self.api_headers)
        r = response.get('readable')
        for key, val in params.items():
            self.assertEqual(r.get(key), val)

        # Search
        response = self.get_json("/api/readable/search", {'term': "clark"},
                                 headers=self.api_headers)
        readables = response.get('readables')
        self.assertEqual(len(readables), 1)

        # Delete
        response = self.post_json("/api/readable/delete", {'id': r.get('id')},
                                  headers=self.api_headers)
        r = self.u.get(Readable, id=r.get('id'))
        self.assertIsNone(r)  # Confirm deletion
Example #11
    def test_evernote_webhook(self, get_note_mocked):
        EN_NOTE_GUID = "1000-0815-aefe-b8a0-8888"
        EN_USER_ID = "1001"
        EN_NOTEBOOK_ID = "ffff-0000"

        self.u.evernote_id = EN_USER_ID
        self.u.set_integration_prop('evernote_notebook_ids', EN_NOTEBOOK_ID)
        self.u.put()

        # Test article clip
        # Mock return from Evernote service
        get_note_mocked.return_value = (EN_NOTE_GUID, MEDIUM_TITLE,
                                        MEDIUM_FULL_CONTENT, MEDIUM_URL)

        self.get_json("/api/integrations/evernote/webhook", {
            'reason': 'create',
            'guid': EN_NOTE_GUID,
            'notebookGuid': EN_NOTEBOOK_ID,
            'userId': self.u.evernote_id
        },
                      headers=self.api_headers)
        readables = Readable.Fetch(self.u)
        self.assertEqual(len(readables), 1)
        r = readables[0]
        self.assertEqual(r.title, MEDIUM_TITLE)
        self.assertEqual(r.url, MEDIUM_URL)

        # Test quote/excerpt clip
        get_note_mocked.return_value = (EN_NOTE_GUID, CRONY_TITLE, CRONY_QUOTE,
                                        CRONY_URL)

        self.get_json("/api/integrations/evernote/webhook", {
            'reason': 'create',
            'guid': EN_NOTE_GUID,
            'notebookGuid': EN_NOTEBOOK_ID,
            'userId': self.u.evernote_id
        },
                      headers=self.api_headers)
        quotes = Quote.Fetch(self.u)
        self.assertEqual(len(quotes), 1)
        q = quotes[0]
        self.assertEqual(q.source, CRONY_TITLE)
        self.assertEqual(q.content, CRONY_QUOTE)
Example #12
    def test_readable_calls(self):
        # Create
        r = Readable.CreateOrUpdate(self.u,
                                    '1234',
                                    title="An Article",
                                    source='x',
                                    url="http://www.nytimes.com/1")
        r.put()

        self.assertEqual(r.title, "An Article")
        self.assertEqual(r.url, "http://www.nytimes.com/1")

        # List
        response = self.get_json("/api/readable", {}, headers=self.api_headers)
        r = response.get('readables')[0]
        self.assertEqual(r.get('title'), "An Article")

        # Update
        response = self.post_json("/api/readable", {
            'id': r.get('id'),
            'title': 'New Article Name',
            'author': "Andy Clark"
        },
                                  headers=self.api_headers)
        r = response.get('readable')
        self.assertEqual(r.get('title'), 'New Article Name')
        self.assertEqual(r.get('author'), 'Andy Clark')

        # Search
        response = self.get_json("/api/readable/search", {'term': "clark"},
                                 headers=self.api_headers)
        readables = response.get('readables')
        self.assertEqual(len(readables), 1)

        # Delete
        response = self.post_json("/api/readable/delete", {'id': r.get('id')},
                                  headers=self.api_headers)
        r = self.u.get(Readable, id=r.get('id'))
        self.assertIsNone(r)  # Confirm deletion
Example #13
def sync(user, access_token, since_timestamp=0):
    '''
    Sync the user's Pocket articles and return a (success, readables, latest_timestamp) tuple.

    Sample dict from pocket:

    {u'resolved_url': u'https://arxiv.org/abs/1701.06538', u'given_title': u'', u'is_article': u'1', u'sort_id': 16, u'word_count': u'221', u'status': u'0', u'has_image': u'0', u'given_url': u'https://arxiv.org/abs/1701.06538', u'favorite': u'0', u'has_video': u'0', u'time_added': u'1485774143', u'time_updated': u'1485774143', u'time_read': u'0', u'excerpt': u'Authors: Noam Shazeer, Azalia Mirhoseini, Krzysztof Maziarz, Andy Davis, Quoc Le, Geoffrey Hinton, Jeff Dean  Abstract: The capacity of a neural network to absorb information is limited by its number of parameters.', u'resolved_title': u'Title: Outrageously Large Neural Networks: The Sparsely-Gated Mixture-of-Experts Layer', u'authors': {u'32207876': {u'url': u'', u'author_id': u'32207876', u'item_id': u'1576987151', u'name': u'cscs.CLcs.NEstatstat.ML'}}, u'resolved_id': u'1576987151', u'item_id': u'1576987151', u'time_favorited': u'0', u'is_index': u'0'}
    {u'resolved_url': u'http://lens.blogs.nytimes.com/2012/10/09/looking-into-the-eyes-of-made-in-china/', u'given_title': u'http://lens.blogs.nytimes.com/2012/10/09/looking-into-the-eyes-of-made-in-c', u'is_article': u'1', u'sort_id': 99, u'word_count': u'800', u'status': u'1', u'has_image': u'0', u'given_url': u'http://lens.blogs.nytimes.com/2012/10/09/looking-into-the-eyes-of-made-in-china/?partner=rss&emc=rss&smid=tw-nytimes', u'favorite': u'0', u'has_video': u'0', u'time_added': u'1349951324', u'time_updated': u'1482284773', u'time_read': u'1482284772', u'excerpt': u'Your clothes, your child\u2019s toys, even the device you use to read these words may have been made in China. They are among the $100 billion of goods that the United States imports from China each year \u2014 an exchange that has become an important issue in the 2012 presidential campaign.', u'resolved_title': u'Looking Into the Eyes of ‘Made in China’', u'authors': {u'3024958': {u'url': u'', u'author_id': u'3024958', u'item_id': u'233921121', u'name': u'KERRI MACDONALD'}}, u'resolved_id': u'233843309', u'item_id': u'233921121', u'time_favorited': u'0', u'is_index': u'0'}
    '''
    data = urllib.urlencode({
        'access_token': access_token,
        'consumer_key': POCKET_CONSUMER_KEY,
        'detailType': 'complete',
        'since': since_timestamp,
        'state': 'all'
    })
    success = False
    res = urlfetch.fetch(
        url=GET_ENDPOINT,
        payload=data,
        method=urlfetch.POST,
        deadline=60,
        validate_certificate=True)
    logging.debug(res.status_code)
    latest_timestamp = 0
    readables = []
    if res.status_code == 200:
        data = json.loads(res.content)
        articles = data.get('list', {})
        latest_timestamp = data.get('since', 0)  # Pocket's 'since' value, used for incremental syncs
        save = []
        USE_RESOLVED_TITLE = True
        if articles:
            for id, article in articles.items():
                source = 'pocket'
                if USE_RESOLVED_TITLE:
                    title = article.get('resolved_title')
                else:
                    title = article.get('given_title')
                url = article.get('given_url')
                status = article.get('status')
                authors = article.get('authors')
                excerpt = article.get('excerpt')
                images = article.get('images')
                time_added = int(article.get('time_added', 0)) * 1000
                time_read = int(article.get('time_read', 0)) * 1000
                dt_added = tools.dt_from_ts(time_added)
                dt_read = tools.dt_from_ts(time_read) if time_read else None
                tags = article.get('tags', {}).keys()
                word_count = int(article.get('word_count', 0))
                favorite = int(article.get('favorite', 0)) == 1
                image_url = None
                author = None
                if images:
                    first_image = images.get('1')
                    if first_image:
                        image_url = first_image.get('src')
                if authors:
                    author_keys = authors.keys()
                    if author_keys:
                        author = authors.get(author_keys[0], {}).get('name')
                archived = int(status) == 1
                read = archived and (not tags or 'unread' not in tags)
                r = Readable.CreateOrUpdate(user, source_id=id, title=title, url=url,
                                            image_url=image_url, author=author,
                                            excerpt=excerpt, favorite=favorite,
                                            dt_added=dt_added, word_count=word_count,
                                            dt_read=dt_read,
                                            tags=tags, source=source, read=read)
                if r:
                    r.Update(read=archived, favorite=favorite, dt_read=dt_read)
                    save.append(r)
                    readables.append(r)
        ndb.put_multi(save)  # Save all
        Readable.put_sd_batch(save)
        success = True
    else:
        logging.debug(res.headers)
    return (success, readables, latest_timestamp)
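
A minimal usage sketch for sync() above, assuming a valid Pocket access token. This variant leaves timestamp bookkeeping to the caller; the variant in Example #14 below persists it itself via the 'pocket_last_timestamp' integration prop:

# Full sync, then remember where we left off (sketch only)
success, readables, latest_timestamp = sync(user, access_token, since_timestamp=0)
if success:
    user.set_integration_prop('pocket_last_timestamp', latest_timestamp)
    user.put()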
Example #14
def sync(user, access_token):
    '''
    Sync the user's Pocket articles since the last synced timestamp and return a (success, readables, latest_timestamp) tuple.

    Sample dict from pocket:

    {u'resolved_url': u'https://arxiv.org/abs/1701.06538', u'given_title': u'', u'is_article': u'1', u'sort_id': 16, u'word_count': u'221', u'status': u'0', u'has_image': u'0', u'given_url': u'https://arxiv.org/abs/1701.06538', u'favorite': u'0', u'has_video': u'0', u'time_added': u'1485774143', u'time_updated': u'1485774143', u'time_read': u'0', u'excerpt': u'Authors: Noam Shazeer, Azalia Mirhoseini, Krzysztof Maziarz, Andy Davis, Quoc Le, Geoffrey Hinton, Jeff Dean  Abstract: The capacity of a neural network to absorb information is limited by its number of parameters.', u'resolved_title': u'Title: Outrageously Large Neural Networks: The Sparsely-Gated Mixture-of-Experts Layer', u'authors': {u'32207876': {u'url': u'', u'author_id': u'32207876', u'item_id': u'1576987151', u'name': u'cscs.CLcs.NEstatstat.ML'}}, u'resolved_id': u'1576987151', u'item_id': u'1576987151', u'time_favorited': u'0', u'is_index': u'0'}
    {u'resolved_url': u'http://lens.blogs.nytimes.com/2012/10/09/looking-into-the-eyes-of-made-in-china/', u'given_title': u'http://lens.blogs.nytimes.com/2012/10/09/looking-into-the-eyes-of-made-in-c', u'is_article': u'1', u'sort_id': 99, u'word_count': u'800', u'status': u'1', u'has_image': u'0', u'given_url': u'http://lens.blogs.nytimes.com/2012/10/09/looking-into-the-eyes-of-made-in-china/?partner=rss&emc=rss&smid=tw-nytimes', u'favorite': u'0', u'has_video': u'0', u'time_added': u'1349951324', u'time_updated': u'1482284773', u'time_read': u'1482284772', u'excerpt': u'Your clothes, your child\u2019s toys, even the device you use to read these words may have been made in China. They are among the $100 billion of goods that the United States imports from China each year \u2014 an exchange that has become an important issue in the 2012 presidential campaign.', u'resolved_title': u'Looking Into the Eyes of ‘Made in China’', u'authors': {u'3024958': {u'url': u'', u'author_id': u'3024958', u'item_id': u'233921121', u'name': u'KERRI MACDONALD'}}, u'resolved_id': u'233843309', u'item_id': u'233921121', u'time_favorited': u'0', u'is_index': u'0'}
    '''
    dt = datetime.now() - timedelta(days=7)
    init_sync_since = tools.unixtime(dt, ms=False)
    TS_KEY = 'pocket_last_timestamp'  # Seconds
    since_timestamp = user.get_integration_prop(TS_KEY, init_sync_since)
    data = urllib.urlencode({
        'access_token': access_token,
        'consumer_key': POCKET_CONSUMER_KEY,
        'detailType': 'complete',
        'since': since_timestamp,
        'state': 'all'
    })
    success = False
    logging.debug("Syncing pocket for %s since %s" % (user, dt))
    res = urlfetch.fetch(
        url=GET_ENDPOINT,
        payload=data,
        method=urlfetch.POST,
        deadline=60,
        validate_certificate=True)
    logging.debug(res.status_code)
    latest_timestamp = 0
    readables = []
    if res.status_code == 200:
        data = json.loads(res.content)
        articles = data.get('list', {})
        latest_timestamp = data.get('since', 0)  # Pocket's 'since' value, used for incremental syncs
        save = []
        USE_RESOLVED_TITLE = True
        if articles:
            for id, article in articles.items():
                source = 'pocket'
                if USE_RESOLVED_TITLE:
                    title = article.get('resolved_title')
                else:
                    title = article.get('given_title')
                url = article.get('given_url')
                status = article.get('status')
                authors = article.get('authors')
                excerpt = article.get('excerpt')
                images = article.get('images')
                time_added = int(article.get('time_added', 0)) * 1000
                time_read = int(article.get('time_read', 0)) * 1000
                dt_added = tools.dt_from_ts(time_added)
                dt_read = tools.dt_from_ts(time_read) if time_read else None
                tags = article.get('tags', {}).keys()
                word_count = int(article.get('word_count', 0))
                favorite = int(article.get('favorite', 0)) == 1
                image_url = None
                author = None
                if images:
                    first_image = images.get('1')
                    if first_image:
                        image_url = first_image.get('src')
                if authors:
                    author_keys = authors.keys()
                    if author_keys:
                        author = authors.get(author_keys[0], {}).get('name')
                archived = int(status) == 1
                read = archived and (not tags or 'unread' not in tags)
                r = Readable.CreateOrUpdate(user, source_id=id, title=title, url=url,
                                            image_url=image_url, author=author,
                                            excerpt=excerpt, favorite=favorite,
                                            dt_added=dt_added, word_count=word_count,
                                            dt_read=dt_read,
                                            tags=tags, source=source, read=read)
                if r:
                    r.Update(read=archived, favorite=favorite, dt_read=dt_read)
                    save.append(r)
                    readables.append(r)
        ndb.put_multi(save)  # Save all
        Readable.put_sd_batch(save)
        user.set_integration_prop(TS_KEY, latest_timestamp)
        success = True
    else:
        logging.debug(res.headers)
    return (success, readables, latest_timestamp)
Example #15
 def fetch_daily_panel_data(self, since=None, until=None):
     self._maybe_get_habits()
     self._maybe_get_journal_questions()
     if not since:
         since = datetime.combine(
             (datetime.now() - timedelta(days=self.days_ago)).date(),
             time(0, 0))
     if not until:
         until = datetime.combine(
             (datetime.now() - timedelta(days=self.days_ago_end)).date(),
             time(0, 0))
     rows = []
     habitdays_by_day = tools.partition(
         HabitDay.Range(self.user,
                        self.habits.values(),
                        since,
                        until_date=until),
         lambda hd: tools.iso_date(hd.date))
     tasks_by_day = tools.partition(
         Task.DueInRange(self.user, since, until, limit=500),
         lambda t: tools.iso_date(t.dt_due))
     readables_by_day = tools.partition(
         Readable.Fetch(self.user,
                        read=True,
                        since=tools.iso_date(since),
                        until=tools.iso_date(until)),
         lambda r: tools.iso_date(r.dt_read))
     journals, iso_dates = MiniJournal.Fetch(self.user,
                                             start=since,
                                             end=until)
     journals_by_day = tools.partition(
         journals, lambda jrnl: tools.iso_date(jrnl.date))
     cursor = since
     while cursor <= until:
         iso_date = tools.iso_date(cursor)
         tasks = tasks_by_day.get(iso_date, [])
         habitdays = habitdays_by_day.get(iso_date, [])
         readables = readables_by_day.get(iso_date, [])
         journals = journals_by_day.get(iso_date, [])
         journal = journals[0] if journals else None
         tasks_done = tasks_undone = habits_done = habits_cmt = habits_cmt_undone = items_read = 0
         row = {}
         for t in tasks:
             if t.is_done():
                 tasks_done += 1
             else:
                 tasks_undone += 1
         habits_checklist = self.habits.keys()  # list of habit IDs
         for hd in habitdays:
             hid = hd.habit.id()
             h = self.habits.get(hid)
             if h:
                 habits_checklist.remove(hid)
                 row[self._habit_col(h)] = 'true' if hd.done else 'false'
             if hd.done:
                 habits_done += 1
             if hd.committed:
                 habits_cmt += 1
                 if not hd.done:
                     habits_cmt_undone += 1
         if habits_checklist:
             # Missing habit-days, need to create columns anyway
             for hid in habits_checklist:
                 h = self.habits.get(hid)
                 if h:
                     row[self._habit_col(h)] = 'false'
         items_read = len(readables)
         fav_items_read = len([r for r in readables if r.favorite])
         row.update({
             "id": iso_date,
             "date": iso_date,
             "tasks_done": tasks_done,
             "tasks_undone": tasks_undone,
             "habits_done": habits_done,
             "habits_cmt": habits_cmt,
             "habits_cmt_undone": habits_cmt_undone,
             "items_read": items_read,
             "fav_items_read": fav_items_read
         })
         for q in self.journal_questions:
             name = q.get('name')
             value = None
             if journal:
                 value = journal.get_data_value(name)
                 numeric = q.get(
                     'response_type') in JOURNAL.NUMERIC_RESPONSES
                 if numeric:
                     value = tools.safe_number(value, default=0)
                 elif isinstance(value, basestring):
                     value = tools.removeNonAscii(value)
                 else:
                     value = str(value) if value else ""
             row[self._journal_col(q)] = value
         rows.append(row)
         cursor += timedelta(days=1)
     return rows
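
fetch_daily_panel_data() above leans on tools.partition() to bucket entities by ISO date. A sketch of the assumed behavior (the real helper lives in the project's tools module):

# Assumed behavior of tools.partition (sketch, not the project's code):
# group items into a dict keyed by whatever keyfunc returns for each item.
def partition(items, keyfunc):
    grouped = {}
    for item in items:
        grouped.setdefault(keyfunc(item), []).append(item)
    return grouped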