Example 1
 def __init__(self, entkey, rkey):
     super(AnalysisReportWorker, self).__init__(rkey,
                                                start_att="dt_created",
                                                start_att_direction="")
     self.enterprise = self.report.enterprise
     self.FILTERS = [("enterprise =", self.enterprise)]
     specs = self.report.getSpecs()
     start = specs.get("start", 0)
     end = specs.get("end", 0)
     self.columns = specs.get('columns', [])
     if isinstance(self.columns, basestring):
         self.columns = self.columns.split(',')
     sensortype_id = specs.get("sensortype_id")
     self.report.generate_title("Analysis Report",
                                ts_start=start,
                                ts_end=end,
                                sensortype_id=sensortype_id)
     if start:
         self.FILTERS.append(("dt_created >=", tools.dt_from_ts(start)))
     if end:
         self.FILTERS.append(("dt_created <", tools.dt_from_ts(end)))
     if sensortype_id:
         self.FILTERS.append(("sensortype =",
                              db.Key.from_path('Enterprise',
                                               self.enterprise.key().id(),
                                               'SensorType',
                                               int(sensortype_id))))
     self.sensor_lookup = tools.lookupDict(
         Sensor,
         self.enterprise.sensor_set.fetch(limit=200),
         valueTransform=lambda s: s.name)
     self.headers = ["Key", "Sensor", "Created", "Updated"] + self.columns
Example 2
 def __init__(self, entkey, rkey):
     super(AlarmReportWorker, self).__init__(rkey,
                                             start_att="dt_start",
                                             start_att_direction="")
     self.enterprise = self.report.enterprise
     self.FILTERS = [("enterprise =", self.enterprise)]
     specs = self.report.getSpecs()
     start = specs.get("start", 0)
     end = specs.get("end", 0)
     if start:
         self.FILTERS.append(("dt_start >=", tools.dt_from_ts(start)))
     if end:
         self.FILTERS.append(("dt_start <", tools.dt_from_ts(end)))
     self.report.generate_title("Alarm Report", ts_start=start, ts_end=end)
     self.sensor_lookup = tools.lookupDict(
         Sensor,
         self.enterprise.sensor_set.fetch(limit=200),
         valueTransform=lambda s: s.name,
         keyprop="key_name")
     self.rule_lookup = tools.lookupDict(
         Rule, self.enterprise.rule_set.fetch(limit=100))
     self.headers = [
         "Alarm ID", "Sensor ID", "Sensor", "Rule ID", "Rule", "Apex",
         "Start", "End"
     ]
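tools.lookupDict is used by several of these workers but is not defined on this page. A plausible sketch inferred from the call sites above (the model-class argument is accepted for parity with the real helper; keyprop and valueTransform behave as the calls suggest):

# Plausible sketch of tools.lookupDict, inferred from its call sites.
# Keys entities by numeric id, or by key name when keyprop="key_name";
# valueTransform optionally maps each entity before storing it.
def lookupDict(model_class, entities, keyprop="id", valueTransform=None):
    lookup = {}
    for e in entities:
        key = e.key().name() if keyprop == "key_name" else e.key().id()
        lookup[key] = valueTransform(e) if valueTransform else e
    return lookup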
Example 3
 def add_date_filters(self, start=None, end=None):
     if start:
         self.FILTERS.append(
             "%s >= DATETIME('%s 00:00:00')" %
             (self.start_att, tools.iso_date(tools.dt_from_ts(start))))
     if end:
         self.FILTERS.append(
             "%s < DATETIME('%s 23:59:59')" %
             (self.start_att, tools.iso_date(tools.dt_from_ts(end))))
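This variant builds raw GQL strings instead of the (property, value) filter tuples the other workers use. Assuming tools.iso_date returns a YYYY-MM-DD string, the start filter comes out like the following (values made up for illustration):

# Illustrative only: the string the start filter would contain, assuming
# tools.iso_date(dt) == dt.date().isoformat() and start_att = "dt_created".
from datetime import datetime

start_ms = 1491350400000  # 2017-04-05 00:00:00 UTC, in milliseconds
day = datetime.utcfromtimestamp(start_ms / 1000.0).date().isoformat()
print "dt_created >= DATETIME('%s 00:00:00')" % day
# -> dt_created >= DATETIME('2017-04-05 00:00:00')

Note that the end bound compares with < against 23:59:59, which silently drops records in the final second of the day; an exclusive bound against the next day's 00:00:00 would be airtight.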
Example 4
 def __init__(self, sensorkey, rkey):
     super(SensorDataReportWorker, self).__init__(rkey, start_att="dt_recorded", start_att_direction="-")
     self.sensor = Sensor.get(sensorkey)
     self.FILTERS = [("sensor =", self.sensor)]
     self.report.title = "Sensor Data Report [ %s ]" % (self.sensor)
     specs = self.report.getSpecs()
     ts_start = specs.get("ts_start", 0)
     ts_end = specs.get("ts_end", 0)
     if ts_start:
         self.FILTERS.append(("dt_recorded >=", tools.dt_from_ts(ts_start)))
     if ts_end:
         self.FILTERS.append(("dt_recorded <", tools.dt_from_ts(ts_end)))
     self.columns = specs.get('columns', [])
     standard_cols = ["Date"]
     self.headers = standard_cols + self.columns
Example 5
 def __init__(self, entkey, rkey):
     super(AlarmReportWorker, self).__init__(rkey, start_att="dt_start", start_att_direction="-")
     self.enterprise = self.report.enterprise
     self.FILTERS = [("enterprise =", self.enterprise)]
     specs = self.report.getSpecs()
     start = specs.get("start", 0)
     end = specs.get("end", 0)
     if start:
         self.FILTERS.append(("dt_start >=", tools.dt_from_ts(start)))
     if end:
         self.FILTERS.append(("dt_start <", tools.dt_from_ts(end)))
     self.report.generate_title("Alarm Report", ts_start=start, ts_end=end)
     self.sensor_lookup = tools.lookupDict(Sensor, self.enterprise.sensor_set.fetch(limit=200), valueTransform=lambda s: s.name)
     self.rule_lookup = tools.lookupDict(Rule, self.enterprise.rule_set.fetch(limit=100))
     self.headers = ["Sensor", "Rule", "Apex", "Start", "End"]
Example 6
    def list(self, d):
        """Fetch a list of targets

        Args:
            max: page size (1-500, default 100)
            updated_since: (optional) timestamp (ms) of update cutoff
            group_id: (optional) group id to filter targets by

        Returns:
            JSON: 'targets' list of target objects
        """
        success = False
        message = None

        _max = self.request.get_range('max', max_value=500, default=100)
        ms_updated_since = self.request.get_range('updated_since', default=0) # ms
        group_id = self.request.get_range("group_id")

        updated_since = tools.dt_from_ts(ms_updated_since) if ms_updated_since else None
        targets = Target.Fetch(d['user'], updated_since=updated_since, group_id=group_id, limit=_max)
        success = True

        data = {
            'targets': [tgt.json() for tgt in targets]
            }
        self.json_out(data, success=success, message=message)
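These list handlers lean on webapp2's Request.get_range to parse and clamp integer query parameters. Roughly, as an approximation of its documented behavior (not webapp2's source):

# Approximation of webapp2's Request.get_range(name, ...) behavior:
# parse the parameter as an int, clamp to the given bounds, and fall
# back to the default when missing or malformed.
def get_range_sketch(raw, min_value=None, max_value=None, default=None):
    try:
        value = int(raw)
    except (TypeError, ValueError):
        return default
    if min_value is not None:
        value = max(value, min_value)
    if max_value is not None:
        value = min(value, max_value)
    return value

The `if ms_updated_since else None` guard above matters: get_range returns the default (0 here) when the parameter is absent, and dt_from_ts(0) would otherwise read as the epoch rather than "no cutoff".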
Example 7
    def test_project_report(self):
        prj = Project.Create(self.u)
        prj.Update(title="New Project",
                   subhead="Project subhead",
                   due=datetime(2017, 4, 5))
        prj.set_progress(3)
        prj.put()

        self._test_report(
            {'type': REPORT.PROJECT_REPORT},
            [[
                "Date Created", "Date Due", "Date Completed", "Date Archived",
                "Title", "Subhead", "Links", "Starred", "Archived", "Progress",
                'Progress 10%', 'Progress 20%', 'Progress 30%', 'Progress 40%',
                'Progress 50%', 'Progress 60%', 'Progress 70%', 'Progress 80%',
                'Progress 90%', 'Progress 100%'
            ],
             [
                 tools.sdatetime(prj.dt_created, fmt="%Y-%m-%d %H:%M:%S %Z"),
                 tools.sdatetime(prj.dt_due, fmt="%Y-%m-%d %H:%M:%S %Z"),
                 tools.sdatetime(prj.dt_completed, fmt="%Y-%m-%d %H:%M:%S %Z"),
                 tools.sdatetime(prj.dt_archived, fmt="%Y-%m-%d %H:%M:%S %Z"),
                 "New Project", "Project subhead", "", "0", "0", "30%",
                 "N/A", "N/A",
                 tools.sdatetime(tools.dt_from_ts(prj.progress_ts[2]),
                                 fmt="%Y-%m-%d %H:%M:%S %Z"),
                 "N/A", "N/A", "N/A", "N/A", "N/A", "N/A", "N/A"
             ]])
Example 8
 def __init__(self, entkey, rkey):
     super(AnalysisReportWorker, self).__init__(rkey, start_att="dt_created", start_att_direction="-")
     self.enterprise = self.report.enterprise
     self.FILTERS = [("enterprise =", self.enterprise)]
     specs = self.report.getSpecs()
     start = specs.get("start", 0)
     end = specs.get("end", 0)
     self.columns = specs.get("columns", "").split(",")
     sensortype_id = specs.get("sensortype_id")
     self.report.generate_title("Analysis Report", ts_start=start, ts_end=end, sensortype_id=sensortype_id)
     if start:
         self.FILTERS.append(("dt_created >=", tools.dt_from_ts(start)))
     if end:
         self.FILTERS.append(("dt_created <", tools.dt_from_ts(end)))
     if sensortype_id:
         self.FILTERS.append(("sensortype =", db.Key.from_path('Enterprise', self.enterprise.key().id(), 'SensorType', int(sensortype_id))))
     self.sensor_lookup = tools.lookupDict(Sensor, self.enterprise.sensor_set.fetch(limit=200), valueTransform=lambda s: s.name)
     self.headers = ["Key", "Sensor", "Created", "Updated"] + self.columns
Example 9
 def __init__(self, rkey):
     super(APILogReportWorker, self).__init__(rkey,
                                              start_att="date",
                                              start_att_direction="")
     self.enterprise = self.report.enterprise
     self.FILTERS = [("enterprise =", self.enterprise)]
     specs = self.report.getSpecs()
     start = specs.get("start", 0)
     end = specs.get("end", 0)
     if start:
         self.FILTERS.append(("date >=", tools.dt_from_ts(start)))
     if end:
         self.FILTERS.append(("date <", tools.dt_from_ts(end)))
     self.report.generate_title("API Log Report",
                                ts_start=start,
                                ts_end=end)
     self.headers = [
         "Request ID", "User ID", "Date", "Path", "Method", "Request"
     ]
Example 10
    def list(self, d):
        success = False
        message = None
        records = []

        _max = self.request.get_range('max', max_value=500, default=100)
        sensor_kn = self.request.get('sensor_kn')
        dt_start = tools.dt_from_ts(self.request.get_range('ts_start'))
        dt_end = tools.dt_from_ts(self.request.get_range('ts_end'))
        downsample = self.request.get_range('downsample')

        if sensor_kn:
            s = Sensor.get_by_key_name(sensor_kn, parent=d['enterprise'])
            if s:
                records = Record.Fetch(s, dt_start=dt_start, dt_end=dt_end, downsample=downsample, limit=_max)
                success = True

        data = {
            'records': [r.json() for r in records]
            }
        self.json_out(data, success=success, message=message)
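Unlike the other handlers on this page, this one passes ts_start/ts_end straight to dt_from_ts with no `if ms else None` guard, so an absent parameter reaches dt_from_ts as None or 0 and may be treated as the 1970 epoch rather than "no bound". A drop-in sketch of the guarded form used elsewhere here:

        # Sketch: treat missing timestamps as "no bound", matching the
        # idiom in the other list handlers on this page.
        ts_start = self.request.get_range('ts_start')
        ts_end = self.request.get_range('ts_end')
        dt_start = tools.dt_from_ts(ts_start) if ts_start else None
        dt_end = tools.dt_from_ts(ts_end) if ts_end else None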
Example 11
 def __init__(self, sensorkey, rkey):
     super(SensorDataReportWorker, self).__init__(rkey,
                                                  start_att="dt_recorded",
                                                  start_att_direction="")
     title_kwargs = {}
     specs = self.report.getSpecs()
     if sensorkey:
         self.sensor = Sensor.get(sensorkey)
         self.FILTERS = [("sensor =", self.sensor)]
         title_kwargs['sensor'] = str(self.sensor)
     elif specs.get('sensortype_id'):
         sensortype_id = specs.get('sensortype_id')
         title_kwargs['sensor_type'] = sensortype_id
         self.FILTERS = [
             ("sensortype =",
              db.Key.from_path('SensorType',
                               sensortype_id,
                               parent=self.report.enterprise.key()))
         ]
     else:
         # Enterprise wide
         self.FILTERS = [("enterprise =", self.report.enterprise)]
     start = specs.get("start", 0)
     end = specs.get("end", 0)
     if start:
         self.FILTERS.append(("dt_recorded >=", tools.dt_from_ts(start)))
     if end:
         self.FILTERS.append(("dt_recorded <", tools.dt_from_ts(end)))
     self.report.generate_title("Sensor Data Report",
                                ts_start=start,
                                ts_end=end,
                                **title_kwargs)
     self.columns = specs.get('columns', [])
     if isinstance(self.columns, basestring):
         # Split even a single column name so the headers list concat works
         self.columns = self.columns.split(',')
     standard_cols = ["Record ID", "Sensor ID", "Date"]
     self.headers = standard_cols + self.columns
     self.batch_size = 1000
Example 12
 def entityData(self, prj):
     row = [
         tools.sdatetime(prj.dt_created, fmt=DATE_FMT),
         tools.sdatetime(prj.dt_due, fmt=DATE_FMT),
         tools.sdatetime(prj.dt_completed, fmt=DATE_FMT),
         tools.sdatetime(prj.dt_archived, fmt=DATE_FMT), prj.title,
         prj.subhead, ', '.join(prj.urls), "1" if prj.starred else "0",
         "1" if prj.archived else "0",
         "%d%%" % (prj.progress * 10)
     ]
     for i in range(10):
         val = ""
         if prj.progress_ts and len(prj.progress_ts) > i:
             ms = prj.progress_ts[i]
             val = tools.sdatetime(tools.dt_from_ts(ms), fmt=DATE_FMT)
         row.append(val)
     return row
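tools.sdatetime does the datetime-to-string work here and in the project report test; presumably a None-tolerant strftime wrapper, since dt_due, dt_completed and dt_archived may be unset. A plausible sketch:

# Plausible sketch of tools.sdatetime ("safe datetime"); the default
# format string is an assumption based on the fmt used in the test above.
def sdatetime(dt, fmt="%Y-%m-%d %H:%M:%S %Z"):
    return dt.strftime(fmt) if dt else ""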
Example 13
    def list(self, d):
        success = False
        message = None

        _max = self.request.get_range('max', max_value=500, default=100)
        with_records = self.request.get_range('with_records', default=0)
        ms_updated_since = self.request.get_range('updated_since', default=0) # ms
        target_id = self.request.get_range('target_id')
        group_id = self.request.get_range('group_id')

        updated_since = tools.dt_from_ts(ms_updated_since) if ms_updated_since else None
        sensors = Sensor.Fetch(d['user'], updated_since=updated_since, target_id=target_id, group_id=group_id, limit=_max)
        success = True

        data = {
            'sensors': [s.json(with_records=with_records) for s in sensors]
            }
        self.json_out(data, success=success, message=message)
Example 14
    def list(self, d):
        success = False
        message = None

        key_names = self.request.get('key_names') # comma sep
        _max = self.request.get_range('max', max_value=500, default=100)
        with_records = self.request.get_range('with_records', default=0)
        ms_updated_since = self.request.get_range('updated_since', default=0) # ms
        target_id = self.request.get_range('target_id')
        group_id = self.request.get_range('group_id')

        updated_since = tools.dt_from_ts(ms_updated_since) if ms_updated_since else None

        if key_names:
            sensors = Sensor.get_by_key_name(key_names.split(','), parent=self.enterprise)
        else:
            sensors = Sensor.Fetch(d['user'], updated_since=updated_since, target_id=target_id, group_id=group_id, limit=_max)
        success = True

        data = {
            'sensors': [s.json(with_records=with_records) for s in sensors]
            }
        self.json_out(data, success=success, message=message)
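One caveat with the key_names branch: when handed a list, db.Model.get_by_key_name returns a result list containing None for any key that does not exist, and s.json() would then fail. A small guard:

        # get_by_key_name with a list leaves None placeholders for
        # missing keys; drop them before serializing.
        sensors = [s for s in sensors if s]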
Example 15
def sync(user, access_token, since_timestamp=0):
    '''
    Return JSON array {title, author, isbn, image}

    Sample dict from pocket:

    {u'resolved_url': u'https://arxiv.org/abs/1701.06538', u'given_title': u'', u'is_article': u'1', u'sort_id': 16, u'word_count': u'221', u'status': u'0', u'has_image': u'0', u'given_url': u'https://arxiv.org/abs/1701.06538', u'favorite': u'0', u'has_video': u'0', u'time_added': u'1485774143', u'time_updated': u'1485774143', u'time_read': u'0', u'excerpt': u'Authors: Noam Shazeer, Azalia Mirhoseini, Krzysztof Maziarz, Andy Davis, Quoc Le, Geoffrey Hinton, Jeff Dean  Abstract: The capacity of a neural network to absorb information is limited by its number of parameters.', u'resolved_title': u'Title: Outrageously Large Neural Networks: The Sparsely-Gated Mixture-of-Experts Layer', u'authors': {u'32207876': {u'url': u'', u'author_id': u'32207876', u'item_id': u'1576987151', u'name': u'cscs.CLcs.NEstatstat.ML'}}, u'resolved_id': u'1576987151', u'item_id': u'1576987151', u'time_favorited': u'0', u'is_index': u'0'}
    {u'resolved_url': u'http://lens.blogs.nytimes.com/2012/10/09/looking-into-the-eyes-of-made-in-china/', u'given_title': u'http://lens.blogs.nytimes.com/2012/10/09/looking-into-the-eyes-of-made-in-c', u'is_article': u'1', u'sort_id': 99, u'word_count': u'800', u'status': u'1', u'has_image': u'0', u'given_url': u'http://lens.blogs.nytimes.com/2012/10/09/looking-into-the-eyes-of-made-in-china/?partner=rss&emc=rss&smid=tw-nytimes', u'favorite': u'0', u'has_video': u'0', u'time_added': u'1349951324', u'time_updated': u'1482284773', u'time_read': u'1482284772', u'excerpt': u'Your clothes, your child\u2019s toys, even the device you use to read these words may have been made in China. They are among the $100 billion of goods that the United States imports from China each year \u2014 an exchange that has become an important issue in the 2012 presidential campaign.', u'resolved_title': u'Looking Into the Eyes of &#8216;Made in China&#8217;', u'authors': {u'3024958': {u'url': u'', u'author_id': u'3024958', u'item_id': u'233921121', u'name': u'KERRI MACDONALD'}}, u'resolved_id': u'233843309', u'item_id': u'233921121', u'time_favorited': u'0', u'is_index': u'0'}
    '''
    data = urllib.urlencode({
        'access_token': access_token,
        'consumer_key': POCKET_CONSUMER_KEY,
        'detailType': 'complete',
        'since': since_timestamp,
        'state': 'all'
    })
    success = False
    res = urlfetch.fetch(
        url=GET_ENDPOINT,
        payload=data,
        method=urlfetch.POST,
        deadline=60,
        validate_certificate=True)
    logging.debug(res.status_code)
    latest_timestamp = 0
    readables = []
    if res.status_code == 200:
        data = json.loads(res.content)
        articles = data.get('list', {})
        latest_timestamp = data.get('since', 0)  # Pocket's 'since' watermark for incremental sync
        save = []
        USE_RESOLVED_TITLE = True
        if articles:
            for id, article in articles.items():
                source = 'pocket'
                if USE_RESOLVED_TITLE:
                    title = article.get('resolved_title')
                else:
                    title = article.get('given_title')
                url = article.get('given_url')
                status = article.get('status')
                authors = article.get('authors')
                excerpt = article.get('excerpt')
                images = article.get('images')
                time_added = int(article.get('time_added', 0)) * 1000
                time_read = int(article.get('time_read', 0)) * 1000
                dt_added = tools.dt_from_ts(time_added)
                dt_read = tools.dt_from_ts(time_read) if time_read else None
                tags = article.get('tags', {}).keys()
                word_count = int(article.get('word_count', 0))
                favorite = int(article.get('favorite', 0)) == 1
                image_url = None
                author = None
                if images:
                    first_image = images.get('1')
                    if first_image:
                        image_url = first_image.get('src')
                if authors:
                    author_keys = authors.keys()
                    if author_keys:
                        author = authors.get(author_keys[0], {}).get('name')
                archived = int(status) == 1
                read = archived and (not tags or 'unread' not in tags)
                r = Readable.CreateOrUpdate(user, source_id=id, title=title, url=url,
                                            image_url=image_url, author=author,
                                            excerpt=excerpt, favorite=favorite,
                                            dt_added=dt_added, word_count=word_count,
                                            dt_read=dt_read,
                                            tags=tags, source=source, read=read)
                if r:
                    r.Update(read=archived, favorite=favorite, dt_read=dt_read)
                    save.append(r)
                    readables.append(r)
        ndb.put_multi(save)  # Save all
        Readable.put_sd_batch(save)
        success = True
    else:
        logging.debug(res.headers)
    return (success, readables, latest_timestamp)
Example 16
def sync(user, access_token):
    '''
    Return JSON array {title, author, isbn, image}

    Sample dict from pocket:

    {u'resolved_url': u'https://arxiv.org/abs/1701.06538', u'given_title': u'', u'is_article': u'1', u'sort_id': 16, u'word_count': u'221', u'status': u'0', u'has_image': u'0', u'given_url': u'https://arxiv.org/abs/1701.06538', u'favorite': u'0', u'has_video': u'0', u'time_added': u'1485774143', u'time_updated': u'1485774143', u'time_read': u'0', u'excerpt': u'Authors: Noam Shazeer, Azalia Mirhoseini, Krzysztof Maziarz, Andy Davis, Quoc Le, Geoffrey Hinton, Jeff Dean  Abstract: The capacity of a neural network to absorb information is limited by its number of parameters.', u'resolved_title': u'Title: Outrageously Large Neural Networks: The Sparsely-Gated Mixture-of-Experts Layer', u'authors': {u'32207876': {u'url': u'', u'author_id': u'32207876', u'item_id': u'1576987151', u'name': u'cscs.CLcs.NEstatstat.ML'}}, u'resolved_id': u'1576987151', u'item_id': u'1576987151', u'time_favorited': u'0', u'is_index': u'0'}
    {u'resolved_url': u'http://lens.blogs.nytimes.com/2012/10/09/looking-into-the-eyes-of-made-in-china/', u'given_title': u'http://lens.blogs.nytimes.com/2012/10/09/looking-into-the-eyes-of-made-in-c', u'is_article': u'1', u'sort_id': 99, u'word_count': u'800', u'status': u'1', u'has_image': u'0', u'given_url': u'http://lens.blogs.nytimes.com/2012/10/09/looking-into-the-eyes-of-made-in-china/?partner=rss&emc=rss&smid=tw-nytimes', u'favorite': u'0', u'has_video': u'0', u'time_added': u'1349951324', u'time_updated': u'1482284773', u'time_read': u'1482284772', u'excerpt': u'Your clothes, your child\u2019s toys, even the device you use to read these words may have been made in China. They are among the $100 billion of goods that the United States imports from China each year \u2014 an exchange that has become an important issue in the 2012 presidential campaign.', u'resolved_title': u'Looking Into the Eyes of &#8216;Made in China&#8217;', u'authors': {u'3024958': {u'url': u'', u'author_id': u'3024958', u'item_id': u'233921121', u'name': u'KERRI MACDONALD'}}, u'resolved_id': u'233843309', u'item_id': u'233921121', u'time_favorited': u'0', u'is_index': u'0'}
    '''
    dt = datetime.now() - timedelta(days=7)
    init_sync_since = tools.unixtime(dt, ms=False)
    TS_KEY = 'pocket_last_timestamp'  # Seconds
    since_timestamp = user.get_integration_prop(TS_KEY, init_sync_since)
    data = urllib.urlencode({
        'access_token': access_token,
        'consumer_key': POCKET_CONSUMER_KEY,
        'detailType': 'complete',
        'since': since_timestamp,
        'state': 'all'
    })
    success = False
    logging.debug("Syncing pocket for %s since %s" % (user, dt))
    res = urlfetch.fetch(
        url=GET_ENDPOINT,
        payload=data,
        method=urlfetch.POST,
        deadline=60,
        validate_certificate=True)
    logging.debug(res.status_code)
    latest_timestamp = 0
    readables = []
    if res.status_code == 200:
        data = json.loads(res.content)
        articles = data.get('list', {})
        latest_timestamp = data.get('since', 0)  # Pocket's 'since' watermark for incremental sync
        save = []
        USE_RESOLVED_TITLE = True
        if articles:
            for id, article in articles.items():
                source = 'pocket'
                if USE_RESOLVED_TITLE:
                    title = article.get('resolved_title')
                else:
                    title = article.get('given_title')
                url = article.get('given_url')
                status = article.get('status')
                authors = article.get('authors')
                excerpt = article.get('excerpt')
                images = article.get('images')
                time_added = int(article.get('time_added', 0)) * 1000
                time_read = int(article.get('time_read', 0)) * 1000
                dt_added = tools.dt_from_ts(time_added)
                dt_read = tools.dt_from_ts(time_read) if time_read else None
                tags = article.get('tags', {}).keys()
                word_count = int(article.get('word_count', 0))
                favorite = int(article.get('favorite', 0)) == 1
                image_url = None
                author = None
                if images:
                    first_image = images.get('1')
                    if first_image:
                        image_url = first_image.get('src')
                if authors:
                    author_keys = authors.keys()
                    if author_keys:
                        author = authors.get(author_keys[0], {}).get('name')
                archived = int(status) == 1
                read = archived and (not tags or 'unread' not in tags)
                r = Readable.CreateOrUpdate(user, source_id=id, title=title, url=url,
                                            image_url=image_url, author=author,
                                            excerpt=excerpt, favorite=favorite,
                                            dt_added=dt_added, word_count=word_count,
                                            dt_read=dt_read,
                                            tags=tags, source=source, read=read)
                if r:
                    r.Update(read=archived, favorite=favorite, dt_read=dt_read)
                    save.append(r)
                    readables.append(r)
        ndb.put_multi(save)  # Save all
        Readable.put_sd_batch(save)
        user.set_integration_prop(TS_KEY, latest_timestamp)
        success = True
    else:
        logging.debug(res.headers)
    return (success, readables, latest_timestamp)
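This fuller variant seeds the first sync window with tools.unixtime, the inverse of dt_from_ts, and persists Pocket's returned since watermark between runs. A sketch of the assumed helper, matching the ms flag in the call above (see the dt_from_ts sketch after Example 1):

import calendar

def unixtime(dt, ms=True):
    # Assumed inverse of dt_from_ts: datetime -> epoch seconds
    # (ms=False, as Pocket's 'since' parameter expects) or milliseconds.
    seconds = calendar.timegm(dt.timetuple())
    return seconds * 1000 if ms else seconds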