def parse(cls, api, json):
    """Build a model instance from a decoded JSON mapping.

    Every key in *json* becomes an attribute on the new instance;
    'created_at' is converted from its string form into a datetime
    before being stored.
    """
    result = cls(api)
    for field, value in json.items():
        # Timestamps arrive as strings and must be decoded first.
        converted = parse_datetime(value) if field == 'created_at' else value
        setattr(result, field, converted)
    return result
def parse(cls, api, json):
    """Construct a direct-message model from decoded JSON.

    The 'sender' and 'recipient' sub-objects are parsed into User
    models and 'created_at' into a datetime; every other key is
    copied verbatim onto the instance.
    """
    message = cls(api)
    for field, value in json.items():
        if field in ('sender', 'recipient'):
            setattr(message, field, User.parse(api, value))
        elif field == 'created_at':
            setattr(message, field, parse_datetime(value))
        else:
            setattr(message, field, value)
    return message
def get_log(self, offset, i):
    """Open the configured write-log at *offset*, optionally skipping
    forward to the timestamp carried in the request input *i*.

    When the byte offset is not known, the caller can supply
    ``i.timestamp`` as a skip_till query instead; a malformed timestamp
    or a failed skip is surfaced as an HTTP 500.
    """
    log = logreader.LogFile(config.writelog)
    log.seek(offset)
    # when the offset is not known, skip_till parameter can be used to query.
    if i.timestamp:
        try:
            timestamp = common.parse_datetime(i.timestamp)
            logreader.LogReader(log).skip_till(timestamp)
        # Fixed: "except Exception, e" is Python-2-only syntax; the
        # "as e" form (PEP 3110) works on Python 2.6+ and 3.x alike.
        except Exception as e:
            raise web.internalerror(str(e))
def parse(cls, api, json):
    """Build a User model from decoded JSON.

    'created_at' becomes a datetime, 'status' becomes a Status model,
    and 'following' is normalised to a strict bool (Twitter sends null
    when it is false); all other keys are copied verbatim.
    """
    user = cls(api)
    for field, value in json.items():
        if field == 'created_at':
            setattr(user, field, parse_datetime(value))
        elif field == 'status':
            setattr(user, field, Status.parse(api, value))
        elif field == 'following':
            # twitter sets this to null if it is false
            setattr(user, field, value is True)
        else:
            setattr(user, field, value)
    return user
def touch_creation_time(self):
    """Stamp the file's modification time from its parsed filename.

    The wall-clock portion is fixed per episode part: 18:10 for part
    'b', 17:00 otherwise.  The file's access time is preserved; only
    the mtime is rewritten.
    """
    self.parse_filename()
    if self.part == 'b':
        timeofday = '18:10:00'
    else:
        timeofday = '17:00:00'
    dt = common.parse_datetime('%sT%s' % (self.date, timeofday))
    new_mtime = time.mktime(dt.timetuple())
    self.trace(7, 'Setting modification time to %s (%s seconds since 1970)' % (dt, new_mtime))
    # Re-read the current access time so utime leaves it untouched.
    st = os.stat(self.filename)
    os.utime(self.filename, (st.st_atime, new_mtime))
def parse(cls, api, json):
    """Build a Status model from decoded JSON.

    Special keys:
      * 'user'             -- parsed via the api's model factory and
                              stored both as 'author' and as the
                              deprecated 'user' attribute.
      * 'created_at'       -- parsed into a datetime.
      * 'source'           -- an HTML anchor is unwrapped into 'source'
                              plus a separate 'source_url'; plain text
                              is stored as-is with source_url None.
      * 'retweeted_status' -- recursively parsed as a Status.
    Every other key is copied verbatim.
    """
    status = cls(api)
    for key, raw in json.items():
        if key == 'user':
            factory = api.parser.model_factory
            author = getattr(factory, 'user').parse(api, raw)
            setattr(status, 'author', author)
            setattr(status, 'user', author)  # DEPRECIATED
        elif key == 'created_at':
            setattr(status, key, parse_datetime(raw))
        elif key == 'source':
            if '<' in raw:
                setattr(status, key, parse_html_value(raw))
                setattr(status, 'source_url', parse_a_href(raw))
            else:
                setattr(status, key, raw)
                setattr(status, 'source_url', None)
        elif key == 'retweeted_status':
            setattr(status, key, Status.parse(api, raw))
        else:
            setattr(status, key, raw)
    return status
def parse_page(self):
    """Parse the loaded program HTML page into self.episodes_.

    Fills in page-level metadata (author, description, title, logo,
    lang) from the document <head>, then scans the body for per-episode
    <div> blocks and delegates each one to parse_episode().  Returns
    the resulting episode list.
    """
    # good links are
    # <a href="/sida/avsnitt/587231?programid=2480&playepisode=587231" aria-label="Lyssna(161 min)" class="btn btn-solid play-symbol play-symbol-wide play" data-require="modules/play-on-click">
    # <a href="/sida/avsnitt/587242?programid=2480" class="btn2 btn2-image btn2-image-foldable" data-require="modules/play-on-click">
    self.episodes_ = []
    # The page carries its render time in a hidden span, e.g.
    # <span class="page-render-timestamp hidden" data-timestamp="2015-07-28 19:11:25" />
    # Fall back to "now" when the span is missing.
    html_timestamp = datetime.datetime.today()
    timestamp_span = self.html.find('//span[@class="page-render-timestamp hidden"]')
    if not timestamp_span is None:
        html_timestamp = common.parse_datetime(timestamp_span.attrib['data-timestamp'])
    html_root = self.html.getroot()
    # NOTE(review): assumes <head> is always the first child of <html> — confirm.
    head = html_root[0]
    author_meta = head.find('meta[@name="author"]')
    self.author = '' if author_meta is None else author_meta.attrib['content']
    description_meta = head.find('meta[@name="description"]')
    self.description = '' if description_meta is None else description_meta.attrib['content']
    # keywords is only a fallback title; a real <title> overrides it below.
    keywords_meta = head.find('meta[@name="keywords"]')
    self.title = '' if keywords_meta is None else keywords_meta.attrib['content']
    title = head.find('title')
    if not title is None:
        self.title = title.text
    # Strip the site's boilerplate "Alla avsnitt ... - Sveriges Radio"
    # framing and leftover separator characters from the page title.
    prefix = 'Alla avsnitt'
    postfix = 'Sveriges Radio'
    trims = '|- '
    if self.title.startswith(prefix):
        self.title = self.title[len(prefix):]
    if self.title.endswith(postfix):
        self.title = self.title[:-len(postfix)]
    self.title = self.title.strip(trims)
    if self.program_prefix:
        self.title = self.program_prefix + ' - ' + self.title
        self.trace(7, 'After adding program_prefix title became ' + self.title)
    logo_meta = XmlHandler.find_element_attribute(head, 'meta', 'name', "*:image")
    self.logo = '' if logo_meta is None else logo_meta.attrib['content']
    self.lang = self.html.getroot().attrib.get('lang', '')
    # episodeExplorerRoot = self.html.find('//div[@class="episode-explorer__list"]')
    # episodeExplorerRoot = self.html.find('//div[@class="episode-list-item th-p2 th-override"]')
    divs_to_search = XmlHandler.findall_element_attribute(self.html.getroot(), 'div', 'class', 'episode-list-item *')
    # divs_to_search = XmlHandler.findall_element_attribute(episodeExplorerRoot, 'div', 'class', "episode-list-item__header")
    # Earlier site layouts used different containers; kept for reference:
    #for hpath in [
    #        '//div[@class="episode-latest-body"]',
    #        '//div[@class="audio-box-content"]',
    #        '//div[@class="audio-episode-content"]',
    #        '//div[@class="episode-list-item__info-teop"]',
    #        '//div[@class="episode-list__item__title"]',
    #        '//div[@class="audio-heading__title"]',
    #        ]:
    #    divs = self.html.findall(hpath)
    #    if len(divs) == 0:
    #        self.trace(7, 'at ' + hpath + ' nothing found')
    #    else:
    #        self.trace(7, 'at ' + hpath + ' found ', len(divs), ' things')
    #    divs_to_search.extend(divs)
    if len(divs_to_search) == 0:
        self.trace(3, 'When searching the HTML page, nothing found')
    for div in divs_to_search:
        # parse_episode returns None for divs that are not real episodes.
        avsnitt = self.parse_episode(div, html_timestamp)
        if avsnitt is None:
            continue
        self.episodes_.append(avsnitt)
    self.validate_episodes()
    return self.episodes_