def iter_users(self):
    """Yield a ``User`` for every entry of every department in the JSON doc.

    Reads ``self.doc['data']`` (a list of departments, each with a
    ``users`` list) and maps the JSON fields onto User attributes.
    Contract dates are only set when present and non-empty.
    """
    for department in self.doc['data']:
        for entry in department['users']:
            user = User()
            user.id = entry['id']
            user.name = entry['displayName']
            start = entry['dtContractStart']
            if start:
                user.start = parse_date(start)
            end = entry['dtContractEnd']
            if end:
                user.end = parse_date(end)
            yield user
def video_info(url):
    """Fetch info about a video using youtube-dl

    :param url: URL of the web page containing the video
    :rtype: :class:`weboob.capabilities.video.Video`
    """
    if not MediaPlayer._find_in_path(os.environ['PATH'], 'youtube-dl'):
        raise Exception('Please install youtube-dl')

    try:
        output = subprocess.check_output(['youtube-dl', '-f', 'best', '-J', url])
    except subprocess.CalledProcessError:
        # youtube-dl could not extract anything for this URL
        return
    info = json.loads(output)

    video = BaseVideo(id=url)
    # Falsy/missing JSON values become NotAvailable.
    video.title = info.get('title') or NotAvailable
    video.ext = info.get('ext') or NotAvailable
    video.description = info.get('description') or NotAvailable
    video.url = info['url']
    video.duration = info.get('duration') or NotAvailable
    video.author = info.get('uploader') or NotAvailable
    video.rating = info.get('average_rating') or NotAvailable
    if info.get('thumbnail'):
        video.thumbnail = Thumbnail(info['thumbnail'])
    date_str = info.get('upload_date', info.get('release_date'))
    if date_str:
        video.date = parse_date(date_str)
    return video
def get_single_history(self):
    """Yield ``Transaction`` objects parsed from the account operation table.

    The page omits the year on operation dates, so parsed dates falling
    more than ~60 days outside today are shifted by one year to the
    plausible side.
    """
    today = ddate.today()
    window = timedelta(days=60)  # random duration
    debit_date = None
    for row in self.doc.xpath('//table[@id="operation"]/tbody/tr'):
        cells = row.findall('td')
        text_of = CleanText('.', children=False)
        raw = text_of(cells[self.COM_TR_TEXT])
        comment = text_of(cells[self.COM_TR_COMMENT])
        if comment:
            # comment rows announce the debit date for following rows
            debit_date = re.sub(u'Débit au ', '', comment)
        debit = text_of(cells[self.COM_TR_VALUE])
        op_date = parse_date(text_of(cells[self.COM_TR_DATE]))
        # year is missing on the page: re-anchor around today
        if op_date > today + window:
            op_date = op_date.replace(year=op_date.year - 1)
        elif op_date < today - window:
            op_date = op_date.replace(year=op_date.year + 1)
        transaction = Transaction()
        transaction.parse(debit_date or op_date, re.sub(r'[ ]+', ' ', raw), vdate=op_date)
        transaction.rdate = transaction.vdate or transaction.date
        transaction.set_amount("", debit)
        yield transaction
def video_info(url):
    """Fetch info about a video using youtube-dl

    :param url: URL of the web page containing the video
    :rtype: :class:`weboob.capabilities.video.Video`
    """
    if not MediaPlayer._find_in_path(os.environ['PATH'], 'youtube-dl'):
        raise Exception('Please install youtube-dl')

    try:
        raw = subprocess.check_output(
            ['youtube-dl', '-f', 'best', '-J', url])
    except subprocess.CalledProcessError:
        return
    data = json.loads(raw)

    video = BaseVideo(id=url)
    # Map youtube-dl JSON keys onto video attributes; falsy -> NotAvailable.
    for attr, key in (('title', 'title'),
                      ('ext', 'ext'),
                      ('description', 'description'),
                      ('duration', 'duration'),
                      ('author', 'uploader'),
                      ('rating', 'average_rating')):
        setattr(video, attr, data.get(key) or NotAvailable)
    video.url = data['url']
    if data.get('thumbnail'):
        video.thumbnail = Thumbnail(data['thumbnail'])
    date_str = data.get('upload_date', data.get('release_date'))
    if date_str:
        video.date = parse_date(date_str)
    return video
def iter_resources(self, objs, split_path):
    """
    Iter events by category
    """
    if not split_path and self.ASSOCIATED_CATEGORIES != 'ALL':
        # root level: expose one collection per supported category
        for category in self.ASSOCIATED_CATEGORIES:
            yield Collection([category], category)
    elif len(split_path) == 1 and split_path[0] in self.ASSOCIATED_CATEGORIES:
        # inside a category: search all upcoming events for it
        query = Query()
        query.categories = split_path
        query.start_date = datetime.combine(parse_date('today'), time.min)
        query.end_date = parse_date('')
        query.city = u''
        for event in self.search_events(query):
            yield event
def iter_documents(self, subid):
    """Yield one PDF bill ``Document`` per item in the JSON response.

    :param subid: subscription id, used to namespace document ids
    """
    for item in self.doc['data']['items']:
        document = Document()
        document.id = '%s_%s' % (subid, item['id'])
        document._docid = item['id']
        document.label = item['import']['name']
        document.date = parse_date(item['import']['endDate'])
        document.url = urljoin(self.url, '/pagga/download/%s' % document._docid)
        document.type = DocumentTypes.BILL
        document.format = 'pdf'
        yield document
def _fetch_objects(self, objs):
    """Return ``(objects, collections)`` for the current working path.

    At the root, one ``Collection`` per known category is listed; inside a
    single category, matching events are fetched from the backends.
    Backend call errors are routed to the standard error handler.
    """
    objects = []
    collections = []
    path = self.working_path.get()
    try:
        if not path:
            for category in CATEGORIES.values:
                col = Collection([category], category)
                col.backend = u'boobcoming'
                collections.append(col)
        elif len(path) == 1 and path[0] in CATEGORIES.values:
            query = Query()
            query.categories = path
            query.start_date = datetime.combine(parse_date('today'), time.min)
            query.end_date = parse_date('')
            query.city = ''
            for backend, event in self.do('search_events', query):
                if event:
                    objects.append(event)
    except CallErrors as errors:
        self.bcall_errors_handler(errors, CollectionNotFound)

    return (objects, collections)
def iter_coming_new(self, account):
    """Yield coming transactions for *account* (pending, then future posted)."""
    # "pending" have no vdate and debit date is in future
    self.js_periods.go(headers={'account_token': account._token})
    debit_date = parse_date(self.page.get_periods()[0][1])
    self.js_pending.go(offset=0, headers={'account_token': account._token})
    for tr in self.page.iter_history():
        tr.date = debit_date
        yield tr

    # "posted" have a vdate but debit date can be future or past
    today = datetime.date.today()
    for tr in self.iter_posted_new(account):
        if tr.date <= today:
            # history is ordered: everything past this point is in the past
            break
        yield tr
def do_list(self, line):
    """
    list [PATTERN]
    List upcoming events, pattern can be an english or french week day, 'today' or a date (dd/mm/yy[yy])
    """
    # NOTE: this variant uses Python 2 print syntax, kept as-is.
    self.change_path([u"events"])
    if not line:
        date_from = datetime.now()
        date_to = None
    else:
        _date = parse_date(line)
        if not _date:
            print >> sys.stderr, "Invalid argument: %s" % self.get_command_help("list")
            return 2
        date_from = datetime.combine(_date, time.min)
        date_to = datetime.combine(_date, time.max)

    for backend, event in self.do("list_events", date_from, date_to):
        self.cached_format(event)
def do_list(self, line):
    """
    list [PATTERN]
    List upcoming events, pattern can be an english or french week day, 'today' or a date (dd/mm/yy[yy])
    """
    self.change_path([u'events'])
    if not line:
        # no pattern: list from now on, open-ended
        date_from = datetime.now()
        date_to = None
    else:
        _date = parse_date(line)
        if not _date:
            print('Invalid argument: %s' % self.get_command_help('list'), file=self.stderr)
            return 2
        # pattern given: restrict to that single day
        date_from = datetime.combine(_date, time.min)
        date_to = datetime.combine(_date, time.max)

    for event in self.do('list_events', date_from, date_to):
        self.cached_format(event)
def ask_date(self, txt, default=""):
    """Prompt the user with *txt* and return the reply parsed as a date."""
    answer = self.ask(txt, default=default)
    return parse_date(answer)
def ask_date(self, txt, default=''):
    """Ask *txt* interactively and parse the answer into a date."""
    return parse_date(self.ask(txt, default=default))