def on_callback(self, request):
    """Webhook endpoint: publish a Draft.in document as a blog post.

    Expects a POST request carrying a ``secret`` query parameter matching
    the bot's configuration and a JSON ``payload`` form field.  Writes the
    document into the documents directory as Markdown, then runs the
    configured update command.
    """
    if request.method != 'POST':
        request.respond('This hook only supports POST method.')
        return
    if request.GET.get('secret', [None])[0] != self.bot.config.draftin_secret:
        request.respond('Wrong secret was specified')
        return

    payload = anyjson.deserialize(request.POST['payload'][0])
    title = payload['name']
    content = payload['content']
    slug = slugify(title)
    created_at = times.to_universal(payload['created_at'])
    updated_at = times.to_universal(payload['updated_at'])
    timezone = self.bot.config.timezone

    filename = os.path.join(self.bot.config.documents_dir, slug + '.md')
    with open(filename, 'w') as f:
        post_content = self.template.format(
            title=title,
            content=content,
            slug=slug,
            created_at=times.format(created_at, timezone, '%Y-%m-%d %H:%M'),
            updated_at=times.format(updated_at, timezone, '%Y-%m-%d %H:%M'))
        f.write(post_content.encode('utf-8'))

    try:
        subprocess.check_output(self.bot.config.update_command,
                                stderr=subprocess.STDOUT,
                                shell=True)
    except subprocess.CalledProcessError as e:  # `as` spelling (py2.6+/py3)
        request.respond(u'I tried to update a blog, but there was an error: '
                        + e.output.encode('utf-8'))
    else:
        request.respond('Done, published')
def test_to_universal_without_tzinfo(self):
    """Convert local dates without timezone info to universal date"""
    # Strip tzinfo off, simulating a New York and an Amsterdam user
    # calling datetime.now() (which yields naive datetimes).
    naive_ny = self.time_in_ny.replace(tzinfo=None)
    naive_ams = self.time_in_ams.replace(tzinfo=None)

    # A naive datetime needs an explicit source timezone...
    est = pytz.timezone('EST')
    self.assertEquals(times.to_universal(naive_ny, est), self.sometime_univ)

    # ...which may also be passed as a plain string
    self.assertEquals(times.to_universal(naive_ams, 'Europe/Amsterdam'),
                      self.sometime_univ)
def _parse_row(self, row, tags=None):
    """Turn a single schedule table row into a Showtime object."""
    movie_link = row.cssselect_first('.movie a:not(.tag)')
    url = movie_link.link()
    title = movie_link.text_content()

    raw_date = row.cssselect_first('.date').text_content(whitespace=True)
    date_part, time_part = re.split(r'[\r\n]+', raw_date)
    hours_minutes = [int(piece) for piece in time_part.split(':')]
    local_dt = datetime.datetime.combine(parsers.date_cs(date_part),
                                         datetime.time(*hours_minutes))
    starts_at = times.to_universal(local_dt, 'Europe/Prague')

    tags = self._parse_tags(row, tags)
    details = self._parse_details(url)
    return Showtime(
        cinema=cinema,
        film_scraped=ScrapedFilm(title_main_scraped=title, url=url,
                                 **details),
        starts_at=starts_at,
        tags=tags,
        url=self.url,
    )
def _parse_time(self, el, date):
    """Parses time from given element, combines it with given date and
    returns corresponding datetime object in UTC.
    """
    parts = [int(piece) for piece in el.text_content().split(':')]
    local_dt = datetime.datetime.combine(date, datetime.time(*parts))
    return times.to_universal(local_dt, timezone='Europe/Prague')
def create_event():
    """Create a new event from the submitted form and invite contacts.

    On GET (or on an invalid POST) renders the form pre-filled with a
    default start time of tomorrow 20:00 in the user's local timezone.
    """
    form = EventForm()
    if form.validate_on_submit():
        event = Event()
        # First transaction: persist the event itself.
        with db.transaction as session:
            event.name = form.name.data
            event.venue = form.venue.data
            event.description = form.description.data
            event.user = current_user
            # Form times are local to the user; store as UTC.
            event.starts_at = times.to_universal(form.starts_at.data,
                                                 current_user.timezone)
            session.add(event)
        # Second transaction: record invitees once the event row exists.
        with db.transaction:
            event.contacts_invited_ids_str = form.contacts_invited_ids_str.data
        send_email_invites(event)
        return redirect(url_for('facebook_event', id=event.id))
    else:
        # default starts_at: tomorrow at 20:00, user-local time
        td = datetime.timedelta(days=1)
        dt = times.to_local(times.now(), current_user.timezone) + td
        dt = datetime.datetime.combine(dt.date(), datetime.time(20, 00, 00))
        form.starts_at.data = dt
    return render_template('create_event.html', form=form)
def _parse_event(self, event):
    """Build a Showtime from a single calendar event."""
    starts_at = times.to_universal(event.get('dtstart').dt)
    title_main = event.get('summary')

    title_orig = year = length = None
    tags = []
    match = self.desc_re.match(event.get('description'))
    if match:
        if match.group('title'):
            title_orig = match.group('title').strip()
        year = int(match.group('year'))
        length = int(match.group('min'))
        # TODO scrape tags according to new implementation of tags
        # presented in https://github.com/honzajavorek/zitkino.cz/issues/97
        tags = [self.tags_map.get(part.strip())
                for part in match.group('tags').split(',')]

    return Showtime(
        cinema=cinema,
        film_scraped=ScrapedFilm(
            title_main_scraped=title_main,
            title_orig_scraped=title_orig,
            year=year,
            length=length,
        ),
        starts_at=starts_at,
        tags={tag: None for tag in tags if tag},
        url='http://kinonadobraku.cz',
    )
def date_time_year(date, time, year=None, tz='Europe/Prague'):
    """Parses strings representing parts of datetime and combines them
    together. Resulting datetime is in UTC.

    :param date: day and month part, e.g. ``'31. 12.'``
    :param time: time of day, with ``:`` or ``.`` as separator
    :param year: optional year; defaults to the current year
    :param tz: source timezone the parsed datetime is interpreted in
    :raises ValueError: if the string matches none of the known formats
    """
    dt_string = u'{date} {time} {year}'.format(
        date=date,
        time=time,
        year=year or times.now().year,
    )
    possible_formats = (
        '%d. %m. %H:%M %Y',
        '%d. %m. %H.%M %Y',
    )
    # 'fmt', not 'format', to avoid shadowing the builtin
    for fmt in possible_formats:
        try:
            dt = datetime.datetime.strptime(dt_string, fmt)
        except ValueError:
            continue
        return times.to_universal(dt, tz)
    raise ValueError(dt_string)
def _parse_event(self, event):
    """Build a priced Showtime from a single calendar event."""
    starts_at = times.to_universal(event.get('dtstart').dt)
    title_main = event.get('summary')
    titles = [title_main]

    title_orig = year = length = None
    tags = []
    match = self.desc_re.match(event.get('description'))
    if match:
        original_title = match.group('title')
        if original_title:
            title_orig = original_title.strip()
            titles.append(title_orig)
        year = int(match.group('year'))
        length = int(match.group('min'))
        tags = [self.tags_map.get(raw.strip())
                for raw in match.group('tags').split(',')]

    return Showtime(
        cinema=cinema,
        film_scraped=ScrapedFilm(
            title_main=title_main,
            title_orig=title_orig,
            titles=titles,
            year=year,
            length=length,
        ),
        starts_at=starts_at,
        tags=tags,
        price=self.price,
    )
def fetch_user_events(login):
    """Fetch new GitHub "received events" for the given user and store
    them in the ``received_events`` collection.

    Stops paging as soon as the most recently saved event id is seen
    again, so only new events are written.
    """
    logger = logging.getLogger('events')
    logger.debug('fetching events for %s' % (login, ))
    db = get_db()
    user = db.users.find_one({'login': login})

    # calculating time of last saved event
    last_item = list(
        db.received_events.find({
            'gitorama.login': user['login']
        }).sort([('created_at', -1)])[:1])
    if last_item:
        last_item = last_item[0]['id']  # this is a GitHub's event id
    else:
        last_item = None

    gh = net.GitHub(token=user['gitorama']['token'])
    for event in gh.get_iter('/users/{0}/received_events'.format(
            user['login']), per_page=30):
        if event['id'] == last_item:
            # don't fetch more than needed
            # this item already saved
            break
        # normalize GitHub's timestamp string into a UTC datetime
        event['created_at'] = times.to_universal(event['created_at'])
        event['gitorama'] = {'login': user['login']}
        db.received_events.save(event)
def _parse_row(self, row, tags=None):
    """Create a Showtime out of one listing row."""
    anchor = row.cssselect_first('.movie a:not(.tag)')
    url, title = anchor.link(), anchor.text_content()

    date_text = row.cssselect_first('.date').text_content(whitespace=True)
    date_str, time_str = re.split(r'[\r\n]+', date_text)
    showing_time = datetime.time(*(int(n) for n in time_str.split(':')))
    starts_at = times.to_universal(
        datetime.datetime.combine(parsers.date_cs(date_str), showing_time),
        'Europe/Prague')

    return Showtime(
        cinema=cinema,
        film_scraped=ScrapedFilm(
            title_main_scraped=title,
            url=url,
            **self._parse_details(url)
        ),
        starts_at=starts_at,
        tags=self._parse_tags(row, tags),
        url=self.url,
    )
def _parse_standalone_dates(self, dates_text):
    """Takes text with date & time information, parses out and generates
    standalone showtimes.

    Yields universal (UTC) datetimes.
    """
    # date ranges are handled elsewhere; strip them out first
    dates_text = self.range_re.sub('', dates_text)
    for match in self.standalone_re.finditer(dates_text):
        date_args_list = []

        # standalone date; use a list comprehension (not map()) so the
        # result is concatenable on Python 3 as well
        date_args_list.append([int(n) for n in [
            self._determine_year(match.group(2)),  # year
            match.group(2),  # month
            match.group(1),  # day
        ]])

        # date+date, let's process the second one
        if match.group(3):
            date_args_list.append([int(n) for n in [
                self._determine_year(match.group(5)),  # year
                match.group(5),  # month
                match.group(4),  # day
            ]])

        # parse times
        time_args_list = self._parse_times(match.group(6))

        # construct and yield datetimes
        for date_args in date_args_list:
            for time_args in time_args_list:
                yield times.to_universal(
                    datetime(*(date_args + time_args)), self.tz
                )
def _parse_event(self, event):
    """Convert one calendar event into a Showtime."""
    starts_at = times.to_universal(event.get('dtstart').dt)
    title_main = event.get('summary')
    title_orig = year = length = None
    tags = []

    description = event.get('description')
    match = self.desc_re.match(description)
    if match:
        raw_title = match.group('title')
        if raw_title:
            title_orig = raw_title.strip()
        year = int(match.group('year'))
        length = int(match.group('min'))
        # TODO scrape tags according to new implementation of tags
        # presented in https://github.com/honzajavorek/zitkino.cz/issues/97
        tag_names = match.group('tags').split(',')
        tags = [self.tags_map.get(name.strip()) for name in tag_names]

    film = ScrapedFilm(
        title_main_scraped=title_main,
        title_orig_scraped=title_orig,
        year=year,
        length=length,
    )
    return Showtime(
        cinema=cinema,
        film_scraped=film,
        starts_at=starts_at,
        tags={tag: None for tag in tags if tag},
        url='http://kinonadobraku.cz',
    )
def _parse_date_ranges(self, dates_text):
    """Takes text with date & time information, parses out and generates
    showtimes within date ranges.
    """
    for match in self.range_re.finditer(dates_text):
        # days
        first_day, last_day = int(match.group(1)), int(match.group(3))
        # months
        first_month, last_month = int(match.group(2)), int(match.group(4))
        # times
        time_args_list = self._parse_times(match.group(5))
        # years are inferred from the months
        first_year = self._determine_year(first_month)
        last_year = self._determine_year(last_month)

        # bounds for rrule
        range_start = datetime(first_year, first_month, first_day)
        range_end = datetime(last_year, last_month, last_day)

        # one datetime per day per showtime
        for day in rrule.rrule(rrule.DAILY, dtstart=range_start,
                               until=range_end):
            for time_args in time_args_list:
                yield times.to_universal(
                    datetime.combine(day, time(*time_args)), self.tz
                )
def test_to_universal_without_tzinfo(self):
    """Convert local dates without timezone info to universal date"""
    # Same times as above, but naive — as if produced by datetime.now()
    # in New York and Amsterdam respectively.
    ny_naive = self.time_in_ny.replace(tzinfo=None)
    ams_naive = self.time_in_ams.replace(tzinfo=None)

    # Naive datetimes require the source timezone to be given explicitly
    est = 'EST'
    self.assertEquals(times.to_universal(ny_naive, est),
                      self.sometime_univ)

    # ...the timezone may simply be a string name
    self.assertEquals(times.to_universal(ams_naive, 'Europe/Amsterdam'),
                      self.sometime_univ)
def test_to_universal_with_unix_timestamp(self):
    """Convert UNIX timestamps to universal date"""
    unix_time = 1328257004.456  # as returned by time.time()
    expected = datetime(2012, 2, 3, 8, 16, 44, 456000)
    self.assertEquals(times.to_universal(unix_time), expected)
def _parse_date_ranges(self, dates_text):
    """Takes text with date & time information, parses out and generates
    showtimes within date ranges.
    """
    for m in self.range_re.finditer(dates_text):
        # day and month boundaries of the range
        start_day, start_month = int(m.group(1)), int(m.group(2))
        end_day, end_month = int(m.group(3)), int(m.group(4))

        # showtime hours/minutes within each day
        time_args_list = self._parse_times(m.group(5))

        # years are derived from the month numbers
        start = datetime(self._determine_year(start_month),
                         start_month, start_day)
        end = datetime(self._determine_year(end_month),
                       end_month, end_day)

        # every day in the range gets every listed showtime
        for day in rrule.rrule(rrule.DAILY, dtstart=start, until=end):
            for time_args in time_args_list:
                yield times.to_universal(
                    datetime.combine(day, time(*time_args)), self.tz)
def test_to_universal_with_tzinfo(self):  # noqa
    """Convert local dates with timezone info to universal date"""
    ny_time = self.time_in_ny
    ams_time = self.time_in_ams

    for aware in (ny_time, ams_time):
        self.assertEquals(times.to_universal(aware), self.sometime_univ)

    self.assertEquals(ny_time.hour, 6)
    self.assertEquals(times.to_universal(ny_time).hour, 11)

    self.assertEquals(ams_time.hour, 12)
    self.assertEquals(times.to_universal(ams_time).hour, 11)

    # Test alias from_local, too
    self.assertEquals(times.from_local(ny_time), self.sometime_univ)
def _parse_event(self, event):
    """Turn one calendar event into a Showtime."""
    starts_at = times.to_universal(event.get('dtstart').dt)
    title_main = event.get('summary')
    titles = [title_main]

    title_orig = year = length = None
    tags = []
    match = self.desc_re.match(event.get('description'))
    if match:
        original = match.group('title')
        if original:
            title_orig = original.strip()
            titles.append(title_orig)
        year = int(match.group('year'))
        length = int(match.group('min'))
        tags = [self.tags_map.get(chunk.strip())
                for chunk in match.group('tags').split(',')]

    film = ScrapedFilm(
        title_main=title_main,
        title_orig=title_orig,
        titles=titles,
        year=year,
        length=length,
    )
    return Showtime(
        cinema=cinema,
        film_scraped=film,
        starts_at=starts_at,
        tags=tags,
    )
def calculate_score(self):
    """Recompute and persist this item's ranking score.

    Hacker-News-style hotness: (love_count - 1) / (age_in_hours + 2)^1.8.
    Saves the score on the instance and returns it.
    """
    gravity = 1.8
    now = times.now()
    then = times.to_universal(self.created)
    hour_age = ceil((now - then).total_seconds() / 60 / 60)
    self.calculated_score = (self.love_count - 1) / pow(hour_age + 2, gravity)
    self.save()
    # parenthesized print: a statement on py2, a call on py3 — same output
    print("%s -- %s -- %s" % (self.calculated_score, hour_age, self.title))
    return (self.love_count - 1) / pow(hour_age + 2, gravity)
def test_local_time_with_tzinfo_to_universal(self):
    """Convert local dates with timezone info to universal date"""
    ny_time = self.sometime_in_newyork
    ams_time = self.sometime_in_amsterdam

    # Both aware datetimes denote the same universal moment
    self.assertEquals(times.to_universal(ny_time), self.sometime_univ)
    self.assertEquals(times.to_universal(ams_time), self.sometime_univ)

    # Local wall-clock hours differ, universal hour is identical
    self.assertEquals(ny_time.hour, 6)
    self.assertEquals(times.to_universal(ny_time).hour, 11)
    self.assertEquals(ams_time.hour, 12)
    self.assertEquals(times.to_universal(ams_time).hour, 11)
def str_to_utc(s, format_str='%Y-%m-%d %H:%M:%S', timezone=settings.TIME_ZONE,
               default=None):
    """Convert a local date string to a UTC-aware datetime.

    :param s: date string in the local timezone
    :param format_str: strptime format of ``s``
    :param timezone: source timezone name
    :param default: fallback returned when parsing fails; if ``None``
        the original exception is re-raised
    """
    try:
        d = str_to_local(s, format_str)
        return times.to_universal(d, timezone).replace(tzinfo=pytz.UTC)
    except Exception:  # narrowed from bare except (let SystemExit etc. pass)
        # ``is not None`` so a falsy default value is still honoured
        if default is not None:
            return default
        raise
def test_persistence_of_empty_jobs(self):  # noqa
    """Storing empty jobs."""
    job = Job()
    job.save()

    stored_date = self.testconn.hget(job.key, 'created_at')
    self.assertEquals(times.to_universal(stored_date),
                      strip_milliseconds(job.created_at))

    # ... and no other keys are stored
    self.assertItemsEqual(self.testconn.hkeys(job.key), ['created_at'])
def test_persistence_of_typical_jobs(self):
    """Storing typical jobs."""
    job = Job.create(func=some_calculation, args=(3, 4), kwargs=dict(z=2))
    job.save()

    stored_date = self.testconn.hget(job.key, "created_at")
    self.assertEquals(times.to_universal(stored_date),
                      strip_milliseconds(job.created_at))

    # ... and no other keys are stored
    self.assertItemsEqual(self.testconn.hkeys(job.key),
                          ["created_at", "data", "description"])
def test_persistence_of_empty_jobs(self):  # noqa
    """Storing empty jobs."""
    job = Job()
    job.save()

    expected = strip_milliseconds(job.created_at)
    actual = times.to_universal(self.testconn.hget(job.key, "created_at"))
    self.assertEquals(actual, expected)

    # ... and no other keys are stored
    self.assertItemsEqual(self.testconn.hkeys(job.key), ["created_at"])
def __call__(self):
    """Scrape the event listing page and yield Showtime objects."""
    resp = self.session.get(self.url)
    html = parsers.html(resp.content, base_url=resp.url)
    for event in html.cssselect('.event'):
        header = event.cssselect_first('h2')
        url = header.link()
        title = header.text_content()

        # split 'main / original' titles; naive, but for now good enough
        title_parts = title.split('/')
        if len(title_parts) == 2:
            title_main, title_orig = title_parts
        else:
            title_main = title
            title_orig = None

        details = event.cssselect_first('.descshort').text_content()
        cat = event.cssselect_first('.title-cat').text_content().lower()

        # collect tags from both titles and the description; stripping
        # matched text out of the titles keeps them clean
        tags = []
        for regexp, tag in self.tag_re:
            if regexp.search(title_main):
                tags.append(tag)
                title_main = regexp.sub('', title_main).strip()
            if title_orig and regexp.search(title_orig):
                tags.append(tag)
                title_orig = regexp.sub('', title_orig).strip()
            if regexp.search(details):
                tags.append(tag)
        # non-film categories become a tag of their own
        if cat != 'filmy':
            tags.append(cat)

        # next screening, local cinema time → UTC
        d = parsers.date_cs(
            event.cssselect_first('.nextdate strong').text
        )
        t = event.cssselect_first('.nextdate .evttime').text_content()
        t = time(*map(int, t.split(':')))
        starts_at = times.to_universal(datetime.combine(d, t), self.tz)

        yield Showtime(
            cinema=cinema,
            film_scraped=ScrapedFilm(
                title_main_scraped=title_main,
                title_orig=title_orig or None,
            ),
            starts_at=starts_at,
            url=url,
            url_booking=self.url_booking,
            tags={tag: None for tag in tags},
        )
def _parse_entry(self, entry):
    """Parse one program entry; returns a Showtime, or None when the
    entry does not describe a film.
    """
    try:
        # only film entries contain a line matching the length regex
        description = next(
            line for line
            in entry.text_content(whitespace=True).splitlines()
            if self.length_re.search(line)
        )
    except StopIteration:
        return None  # it's not a film

    # heading date is 'D.M.Y'; reversed() turns it into (Y, M, D)
    date_el = entry.cssselect_first('h4 span')
    date = datetime.datetime(*reversed(
        [int(n) for n in date_el.text_content().split('.')]
    ))

    time_el = entry.cssselect_first('.start')
    time_match = self.time_re.search(time_el.text_content())
    time = datetime.time(
        int(time_match.group(1)),
        int(time_match.group(2)),
    )
    starts_at = times.to_universal(
        datetime.datetime.combine(date, time), 'Europe/Prague'
    )

    title = date_el.tail

    # pull year/length out of the comma-separated description; subtitle
    # and dubbing notes become tags
    tags = {}
    detail_data = {}
    details = [detail.strip() for detail in description.split(',')]
    for detail in details:
        if self.year_re.match(detail):
            detail_data['year'] = int(detail)
        match = self.length_re.match(detail)
        if match:
            detail_data['length'] = int(match.group(1))
        if 'tit.' in detail or 'titulky' in detail or 'dabing' in detail:
            tags[detail] = None

    return Showtime(
        cinema=cinema,
        film_scraped=ScrapedFilm(
            title_main_scraped=title,
            **detail_data
        ),
        starts_at=starts_at,
        tags=tags,
        url=self.url,
    )
def __call__(self):
    """Scrape the listing page and generate Showtime objects."""
    resp = self.session.get(self.url)
    html = parsers.html(resp.content, base_url=resp.url)
    for event in html.cssselect('.event'):
        header = event.cssselect_first('h2')
        url = header.link()
        title = header.text_content()

        # split 'main / original' title; naive, but for now good enough
        title_parts = title.split('/')
        if len(title_parts) == 2:
            title_main, title_orig = title_parts
        else:
            title_main = title
            title_orig = None

        details = event.cssselect_first('.descshort').text_content()
        cat = event.cssselect_first('.title-cat').text_content().lower()

        # derive tags from titles and description; strip the matched
        # text out of the titles so they remain clean
        tags = []
        for regexp, tag in self.tag_re:
            if regexp.search(title_main):
                tags.append(tag)
                title_main = regexp.sub('', title_main).strip()
            if title_orig and regexp.search(title_orig):
                tags.append(tag)
                title_orig = regexp.sub('', title_orig).strip()
            if regexp.search(details):
                tags.append(tag)
        # any non-film category is itself recorded as a tag
        if cat != 'filmy':
            tags.append(cat)

        # next screening date & time (cinema-local) → UTC
        d = parsers.date_cs(event.cssselect_first('.nextdate strong').text)
        t = event.cssselect_first('.nextdate .evttime').text_content()
        t = time(*map(int, t.split(':')))
        starts_at = times.to_universal(datetime.combine(d, t), self.tz)

        yield Showtime(
            cinema=cinema,
            film_scraped=ScrapedFilm(
                title_main_scraped=title_main,
                title_orig=title_orig or None,
            ),
            starts_at=starts_at,
            url=url,
            url_booking=self.url_booking,
            tags={tag: None for tag in tags},
        )
def test_persistence_of_typical_jobs(self):
    """Storing typical jobs."""
    job = Job.create(func=some_calculation, args=(3, 4), kwargs=dict(z=2))
    job.save()

    expected_date = strip_milliseconds(job.created_at)
    actual_date = times.to_universal(
        self.testconn.hget(job.key, 'created_at'))
    self.assertEquals(actual_date, expected_date)

    # ... and no other keys are stored
    self.assertItemsEqual(self.testconn.hkeys(job.key),
                          ['created_at', 'data', 'description'])
def on_callback(self, request):
    """Webhook: publish an incoming Draft.in document to the blog.

    Checks request method and the shared secret, writes the document as
    a Markdown file, and shells out to the configured update command.
    """
    if request.method != 'POST':
        request.respond('This hook only supports POST method.')
    else:
        secret = request.GET.get('secret', [None])[0]
        if secret != self.bot.config.draftin_secret:
            request.respond('Wrong secret was specified')
        else:
            payload = anyjson.deserialize(request.POST['payload'][0])
            title = payload['name']
            content = payload['content']
            slug = slugify(title)
            created_at = times.to_universal(payload['created_at'])
            updated_at = times.to_universal(payload['updated_at'])
            timezone = self.bot.config.timezone

            with open(
                    os.path.join(self.bot.config.documents_dir,
                                 slug + '.md'), 'w') as f:
                post_content = self.template.format(
                    title=title,
                    content=content,
                    slug=slug,
                    created_at=times.format(created_at, timezone,
                                            '%Y-%m-%d %H:%M'),
                    updated_at=times.format(updated_at, timezone,
                                            '%Y-%m-%d %H:%M'))
                f.write(post_content.encode('utf-8'))

            try:
                subprocess.check_output(self.bot.config.update_command,
                                        stderr=subprocess.STDOUT,
                                        shell=True)
            except subprocess.CalledProcessError as e:  # `as`: py2.6+/py3
                request.respond(
                    u'I tried to update a blog, but there was an error: '
                    + e.output.encode('utf-8'))
            else:
                request.respond('Done, published')
def edit_event(id):
    """Event editing."""
    event = current_user.event_or_404(id)
    form = EventForm(obj=event)

    if form.validate_on_submit():
        with db.transaction as session:
            form.populate_obj(event)
            # stored times are UTC; the submitted value is user-local
            event.starts_at = times.to_universal(form.starts_at.data,
                                                 current_user.timezone)
        send_email_invites(event)
        return redirect(url_for('facebook_event', id=event.id))

    return render_template('edit_event.html', event=event, action='edit',
                           form=form)
def timezone(self, zone):
    """
    Change the time zone and affect the current moment's time. Note, a
    locality must already be set.
    """
    date = self._date
    try:
        # timezone-aware datetime: normalize to UTC, then localize
        date = times.to_local(times.to_universal(date), zone)
    except Exception:  # narrowed from a bare ``except:``; behaviour kept
        # naive datetime: localize directly
        date = times.to_local(date, zone)
    finally:
        self._date = date
    return self
def remind(self, request, datetime, about):
    """Remind about a TODO at given time.

    Parses the user-supplied datetime in the user's timezone, converts
    it to UTC and inserts it into the user's sorted task list.
    """
    try:
        user = request.get_user()
        dt = parse(datetime)
        tz = self._get_user_timezone(user)
        dt = times.to_universal(dt, tz)
        tasks = self._get_tasks(user)
        # keep the task list ordered by due time
        bisect.insort(tasks, (dt, about, request))
        self._set_tasks(user, tasks)
    except Exception as e:  # ``as`` spelling, consistent with siblings
        request.respond(u'Unable to parse a date: ' + unicode(e))
        raise
def test_persistence_of_typical_jobs(self):
    """Storing typical jobs."""
    job = Job.create(func=some_calculation, args=(3, 4), kwargs=dict(z=2))
    job.save()

    stored_date = self.testconn.hget(job.key, 'created_at').decode('utf-8')
    self.assertEquals(times.to_universal(stored_date),
                      strip_milliseconds(job.created_at))

    # ... and no other keys are stored
    self.assertEqual(sorted(self.testconn.hkeys(job.key)),
                     [b'created_at', b'data', b'description'])
def test_persistence_of_typical_jobs(self):
    """Storing typical jobs."""
    job = Job.create(some_calculation, 3, 4, z=2)
    job.save()

    stored_date = self.testconn.hget(job.key, 'created_at')
    self.assertEquals(times.to_universal(stored_date),
                      strip_milliseconds(job.created_at))

    # ... and no other keys are stored
    self.assertItemsEqual(self.testconn.hkeys(job.key),
                          ['created_at', 'data', 'description'])
def test_to_universal_with_string(self):
    dt = self.sometime_univ

    # Timezone-aware strings
    for aware in ('2012-02-02 00:56:31+13:00',
                  '2012-02-01 12:56:31+01:00',
                  '2012-02-01 06:56:31-05:00'):
        self.assertEquals(dt, times.to_universal(aware))

    # Timezone-less strings require explicit source timezone
    self.assertEquals(dt, times.to_universal('2012-02-02 00:56:31',
                                             'Pacific/Auckland'))
    self.assertEquals(dt, times.to_universal('2012-02-01 12:56:31', 'CET'))
    self.assertEquals(dt, times.to_universal('2012-02-01 06:56:31', 'EST'))

    # Without a timezone, UTC is assumed
    self.assertEquals(dt, times.to_universal('2012-02-01 11:56:31'))
def _parse_row(self, day, row, labels):
    """Yield Showtime objects for one row of the schedule table."""
    a = row.cssselect_first('a.featureLink')
    title = a.text_content()
    url = a.link()
    details = self._parse_details(url)

    tags = {}
    showtimes = []

    # first column is the title; pair the rest with their header labels
    cells = list(row.iterchildren())[1:]
    label_cells = list(labels.iterchildren())[1:]
    for cell_el, label_el in zip(cells, label_cells):
        cell = cell_el.text_content()
        label = label_el.text_content()
        if label:
            if label == 'Min.':
                details.setdefault('length', int(cell))
            elif cell != '---':
                tags[cell] = label
        elif cell:
            showtimes.extend(cell.split())

    for regexp, tag in self.tag_re:
        if regexp.search(title):
            tags[tag] = None
            title = regexp.sub('', title).strip()

    for st in showtimes:
        hour_minute = [int(n) for n in st.split(':')]
        starts_at = times.to_universal(
            datetime.datetime.combine(day, datetime.time(*hour_minute)),
            'Europe/Prague')
        yield Showtime(
            cinema=self.cinema,
            film_scraped=ScrapedFilm(title_main_scraped=title, **details),
            starts_at=starts_at,
            tags=tags,
            url='http://www.cinemacity.cz/',
        )
def remind(self, request, datetime, about):
    """Remind about a TODO at given time."""
    try:
        identity_plugin = self.bot.get_plugin('identity')
        identity = identity_plugin.get_identity_by_request(request)
        due = times.to_universal(parse(datetime),
                                 self._get_user_timezone(identity))
        tasks = self._get_tasks(identity)
        # insert keeping the list ordered by due time
        bisect.insort(tasks, (due, about, identity.id))
        self._set_tasks(identity, tasks)
    except Exception as e:
        request.respond('Unable to parse a date: ' + six.text_type(e))
        raise
    request.respond('ok')
def remind(self, request, datetime, about):
    """Remind about a TODO at given time."""
    try:
        identity = self.bot.get_plugin(
            'identity').get_identity_by_request(request)
        tz = self._get_user_timezone(identity)
        due_at = times.to_universal(parse(datetime), tz)
        tasks = self._get_tasks(identity)
        # tasks stay sorted by due time
        bisect.insort(tasks, (due_at, about, identity.id))
        self._set_tasks(identity, tasks)
    except Exception as e:
        request.respond('Unable to parse a date: ' + six.text_type(e))
        raise
    request.respond('ok')
def test_to_universal_with_string(self):
    dt = self.sometime_univ

    # Timezone-aware strings
    self.assertEquals(dt, times.to_universal('2012-02-02 00:56:31+13:00'))
    self.assertEquals(dt, times.to_universal('2012-02-01 12:56:31+01:00'))
    self.assertEquals(dt, times.to_universal('2012-02-01 06:56:31-05:00'))

    # Timezone-less strings require explicit source timezone
    for value, zone in (('2012-02-02 00:56:31', 'Pacific/Auckland'),
                        ('2012-02-01 12:56:31', 'CET'),
                        ('2012-02-01 06:56:31', 'EST')):
        self.assertEquals(dt, times.to_universal(value, zone))

    # Timezone-less strings are rejected if source timezone is not
    # specified
    with self.assertRaises(ValueError):
        self.assertEquals(dt, times.to_universal('2012-02-01 12:56:31'))
def test_to_universal_with_string(self):
    dt = self.sometime_univ

    # Timezone-aware strings all denote the same universal time
    aware_strings = ['2012-02-02 00:56:31+13:00',
                     '2012-02-01 12:56:31+01:00',
                     '2012-02-01 06:56:31-05:00']
    for s in aware_strings:
        self.assertEquals(dt, times.to_universal(s))

    # Timezone-less strings require explicit source timezone
    self.assertEquals(
        dt, times.to_universal('2012-02-02 00:56:31', 'Pacific/Auckland'))
    self.assertEquals(dt, times.to_universal('2012-02-01 12:56:31', 'CET'))
    self.assertEquals(dt, times.to_universal('2012-02-01 06:56:31', 'EST'))

    # Timezone-less strings are rejected if source timezone is not
    # specified
    with self.assertRaises(ValueError):
        self.assertEquals(dt, times.to_universal('2012-02-01 12:56:31'))
def _parse_row(self, day, row, labels):
    """Generate Showtime objects from one table row and its header."""
    feature = row.cssselect_first('a.featureLink')
    title = feature.text_content()
    url = feature.link()
    details = self._parse_details(url)

    tags = {}
    showtimes = []

    # skip the leading title column, pair data cells with header labels
    data_cells = list(row.iterchildren())[1:]
    header_cells = list(labels.iterchildren())[1:]
    pairs = [(c.text_content(), l.text_content())
             for (c, l) in zip(data_cells, header_cells)]
    for cell, label in pairs:
        if not label:
            if cell:
                showtimes.extend(cell.split())
        elif label == 'Min.':
            details.setdefault('length', int(cell))
        elif cell != '---':
            tags[cell] = label

    for regexp, tag in self.tag_re:
        if regexp.search(title):
            tags[tag] = None
            title = regexp.sub('', title).strip()

    for st in showtimes:
        parts = [int(n) for n in st.split(':')]
        local_dt = datetime.datetime.combine(day, datetime.time(*parts))
        yield Showtime(
            cinema=self.cinema,
            film_scraped=ScrapedFilm(title_main_scraped=title, **details),
            starts_at=times.to_universal(local_dt, 'Europe/Prague'),
            tags=tags,
            url='http://www.cinemacity.cz/',
        )
def _parse_html(self, html):
    """Parse the winter-cinema program page and yield Showtime objects.

    Days are listed as paragraphs; the month counter starts at November
    (11) and is incremented whenever a header mentions December.
    """
    y = times.now().year
    m = 11
    day = None
    for p in html.cssselect('.content p'):
        text = p.text_content(whitespace=True).strip()
        lines = text.splitlines()
        header = ''.join(lines[0:1])  # first line, or '' for empty text
        # only lines mentioning the cinema name describe screenings
        film = ''.join([l for l in lines if u'Zimní kino' in l])

        match = self.day_re.match(header)
        if match:
            if u'prosinec' in header:  # 'December' in Czech
                m += 1
            day = date(day=int(match.group(1)), month=m, year=y)

        if film:
            match = self.title_re.search(film)
            if match:
                t = time(
                    hour=int(match.group(1)),
                    minute=int(match.group(2))
                )
                # combine with the most recently seen day header
                starts_at = times.to_universal(
                    datetime.combine(day, t), 'Europe/Prague'
                )
                title_main = match.group(3)
                yield Showtime(
                    cinema=cinema,
                    film_scraped=ScrapedFilm(
                        title_main_scraped=title_main,
                    ),
                    starts_at=starts_at,
                    url=self.url,
                )
def _parse_item(self, item):
    """Build a Showtime from one program listing item."""
    title_el = item.cssselect_first('.program-title')
    title_main = title_el.text_content()
    url = title_el.link()

    raw = item.cssselect_first('.program-date').text_content()
    date, time = re.split(r'\s+ve?\s+', raw)
    local_dt = datetime.datetime.combine(
        parsers.date_cs(date),
        datetime.time(*[int(n) for n in time.split(':')]))
    starts_at = times.to_universal(local_dt, 'Europe/Prague')

    details = self._parse_details(url)
    return Showtime(
        cinema=cinema,
        film_scraped=ScrapedFilm(title_main_scraped=title_main, url=url,
                                 **details),
        starts_at=starts_at,
        url=self.url,
    )
def _parse_standalone_dates(self, dates_text):
    """Takes text with date & time information, parses out and generates
    standalone showtimes.

    Yields universal (UTC) datetimes.
    """
    # ranges are parsed by a sibling method; remove them first
    dates_text = self.range_re.sub('', dates_text)
    for match in self.standalone_re.finditer(dates_text):
        date_args_list = []

        # standalone date; list comprehension instead of map() so the
        # result concatenates with ``+`` on Python 3 too
        date_args_list.append(
            [int(n) for n in (
                self._determine_year(match.group(2)),  # year
                match.group(2),                        # month
                match.group(1),                        # day
            )])

        # date+date, let's process the second one
        if match.group(3):
            date_args_list.append(
                [int(n) for n in (
                    self._determine_year(match.group(5)),  # year
                    match.group(5),                        # month
                    match.group(4),                        # day
                )])

        # parse times
        time_args_list = self._parse_times(match.group(6))

        # construct and yield datetimes
        for date_args in date_args_list:
            for time_args in time_args_list:
                yield times.to_universal(
                    datetime(*(date_args + time_args)), self.tz)
def fetch_user_events(login):
    """Fetch new GitHub 'received events' for a user into Mongo.

    Pages through the GitHub API and stops as soon as the id of the
    most recently stored event is encountered.
    """
    logger = logging.getLogger('events')
    logger.debug('fetching events for %s' % (login,))
    db = get_db()
    user = db.users.find_one({'login': login})

    # calculating time of last saved event
    last_item = list(db.received_events.find({'gitorama.login': user['login']}).sort([('created_at', -1)])[:1])
    if last_item:
        last_item = last_item[0]['id']  # this is a GitHub's event id
    else:
        last_item = None

    gh = net.GitHub(token=user['gitorama']['token'])
    for event in gh.get_iter('/users/{0}/received_events'.format(user['login']), per_page=30):
        if event['id'] == last_item:
            # don't fetch more than needed
            # this item already saved
            break
        # convert GitHub's timestamp string to a UTC datetime
        event['created_at'] = times.to_universal(event['created_at'])
        event['gitorama'] = {'login': user['login']}
        db.received_events.save(event)
def __init__(self, source):
    """Load a post from its source file: parse metadata and content,
    resolve title/slug/status/timestamp, build the permalink, run
    pre/post-processor plugins and register the post in the cache.
    """
    self.source = path(source).abspath()
    """The absolute path to the source file for the post."""
    self.html_template_path = 'theme/post_detail.html'
    """The path to the template to use to transform the post into HTML."""
    self.markdown_template_path = 'core/post.md'
    """The path to the template to use to transform the post back into a
    :ref:`post source file <posts>`."""

    # This will get set to `True in _parse_source if the source file has
    # 'fenced metadata' (like Jekyll)
    self._fence = False

    metadata, self._content_raw = self._parse_source()

    if not hasattr(self, 'content_preprocessed'):
        self.content_preprocessed = self.content_raw

    # Handle any preprocessor plugins
    for plugin in PostProcessor.plugins:
        plugin.preprocess(self, metadata)

    # Fall back to a title derived from the file name when metadata has none
    self.title = metadata.pop(
        'title',
        self.source.namebase.replace('-', ' ').replace('_', ' ').title())
    """The title of the post."""
    self.slug = metadata.pop('slug', slugify(self.title))
    """The slug for the post."""
    self.tags = wrap_list(metadata.pop('tags', []))
    """A list of strings representing the tags applied to the post."""
    self.link = metadata.pop('link', None)
    """The post's :ref:`external link <post link>`."""
    self.via = metadata.pop('via', None)
    """The post's attribution name."""
    self.via_link = metadata.pop('via-link', metadata.pop('via_link', None))
    """The post's attribution link."""

    try:
        self.status = Status(metadata.pop('status', Status.draft.name))
        """The status of the post (published or draft)."""
    except ValueError:
        logger.warning(
            "'%s': Invalid status value in metadata. Defaulting to 'draft'."
            % self.title)
        self.status = Status.draft

    self.timestamp = metadata.pop('timestamp', None)
    """The date/time the post was published or written."""

    if self.timestamp is None:
        # no timestamp in metadata: use "now", which is already UTC
        self.timestamp = times.now()
        utctime = True
    else:
        utctime = False

    if not isinstance(self.timestamp, datetime):
        # looks like the timestamp from YAML wasn't directly convertible
        # to a datetime, so we need to parse it
        self.timestamp = parser.parse(str(self.timestamp))

    if self.timestamp.tzinfo is not None:
        # parsed timestamp has an associated timezone, so convert it to UTC
        self.timestamp = times.to_universal(self.timestamp)
    elif not utctime:
        # convert to UTC assuming input time is in the DEFAULT_TIMEZONE
        self.timestamp = times.to_universal(self.timestamp,
                                            settings.POST_TIMEZONE)

    self.content = Post.convert_to_html(self.content_preprocessed)
    """The post's content in HTML format."""

    # determine the URL based on the HOME_URL and the PERMALINK_STYLE
    # settings
    permalink = settings.PERMALINK_STYLE.format(
        year=unicode(self.timestamp_local.year),
        month=u'{0:02d}'.format(self.timestamp_local.month),
        day=u'{0:02d}'.format(self.timestamp_local.day),
        i_month=self.timestamp_local.month,
        i_day=self.timestamp_local.day,
        title=self.slug,  # for Jekyll compatibility
        slug=self.slug,
        timestamp=self.timestamp_local,
        post=self)
    # normalize the permalink ending: directory-style or explicit .html
    if permalink.endswith('index.html'):
        permalink = permalink[:-10]
    elif permalink.endswith('.html') or permalink.endswith('/'):
        pass
    else:
        permalink += '.html'
    self._permalink = permalink

    # keep track of any remaining properties in the post metadata
    metadata.pop('url', None)  # remove the url property from the metadata dict before copy
    self.custom_properties = copy(metadata)
    """A dict of any custom metadata properties specified in the post."""

    # handle any postprocessor plugins
    for plugin in PostProcessor.plugins:
        plugin.postprocess(self)

    # update cache
    settings.POST_CACHE[self.source] = self
def to_date(date_str):
    """Deserialize a stored date string into a UTC datetime (None-safe)."""
    if date_str is None:
        return None
    return times.to_universal(as_text(date_str))
def test_to_universal_rejects_no_tzinfo(self):  # noqa
    """Converting to universal times requires source timezone"""
    naive_now = datetime.now()
    with self.assertRaises(ValueError):
        times.to_universal(naive_now)
def make_utc_timestamp(timestamp, timezone='Europe/Madrid'):
    """Serialize a local datetime as an ISO-8601 UTC string ending in 'Z'."""
    if not timestamp:
        return None
    universal = times.to_universal(timestamp, timezone)
    return universal.isoformat('T') + 'Z'
def test_to_universal_rejects_non_date_arguments(self):
    """to_universal rejects non-date arguments"""
    not_a_date = [1, 2, 3]
    with self.assertRaises(TypeError):
        times.to_universal(not_a_date)