def update(self, trakt_movie, session):
    """Updates this record from the trakt media object `trakt_movie` returned by the trakt api."""
    if self.id and self.id != trakt_movie['ids']['trakt']:
        raise Exception('Tried to update db movie with different movie data')
    elif not self.id:
        self.id = trakt_movie['ids']['trakt']
    self.slug = trakt_movie['ids']['slug']
    self.imdb_id = trakt_movie['ids']['imdb']
    self.tmdb_id = trakt_movie['ids']['tmdb']
    for col in [
        'title',
        'overview',
        'runtime',
        'rating',
        'votes',
        'language',
        'tagline',
        'year',
        'trailer',
        'homepage',
    ]:
        setattr(self, col, trakt_movie.get(col))
    if trakt_movie.get('released'):
        self.released = dateutil_parse(trakt_movie.get('released'), ignoretz=True).date()
    self.updated_at = dateutil_parse(trakt_movie.get('updated_at'), ignoretz=True)
    self.genres = [TraktGenre(name=g.replace(' ', '-')) for g in trakt_movie.get('genres', [])]
    self.cached_at = datetime.now()
    self.translation_languages = trakt_movie.get('available_translations', [])
def check(self, value):
    if not value:
        self._rant(value)
    try:
        dateutil_parse(value)
    except:
        self._rant(value)
def get_events(self, start, end):
    """Retrieve a list of events in the time between start and end"""
    if not self._calendar_id:
        raise EventImporterError("No calendar has been selected")
    response = (
        self.service.events()
        .list(
            calendarId=self._calendar_id,
            orderBy="startTime",
            singleEvents=True,
            timeMin=start.isoformat(),
            timeMax=end.isoformat(),
        )
        .execute()
    )
    events = response["items"]
    for event in events:
        event["start"] = dateutil_parse(event["start"]["dateTime"])
        event["end"] = dateutil_parse(event["end"]["dateTime"])
        event["created"] = dateutil_parse(event["created"])
        event["updated"] = dateutil_parse(event["updated"])
    # check if there were more results
    if response.get("nextPageToken"):
        raise EventImporterError("There were more")
    return events
def update(self, trakt_show, session):
    """Updates this record from the trakt media object `trakt_show` returned by the trakt api."""
    if self.id and self.id != trakt_show['ids']['trakt']:
        raise Exception('Tried to update db show with different show data')
    elif not self.id:
        self.id = trakt_show['ids']['trakt']
    self.slug = trakt_show['ids']['slug']
    self.imdb_id = trakt_show['ids']['imdb']
    self.tmdb_id = trakt_show['ids']['tmdb']
    self.tvrage_id = trakt_show['ids']['tvrage']
    self.tvdb_id = trakt_show['ids']['tvdb']
    if trakt_show.get('airs'):
        airs = trakt_show.get('airs')
        self.air_day = airs.get('day')
        self.timezone = airs.get('timezone')
        if airs.get('time'):
            self.air_time = datetime.strptime(airs.get('time'), '%H:%M').time()
        else:
            self.air_time = None
    if trakt_show.get('first_aired'):
        self.first_aired = dateutil_parse(trakt_show.get('first_aired'), ignoretz=True)
    else:
        self.first_aired = None
    self.updated_at = dateutil_parse(trakt_show.get('updated_at'), ignoretz=True)
    for col in ['overview', 'runtime', 'rating', 'votes', 'language', 'title', 'year',
                'certification', 'network', 'country', 'status', 'aired_episodes',
                'trailer', 'homepage']:
        setattr(self, col, trakt_show.get(col))
    # Sometimes genres and translations are None but we really do want a list, hence the "or []"
    self.genres = [TraktGenre(name=g.replace(' ', '-')) for g in trakt_show.get('genres') or []]
    self.cached_at = datetime.now()
    self.translation_languages = trakt_show.get('available_translations') or []
def steamprofile_detail(request, username):
    # See if we've got the profile already.
    try:
        profile = SteamProfile.objects.get(username=username, import_me=True)
    except SteamProfile.DoesNotExist:
        return steamprofile_add(request, username)

    start_date = timezone.now() - datetime.timedelta(days=1)
    end_date = timezone.now()

    if request.GET.get('start_date'):
        start_date = dateutil_parse(request.GET['start_date'])
    if request.GET.get('end_date'):
        end_date = dateutil_parse(request.GET['end_date'])
    if end_date < start_date:
        start_date, end_date = end_date, start_date

    friends_data = profile.friends_data(start_date, end_date)
    return render(request, 'steamstalker/steamprofile_detail.html', {
        'start_date': start_date,
        'end_date': end_date,
        'friends_seen_json': simplejson.dumps(friends_data),
        'username': username,
    })
def call_api(self):
    weather = {}
    weather['error'] = False
    try:
        weather_json = requests.get(W_GOV_WEATHER_URL).json()
        # List of hourly weather forecast. Index is number of hours
        # into the future from now (0 = now, 1 = next hour, etc.)
        hourly_weather = weather_json['properties']['periods']
        weather['current_temp'] = int(hourly_weather[0]['temperature'])
        logger.info("Got current temp {}".format(weather['current_temp']))
        weather['forecast'] = None
        for hour_weather in hourly_weather[0:WEATHER_FORECAST_HOURS + 1]:
            # Find the most severe weather in this interval. Use the
            # icon to determine the weather, because that's the most
            # pared down. https://api.weather.gov/icons
            forecast_icon_url = hour_weather['icon']
            icon_path = urlparse(forecast_icon_url).path
            # Sometimes, the icon path will have a comma and a number after
            # like: /icons/land/night/rain_showers,20
            icon_re = re.compile('/[A-Za-z_]+')
            re_matches = icon_re.findall(icon_path)
            if re_matches:
                # Icon name will be the last match. Also, strip off the
                # leading slash from the regex match
                icon_name = re_matches[-1][1:]
                matched_forecast = W_GOV_ICON_2_WEATHER[icon_name]
                if weather.get('forecast'):
                    # Replace the forecast with the more severe weather
                    weather['forecast'] = max(weather['forecast'], matched_forecast)
                else:
                    # First valid forecast
                    weather['forecast'] = matched_forecast
        logger.info("Got forecast {}".format(weather['forecast']))
    except Exception:
        logger.exception('Exception during weather API call.')
        weather['error'] = True

    try:
        astro_args = {'formatted': 0}  # Get a full date/time string
        astro_json = requests.get(ASTRO_API_URL, params=astro_args).json()
        sunrise_str = astro_json['results']['sunrise']
        sunset_str = astro_json['results']['sunset']
        local_tz = tz.tzlocal()
        # Since we only can get the UTC sunrise/sunset, it may be for the
        # next day, so convert it to the current day
        now = datetime.now()
        sunrise_utc = dateutil_parse(sunrise_str).astimezone(local_tz)
        sunrise = datetime.combine(date=now.date(), time=sunrise_utc.time())
        sunset_utc = dateutil_parse(sunset_str).astimezone(local_tz)
        sunset = datetime.combine(date=now.date(), time=sunset_utc.time())
        weather['sunrise'] = sunrise
        weather['sunset'] = sunset
        logger.info("Got sunrise {} and sunset {}".format(
            weather['sunrise'], weather['sunset']))
    except Exception:
        logger.exception('Exception during astro API call.')
        weather['error'] = True

    return weather
def is_current_repo(repo):
    ''' Return True for a current repo, False otherwise.
    '''
    if repo['pushed_at'] is None:
        #
        # Never pushed means probably empty?
        #
        logging.debug('%(name)s has never been pushed' % repo)
        return False

    create_cutoff = datetime(2013, 5, 6, tzinfo=tzutc())
    push_cutoff = datetime.now(tzutc()) - timedelta(days=30)

    created_at = dateutil_parse(repo['created_at'])
    pushed_at = dateutil_parse(repo['pushed_at'])

    if created_at > create_cutoff:
        #
        # Repository created after May 2013, when we started looking.
        #
        logging.debug('%(name)s created recently enough: %(created_at)s' % repo)
        return True

    if pushed_at > push_cutoff:
        #
        # Repository pushed within the past 30 days.
        #
        logging.debug('%(name)s updated recently enough: %(pushed_at)s' % repo)
        return True

    logging.debug('%(name)s is too old: %(pushed_at)s' % repo)
    return False
def update_watched_cache(style_ident, username=None, account=None):
    if account and not username:
        username = '******'
    url = get_api_url('users', username, 'watched', style_ident)
    session = get_session(account=account)
    try:
        data = session.get(url).json()
        if not data:
            log.warning('No watched data returned from trakt.')
            return
        cache = get_user_cache(username=username, account=account)['watched'][style_ident]
        log.verbose('Received %d record(s) from trakt.tv %s\'s watched history', len(data), username)
        if style_ident == 'movies':
            for movie in data:
                movie_id = movie['movie']['ids']['trakt']
                cache[movie_id] = movie['movie']
                cache[movie_id]['watched_at'] = dateutil_parse(movie['last_watched_at'], ignoretz=True)
                cache[movie_id]['plays'] = movie['plays']
        else:
            for series in data:
                series_id = series['show']['ids']['trakt']
                cache[series_id] = series['show']
                cache[series_id]['seasons'] = series['seasons']
                cache[series_id]['watched_at'] = dateutil_parse(series['last_watched_at'], ignoretz=True)
                cache[series_id]['plays'] = series['plays']
    except requests.RequestException as e:
        raise plugin.PluginError('Unable to get data from trakt.tv: %s' % e)
def update(self, trakt_show, session):
    """Updates this record from the trakt media object `trakt_show` returned by the trakt api."""
    if self.id and self.id != trakt_show['ids']['trakt']:
        raise Exception('Tried to update db show with different show data')
    elif not self.id:
        self.id = trakt_show['ids']['trakt']
    self.slug = trakt_show['ids']['slug']
    self.imdb_id = trakt_show['ids']['imdb']
    self.tmdb_id = trakt_show['ids']['tmdb']
    self.tvrage_id = trakt_show['ids']['tvrage']
    self.tvdb_id = trakt_show['ids']['tvdb']
    if trakt_show.get('air_time'):
        self.air_time = dateutil_parse(trakt_show.get('air_time'), ignoretz=True)
    else:
        self.air_time = None
    if trakt_show.get('first_aired'):
        self.first_aired = dateutil_parse(trakt_show.get('first_aired'), ignoretz=True)
    else:
        self.first_aired = None
    self.updated_at = dateutil_parse(trakt_show.get('updated_at'), ignoretz=True)
    for col in ['overview', 'runtime', 'rating', 'votes', 'language', 'title', 'year',
                'air_day', 'certification', 'network', 'country', 'status', 'aired_episodes']:
        setattr(self, col, trakt_show.get(col))
    self.genres[:] = get_db_genres(trakt_show.get('genres', []), session)
    self.cached_at = datetime.now()
def out_of_range(d):
    ret = None
    try:
        dateutil_parse(d, fuzzy=True, default=DEFAULT_DATETIME)
    except ValueError:
        ret = True
    return ret
def _import_organization(self, info):
    if info['type'] not in TYPE_MAP:
        return
    org = {'origin_id': info['id']}
    org['id'] = origin_id_to_id(org['origin_id'])
    org['type'] = TYPE_MAP[info['type']]
    if org['type'] in ['introducer', 'introducer_field']:
        self.skip_orgs.add(org['origin_id'])
        return
    org['name'] = {'fi': info['name_fin'], 'sv': info['name_swe']}
    if info['shortname']:
        org['abbreviation'] = info['shortname']
    # FIXME: Use maybe sometime
    DUPLICATE_ABBREVS = [
        'AoOp', 'Vakaj', 'Talk', 'KIT', 'HTA', 'Ryj', 'Pj', 'Sotep', 'Hp',
        'Kesvlk siht', 'Kulttj', 'HVI', 'Sostap', 'KOT', 'Lsp', 'Kj', 'KYT',
        'AST', 'Sote', 'Vp', 'HHE', 'Tj', 'HAKE', 'Ko'
    ]
    abbr = org.get('abbreviation', None)
    if org['type'] in ('council', 'committee', 'board_division', 'board'):
        org['slug'] = slugify(org['abbreviation'])
    else:
        org['slug'] = slugify(org['origin_id'])
    org['founding_date'] = None
    if info['start_time']:
        d = dateutil_parse(info['start_time'])
        # 2009-01-01 means "no data"
        if not (d.year == 2009 and d.month == 1 and d.day == 1):
            org['founding_date'] = d.date().strftime('%Y-%m-%d')
    org['dissolution_date'] = None
    if info['end_time']:
        d = dateutil_parse(info['end_time'])
        org['dissolution_date'] = d.date().strftime('%Y-%m-%d')
    org['contact_details'] = []
    if info['visitaddress_street'] or info['visitaddress_zip']:
        cd = {'type': 'address'}
        cd['value'] = info.get('visitaddress_street', '')
        z = info.get('visitaddress_zip', '')
        if z and len(z) == 2:
            z = "00%s0" % z
        cd['postcode'] = z
        org['contact_details'].append(cd)
    org['modified_at'] = dateutil_parse(info['modified_time'])
    parent = info['parent']
    if parent and parent not in self.skip_orgs:
        org['parent'] = origin_id_to_id(parent)
    else:
        org['parent'] = None
    self.save_organization(org)
def handle(self, *items, **options):
    self.verbosity = int(options.get('verbosity', 1))
    self.batchsize = options.get('batchsize', DEFAULT_BATCH_SIZE)
    self.start_date = None
    self.end_date = None
    self.remove = options.get('remove', False)
    self.workers = int(options.get('workers', 0))
    self.commit = options.get('commit', True)
    self.filterfiled = options.get('filed', None)
    self.filterkey = options.get('filedkey', None)
    # self.startid = options.get('startid', 0)

    if sys.version_info < (2, 7):
        warnings.warn('multiprocessing is disabled on Python 2.6 and earlier. '
                      'See https://github.com/toastdriven/django-haystack/issues/1001')
        self.workers = 0

    self.backends = options.get('using')
    if not self.backends:
        self.backends = haystack_connections.connections_info.keys()

    age = options.get('age', DEFAULT_AGE)
    start_date = options.get('start_date')
    end_date = options.get('end_date')

    if age is not None:
        self.start_date = now() - timedelta(hours=int(age))

    if start_date is not None:
        from dateutil.parser import parse as dateutil_parse
        try:
            self.start_date = dateutil_parse(start_date)
        except ValueError:
            pass

    if end_date is not None:
        from dateutil.parser import parse as dateutil_parse
        try:
            self.end_date = dateutil_parse(end_date)
        except ValueError:
            pass

    if not items:
        items = haystack_load_apps()

    return super(Command, self).handle(*items, **options)
def test_log_xml(self):
    hg = self._mk_local_repo()
    log = hg.log(backend='xml')[0]
    self.assertEquals('690216eee7b291ac9dca0164d660576bdba51d47', log[-1]['node'])
    expects = [{'node': 'b26fba69aa7b0378bee2a5386f16c14b0f697c18',
                'files': [],
                'short': 'b26fba69aa7b',
                'mess': u'closing',
                'branch': u'closed',
                'tags': [],
                'date': dateutil_parse('2012-03-02T15:50:05+0100'),
                'author': u'Jan Florian <*****@*****.**>',
                'rev': 3},
               {'node': 'eda6840416571d21bcf3d37e9d519fafc3e7c31d',
                'files': ['closed', 'meh'],
                'short': 'eda684041657',
                'mess': u'uuu',
                'branch': u'closed',
                'tags': [],
                'date': dateutil_parse('2012-03-02T15:49:58+0100'),
                'author': u'Jan Florian <*****@*****.**>',
                'rev': 2}]
    self.assertEquals(expects, hg.log(branch='closed', backend='xml')[0])
def export_stats(start_date=None, end_date=None, update_bookmark=True, retry=False):
    """Export stats events."""
    if current_app.config['ZENODO_STATS_PIWIK_EXPORT_ENABLED'] is True:
        start_date = dateutil_parse(start_date) if start_date else None
        end_date = dateutil_parse(end_date) if end_date else None
        try:
            PiwikExporter().run(start_date=start_date,
                                end_date=end_date,
                                update_bookmark=update_bookmark)
        except Exception as exc:
            if retry:
                export_stats.retry(exc=exc)
def handle(self, *items, **options):
    self.verbosity = int(options.get('verbosity', 1))
    self.batchsize = options.get('batchsize', DEFAULT_BATCH_SIZE)
    self.start_date = None
    self.end_date = None
    self.remove = options.get('remove', False)
    self.using = options.get('using')
    self.workers = int(options.get('workers', 0))
    self.backend = haystack_connections[self.using].get_backend()

    age = options.get('age', DEFAULT_AGE)
    start_date = options.get('start_date')
    end_date = options.get('end_date')

    if age is not None:
        self.start_date = now() - timedelta(minutes=int(age))
        options['start_date'] = self.start_date

    if start_date is not None:
        from dateutil.parser import parse as dateutil_parse
        try:
            self.start_date = dateutil_parse(start_date)
            options['start_date'] = self.start_date
        except ValueError:
            pass

    if end_date is not None:
        from dateutil.parser import parse as dateutil_parse
        try:
            self.end_date = dateutil_parse(end_date)
            options['end_date'] = self.end_date
        except ValueError:
            pass

    if not items:
        from django.db.models import get_app
        # Do all, in an INSTALLED_APPS sorted order.
        items = []
        for app in settings.INSTALLED_APPS:
            try:
                app_label = app.split('.')[-1]
                loaded_app = get_app(app_label)
                items.append(app_label)
            except:
                # No models, no problem.
                pass

    return super(Command, self).handle(*items, **options)
def _read_file_header(self):
    """Reads out the file header for the arc file. If version was
    not provided, this will autopopulate it."""
    header = self.fileobj.readline().decode('utf-8')
    payload1 = self.fileobj.readline().decode('utf-8')
    payload2 = self.fileobj.readline().decode('utf-8')
    version, reserved, organisation = payload1.split(maxsplit=2)
    self.header_read = True
    version = int(version)
    # print("--------------------------------------------------")
    # print(header, "\n", payload1, "\n", payload2, "\n", version)
    # print("--------------------------------------------------")

    if self.version and int(self.version) != version:
        raise IOError("Version mismatch. Requested version was '%s' but "
                      "version in file was '%s'" % (self.version, version))

    if version == 1:
        url, ip_address, date, content_type, length = header.split()
        self.file_headers = {
            "ip_address": ip_address,
            "date": dateutil_parse(date),
            "org": organisation,
            "url": url,
            'content_type': content_type,
            'length': int(length),
        }
        self.version = 1
    elif version == 2:
        (url, ip_address, date, content_type, result_code, checksum,
         location, offset, filename, length) = header.split()
        self.file_headers = {
            "ip_address": ip_address,
            "date": dateutil_parse(date),
            "org": organisation,
            'url': url,
            'content_type': content_type,
            'length': int(length),
            'filename': filename,
            'location': location,
        }
        self.version = 2
    else:
        raise IOError("Unknown ARC version '%s'" % version)

    length = int(length)
    current_size = len(payload1 + payload2)
    self.file_meta = b''
    while current_size < length:
        line = self.fileobj.readline()
        self.file_meta = self.file_meta + line
        current_size = current_size + len(line)
def export(self, filename, starttime=None, endtime=None, duration=None):
    """Returns a PCAP file, potentially including your specified starttime,
    endtime or duration. Internally uses editcap

    :param str filename: the name of the new PCAP file to be created/exported
        from the existing PCAP file
    :param str starttime: defines a start time filter
    :param str endtime: defines an end time filter
    :param str duration: defines a duration filter
    """
    cmd = ['editcap']

    if starttime is not None:
        if isinstance(starttime, basestring):
            starttime = dateutil_parse(starttime)

    if endtime is not None:
        if isinstance(endtime, basestring):
            endtime = dateutil_parse(endtime)

    if duration is not None:
        if isinstance(duration, basestring):
            duration = parse_timedelta(duration)

        if starttime:
            endtime = starttime + duration
        elif endtime:
            starttime = endtime - duration
        else:
            raise ValueError("Must specify either starttime or "
                             "endtime with duration")

    if starttime is not None:
        cmd.extend(['-A', (starttime.strftime('%Y-%m-%d %H:%M:%S'))])

    if endtime is not None:
        cmd.extend(['-B', (endtime.strftime('%Y-%m-%d %H:%M:%S'))])

    cmd.append(self.filename)
    cmd.append(filename)

    logger.info('subprocess: %s' % ' '.join(cmd))
    subprocess.check_output(cmd)

    return PcapFile(filename)
def handle(self, **options):
    self.verbosity = int(options.get('verbosity', 1))
    self.batchsize = options.get('batchsize', DEFAULT_BATCH_SIZE)
    self.start_date = None
    self.end_date = None
    self.remove = options.get('remove', False)
    self.workers = options.get('workers', 0)
    self.commit = options.get('commit', True)
    self.max_retries = options.get('max_retries', DEFAULT_MAX_RETRIES)

    self.backends = options.get('using')
    if not self.backends:
        self.backends = haystack_connections.connections_info.keys()

    age = options.get('age', DEFAULT_AGE)
    start_date = options.get('start_date')
    end_date = options.get('end_date')

    if self.verbosity > 2:
        LOG.setLevel(logging.DEBUG)
    elif self.verbosity > 1:
        LOG.setLevel(logging.INFO)

    if age is not None:
        self.start_date = now() - timedelta(hours=int(age))

    if start_date is not None:
        from dateutil.parser import parse as dateutil_parse
        try:
            self.start_date = dateutil_parse(start_date)
        except ValueError:
            pass

    if end_date is not None:
        from dateutil.parser import parse as dateutil_parse
        try:
            self.end_date = dateutil_parse(end_date)
        except ValueError:
            pass

    labels = options.get('app_label') or haystack_load_apps()

    for label in labels:
        for using in self.backends:
            try:
                self.update_backend(label, using)
            except:
                LOG.exception("Error updating %s using %s ", label, using)
                raise
def handle(self, *items, **options):
    self.verbosity = int(options.get("verbosity", 1))
    self.batchsize = options.get("batchsize", DEFAULT_BATCH_SIZE)
    self.start_date = None
    self.end_date = None
    self.remove = options.get("remove", False)
    self.using = options.get("using")
    self.workers = int(options.get("workers", 0))
    self.backend = haystack_connections[self.using].get_backend()

    age = options.get("age", DEFAULT_AGE)
    start_date = options.get("start_date")
    end_date = options.get("end_date")

    if age is not None:
        self.start_date = datetime.datetime.now() - datetime.timedelta(hours=int(age))

    if start_date is not None:
        from dateutil.parser import parse as dateutil_parse
        try:
            self.start_date = dateutil_parse(start_date)
        except ValueError:
            pass

    if end_date is not None:
        from dateutil.parser import parse as dateutil_parse
        try:
            self.end_date = dateutil_parse(end_date)
        except ValueError:
            pass

    if not items:
        from django.db.models import get_app
        # Do all, in an INSTALLED_APPS sorted order.
        items = []
        for app in settings.INSTALLED_APPS:
            try:
                app_label = app.split(".")[-1]
                loaded_app = get_app(app_label)
                items.append(app_label)
            except:
                # No models, no problem.
                pass

    return super(Command, self).handle(*items, **options)
def robust_date_parser(d):
    """
    Robust wrapper around some date parsing libs, making a best effort to return
    a single 8601 date from the input string. No range checking is performed, and
    any date other than the first occurring will be ignored.

    We use timelib for its ability to make at least some sense of invalid dates,
    e.g. 2012/02/31 -> 2012/03/03

    We rely only on dateutil.parser for picking out dates from nearly arbitrary
    strings (fuzzy=True), but at the cost of being forgiving of invalid dates
    in those kinds of strings.

    Returns None if it fails
    """
    dd = dateparser.to_iso8601(d)
    if dd is None or out_of_range(d):
        try:
            dd = dateutil_parse(d, fuzzy=True, default=DEFAULT_DATETIME)
            if dd.year == DEFAULT_DATETIME.year:
                dd = None
        except Exception:
            try:
                dd = timelib.strtodatetime(d, now=DEFAULT_DATETIME_SECS)
            except ValueError:
                pass
            except Exception as e:
                logger.error(e)
        if dd:
            ddiso = dd.isoformat()
            return ddiso[:ddiso.index('T')]
    return dd
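# A minimal, self-contained sketch of the fuzzy-parse fallback used above; only
# dateutil is assumed. DEFAULT_DATETIME here is a stand-in for the module-level
# constant referenced by robust_date_parser (its real value is not shown here).
from datetime import datetime
from dateutil.parser import parse as dateutil_parse

DEFAULT_DATETIME = datetime(3000, 1, 1)
dd = dateutil_parse('entry created on 2012-05-01', fuzzy=True, default=DEFAULT_DATETIME)
print(dd.isoformat()[:dd.isoformat().index('T')])  # -> '2012-05-01'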
def normalizeimage(image_desc, copy=False):
    """
    :param image_desc: an image description as returned from
        |docker.Client.images|_, |docker.Client.inspect_image|_, etc.
    :param copy: if :const:`True`, make a copy of :obj:`image_desc` before
        performing any normalizations
    :returns: the normalized image description (:obj:`image_desc` if
        :obj:`copy` is :const:`False`)

    This method attempts to address certain `Docker API inconsistencies
    <https://github.com/docker/docker/issues/5893#issuecomment-102398746>`__.
    The following keys are added to :obj:`image_desc`:

    * :attr:`':id'` - a normalized :attr:`'Id'`
    * :attr:`':short_id'` - the first 12 hexadecimal characters from :attr:`':id'`
    * :attr:`':parent_id'` - a normalized :attr:`'ParentId'` or :attr:`'Parent'`
    * :attr:`':created_dt'` - a timezone-aware :class:`datetime` object
      representing :attr:`'Created'`
    * :attr:`':repo_tags'` - a normalized :attr:`'RepoTags'`, including any
      short names (i.e., those implying ``:latest``)
    """
    if copy:
        image = deepcopy(image_desc)
    else:
        image = image_desc

    image_id = image.get('Id', image.get('id')).lower()
    image[':id'] = image_id
    image[':parent_id'] = image.get('ParentId', image.get('Parent', image.get('parent', ''))).lower()
    image_short_id = image_id[:12]
    image[':short_id'] = image_short_id
    image_created = image.get('Created', image.get('created'))

    if isinstance(image_created, int):
        # Work-around for
        # <https://github.com/PythonCharmers/python-future/issues/144> and
        # <https://bitbucket.org/pypy/pypy/issue/2048/datetimeutcfromtimestamp-barfs-when>
        image_created = native(image_created)
        image[':created_dt'] = datetime.utcfromtimestamp(image_created).replace(tzinfo=TZ_UTC)
    else:
        image[':created_dt'] = dateutil_parse(image_created)

    image[':repo_tags'] = []

    for repo_tag in image.get('RepoTags', ()):
        if repo_tag == '<none>:<none>':
            continue
        repo, tag = repo_tag.split(':')
        if tag == 'latest':
            image[':repo_tags'].append(repo)
        image[':repo_tags'].append(repo_tag)

    return image
def on_task_filter(self, task, config):
    for entry in task.entries:
        field = config['field']
        if field not in entry:
            entry.fail('Field {0} does not exist'.format(field))
            continue
        field_value = entry[field]
        if isinstance(field_value, datetime):
            field_date = field_value
        elif isinstance(field_value, float):
            field_date = datetime.fromtimestamp(field_value)
        elif isinstance(field_value, str):
            try:
                field_date = dateutil_parse(entry[field])
            except ValueError:
                log.warning('Entry %s ignored: %s is not a valid date', entry['title'], field_value)
                continue
        else:
            log.warning('Entry %s ignored: %s is not a valid date', entry['title'], field_value)
            continue
        age_cutoff = datetime.now() - parse_timedelta(config['age'])
        if field_date < age_cutoff:
            info_string = 'Date in field `{0}` is older than {1}'.format(field, config['age'])
            if config['action'] == 'accept':
                entry.accept(info_string)
            else:
                entry.reject(info_string)
            log.debug('Entry %s was %sed because date in field `%s` is older than %s',
                      entry['title'], config['action'], field, config['age'])
def update(self, trakt_movie, session):
    """Updates this record from the trakt media object `trakt_movie` returned by the trakt api."""
    if self.id and self.id != trakt_movie['ids']['trakt']:
        raise Exception('Tried to update db movie with different movie data')
    elif not self.id:
        self.id = trakt_movie['ids']['trakt']
    self.slug = trakt_movie['ids']['slug']
    self.imdb_id = trakt_movie['ids']['imdb']
    self.tmdb_id = trakt_movie['ids']['tmdb']
    for col in ['title', 'overview', 'runtime', 'rating', 'votes', 'language', 'tagline', 'year']:
        setattr(self, col, trakt_movie.get(col))
    # Check the incoming trakt data rather than self.released, which is unset for new records
    if trakt_movie.get('released'):
        self.released = dateutil_parse(trakt_movie.get('released'), ignoretz=True)
    self.updated_at = dateutil_parse(trakt_movie.get('updated_at'), ignoretz=True)
    self.genres[:] = get_db_genres(trakt_movie.get('genres', []), session)
    self.cached_at = datetime.now()
def parse_date(string):
    match = date_pattern.match(string)
    if not match:
        try:
            date = dateutil_parse(string)
        except ValueError:
            # we could not parse it, give up
            return None
        else:
            return date
    data = match.groupdict()
    # normalize values
    for key, value in data.iteritems():
        if value is None:
            pass
        elif key != 'offset':
            data[key] = int(value)
    # make timezone offset
    if data['offset'] in ('z', 'Z', None):
        data['tzinfo'] = tzutc()
    else:
        plusminus = 1 if data['offset'] == '+' else -1
        seconds = 3600 * data['offset_hour'] + 60 * data['offset_minute']
        data['tzinfo'] = tzoffset(None, plusminus * seconds)
    # remove unused data
    for k in ('offset', 'offset_hour', 'offset_minute'):
        del data[k]
    # done
    return datetime.datetime(**data)
def _get_last_tweeted(self):
    if not hasattr(self, '_last_tweeted_date'):
        data = redis.get('last_tweeted_date')
        if data is None:
            raise LastTweetedDateNotSet()
        self._last_tweeted_date = dateutil_parse(data).date()
    return self._last_tweeted_date
def update_user_ratings_cache(style_ident, username=None, account=None):
    if account and not username:
        username = '******'
    url = get_api_url('users', username, 'ratings', style_ident)
    session = get_session(account=account)
    try:
        data = session.get(url).json()
        if not data:
            log.warning('No user ratings data returned from trakt.')
            return
        cache = get_user_cache(username=username, account=account)['user_ratings']
        log.verbose('Received %d record(s) from trakt.tv %s\'s %s user ratings', len(data), username, style_ident)
        for item in data:
            # get the proper cache from the type returned by trakt
            item_type = item['type']
            item_cache = cache[item_type + 's']

            # season cannot be put into shows because the code would turn to spaghetti later
            # when retrieving from cache. Instead we put some season info inside the season
            # cache key'd to series id
            # eg. cache['seasons'][<show_id>][<season_number>] = ratings and stuff
            if item_type == 'season':
                show_id = item['show']['ids']['trakt']
                season = item['season']['number']
                item_cache.setdefault(show_id, {})
                item_cache[show_id].setdefault(season, {})
                item_cache = item_cache[show_id]
                item_id = season
            else:
                item_id = item[item_type]['ids']['trakt']
            item_cache[item_id] = item[item_type]
            item_cache[item_id]['rated_at'] = dateutil_parse(item['rated_at'], ignoretz=True)
            item_cache[item_id]['rating'] = item['rating']
    except requests.RequestException as e:
        raise plugin.PluginError('Unable to get data from trakt.tv: %s' % e)
def get_parsed_value(self, value):
    """
    Helper to cast string to datetime using :member:`parse_format`.

    :param value: String representing a datetime
    :type value: str
    :return: datetime
    """
    def get_parser(parser_desc):
        try:
            return parser_desc['parser']
        except TypeError:
            try:
                return get_parser(self.date_parsers[parser_desc])
            except KeyError:
                return parser_desc
        except KeyError:
            pass

    parser = get_parser(self.parse_format)
    if parser is None:
        try:
            return dateutil_parse(value)
        except ValueError:
            return None
    if callable(parser):
        return parser(value)
    return datetime.strptime(value, parser)
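# Hedged sketch of the shapes get_parser() above can resolve (the names below are
# illustrative, not from the source):
#   self.parse_format = '%d/%m/%Y'                         -> used via datetime.strptime
#   self.parse_format = {'parser': my_callable}            -> my_callable(value) is used
#   self.parse_format = 'iso', with
#   self.date_parsers = {'iso': {'parser': my_callable}}   -> looked up by name, then my_callable(value)
#   self.parse_format = None / unresolvable                -> falls back to dateutil_parse(value)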
def update(self, actor, session):
    if self.id and self.id != actor.get('ids').get('trakt'):
        raise Exception('Tried to update db actors with different actor data')
    elif not self.id:
        self.id = actor.get('ids').get('trakt')
    self.name = actor.get('name')
    ids = actor.get('ids')
    self.imdb = ids.get('imdb')
    self.slug = ids.get('slug')
    self.tmdb = ids.get('tmdb')
    self.biography = actor.get('biography')
    if actor.get('birthday'):
        self.birthday = dateutil_parse(actor.get('birthday'))
    if actor.get('death'):
        self.death = dateutil_parse(actor.get('death'))
    self.homepage = actor.get('homepage')
def get_time(claims, propertyName, locale, defaultValue=None):
    propValue = claims.get(propertyName, {})
    if len(propValue) == 0:
        return defaultValue

    result = []
    for e in propValue:
        mainsnak = e.get('mainsnak', {})
        datavalue = mainsnak.get('datavalue', {})
        if datavalue is not None:
            value = datavalue.get('value', '')
            result.append(value.get('time', ''))

    if len(result) == 0:
        date_string = defaultValue
    else:
        date_string = ', '.join(result)

    try:
        parsed_date = datetime.strptime(date_string, "+%Y-%m-%dT%H:%M:%SZ")
    except:
        if date_string.startswith('-'):
            return date_string.split('T')[0]
        try:
            parsed_date = dateutil_parse(date_string, fuzzy=False, default=False)
        except:
            logger.debug('could not parse date %s', date_string)
            return date_string.split('T')[0]

    return format_date_by_locale(parsed_date, locale)
def parse_date(date):
    if date is None or not len(date.strip()):
        return
    try:
        return dateutil_parse(date).date().isoformat()
    except:
        return
def test_ok_if_order_in_allowed_status(self, allowed_status, force):
    """
    Test that the order can be cancelled if it's in one of the allowed statuses.
    """
    reason = CancellationReason.objects.order_by('?').first()
    order = OrderFactory(status=allowed_status)
    adviser = AdviserFactory()

    with freeze_time('2018-07-12 13:00'):
        order.cancel(by=adviser, reason=reason, force=force)

    order.refresh_from_db()
    assert order.status == OrderStatus.cancelled
    assert order.cancelled_on == dateutil_parse('2018-07-12T13:00Z')
    assert order.cancellation_reason == reason
    assert order.cancelled_by == adviser
def as_date(val):
    '''Casts the value to a ``datetime.date`` object if possible

    Else raises ``TypeError``
    '''
    # Important to check if datetime first because datetime.date objects
    # pass the isinstance(obj, datetime.date) test
    if isinstance(val, datetime.datetime):
        return val.date()
    elif isinstance(val, datetime.date):
        return val
    elif isinstance(val, basestring):
        return dateutil_parse(val).date()
    else:
        raise TypeError('date cannot be of type "{}".'.format(type(val)) +
                        ' It must be able to be cast to a datetime.date')
def __init__(self, node):
    self.original = node
    self.pykube_node = node
    metadata = node.obj['metadata']
    self.name = metadata['name']
    self.index = int(self.name.split('-')[3])
    self.region, self.instance_type = self._get_instance_data()
    self.selectors = metadata['labels']
    # self.capacity = KubeResource(**node.obj['status']['capacity'])
    self.capacity = None
    self.used_capacity = KubeResource()
    self.unschedulable = node.obj['spec'].get('unschedulable', False)
    self.creation_time = dateutil_parse(metadata['creationTimestamp'])
    self.instance_index = utils.get_instance_index(node)
def __new__(cls, t):
    if isinstance(t, (str, bytes)) and t.isdigit():
        t = int(t)
    if not isinstance(t, (str, bytes)):
        from dateutil.tz import tzutc
        return datetime.fromtimestamp(t // 1000, tz=tzutc())
    try:
        units = {"weeks", "days", "hours", "minutes", "seconds"}
        diffs = {u: float(t[:-1]) for u in units if u.startswith(t[-1])}
        if len(diffs) == 1:
            return datetime.now().replace(microsecond=0) + relativedelta(**diffs)
        return dateutil_parse(t)
    except (ValueError, OverflowError, AssertionError):
        raise ValueError(
            'Could not parse "{}" as a timestamp or time delta'.format(t))
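# Hedged usage sketch for the constructor above (the class name Timestamp is
# illustrative; the source does not show it):
#   Timestamp("1500000000000")  -> 2017-07-14 02:40:00+00:00 (digit strings are epoch milliseconds)
#   Timestamp("2h")             -> now + 2 hours (single-letter suffix matched against the unit names)
#   Timestamp("2021-03-01")     -> whatever dateutil_parse returns for the string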
def test_serialize(self):
    site = SiteFactory()
    course_overview = CourseOverviewFactory()
    in_data = dict(month_for=datetime.date(2019, 10, 29),
                   count=42,
                   course_id=str(course_overview.id),
                   domain=u'wookie.example.com')
    serializer = CourseMauLiveMetricsSerializer(in_data)
    out_data = serializer.data
    assert set(out_data.keys()) == set(in_data.keys())
    assert out_data['count'] == in_data['count']
    assert dateutil_parse(out_data['month_for']).date() == in_data['month_for']
    assert out_data['domain'] == in_data['domain']
    assert out_data['course_id'] == in_data['course_id']
def parse_datetime(date_time, loader_context):
    if isinstance(date_time, datetime):
        return date_time
    elif isinstance(date_time, str):
        try:
            return dateutil_parse(
                date_time.strip(),
                dayfirst=loader_context.get("dayfirst", False),
                yearfirst=loader_context.get("yearfirst", True),
                ignoretz=loader_context.get("ignoretz", False),
            )
        except ValueError:
            # If dateutil can't parse it, it might be a human-readable date.
            return dateparser.parse(date_time)
    else:
        raise ValueError("date_time must be datetime or a str.")
def parse_archive(self, response):
    # The perks of having a JavaScript frontend ...
    issues = json.loads(
        response.css("router-view").re_first(
            "<router-view :data='([^']+)'>"))["issues"]
    latest_issue_date = dateutil_parse(
        issues[sorted(issues.keys())[-1]][-1], ignoretz=True)
    # The JS frontend calculates the issue number the same way, so this should be
    # somewhat official.
    issuenr = "{0[0]}{0[1]}".format(latest_issue_date.date().isocalendar())
    return scrapy.Request(
        response.urljoin(f"/api/archive/{issuenr}?count=1000&from=0"),
        self.parse_archive_search,
        meta={"issue_date": latest_issue_date},
    )
def get_seconds_from_epoch(timestamp: str) -> float:
    """If dateutil.parser.parse cannot parse DST timezones (e.g. PDT, EDT)
    correctly, then use dateparser.parse instead.
    """
    utc_epoch = datetime(1970, 1, 1, tzinfo=timezone.utc)
    utc_t = None
    try:
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            utc_t = dateutil_parse(timestamp)
    except Exception:
        pass
    if utc_t is None or utc_t.tzname() not in ("UTC", "Z"):
        utc_t = dateparser_parse(timestamp)
    utc_t = utc_t.astimezone(timezone.utc)
    return (utc_t - utc_epoch).total_seconds()
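# A minimal sketch of the same epoch arithmetic for a UTC-labelled input, where
# only dateutil is needed (the dateparser_parse fallback above exists for
# ambiguous DST names such as "PDT"/"EDT"):
from datetime import datetime, timezone
from dateutil.parser import parse as dateutil_parse

utc_epoch = datetime(1970, 1, 1, tzinfo=timezone.utc)
t = dateutil_parse("1970-01-02T00:00:00Z").astimezone(timezone.utc)
print((t - utc_epoch).total_seconds())  # -> 86400.0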
def get_time_from_str(when):
    """Convert a string to a time: first uses the dateutil parser, falls back
    on fuzzy matching with parsedatetime
    """
    zero_oclock_today = datetime.now(tzlocal()).replace(
        hour=0, minute=0, second=0, microsecond=0)
    try:
        event_time = dateutil_parse(when, default=zero_oclock_today)
    except ValueError:
        struct, result = fuzzy_date_parse(when)
        if not result:
            raise ValueError('Date and time is invalid: %s' % (when))
        event_time = datetime.fromtimestamp(time.mktime(struct), tzlocal())
    return event_time
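# Hedged examples of the two paths above (exact results depend on the local
# timezone and on the parsedatetime-based fuzzy_date_parse helper):
#   get_time_from_str('2018-07-12 13:00')  -> aware datetime for 13:00 local time on that day
#   get_time_from_str('tomorrow 9am')      -> ValueError from dateutil, then resolved by fuzzy_date_parse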
def _csc_dcat_search_ckan_datasets(context, data_dict):
    method_log_prefix = '[%s][_csc_dcat_search_ckan_datasets]' % __name__
    #log.debug('%s Init method. Inputs context=%s, data_dict=%s' % (method_log_prefix, context, data_dict))
    n = int(config.get('ckanext.dcat.datasets_per_page', DATASETS_PER_PAGE))
    limit = data_dict.get('limit', -1)
    if limit > -1 and limit < n:
        n = limit
    page = data_dict.get('page', 1) or 1
    try:
        page = int(page)
        if page < 1:
            raise wrong_page_exception
    except ValueError:
        raise wrong_page_exception

    modified_since = data_dict.get('modified_since')
    if modified_since:
        try:
            modified_since = dateutil_parse(modified_since).isoformat() + 'Z'
        except (ValueError, AttributeError):
            raise toolkit.ValidationError(
                'Wrong modified date format. Use ISO-8601 format')

    search_data_dict = {
        'rows': n,
        'start': n * (page - 1),
        'sort': 'organization asc, metadata_modified desc',
    }

    search_data_dict['q'] = data_dict.get('q', '*:*')
    search_data_dict['fq'] = data_dict.get('fq')
    search_data_dict['fq_list'] = []

    # Exclude certain dataset types
    search_data_dict['fq_list'].append('-dataset_type:harvest')
    search_data_dict['fq_list'].append('-dataset_type:showcase')

    if modified_since:
        search_data_dict['fq_list'].append(
            'metadata_modified:[{0} TO NOW]'.format(modified_since))

    query = toolkit.get_action('package_search')(context, search_data_dict)
    log.debug('%s End method. Returns query=%s' % (method_log_prefix, query))
    return query
def _update_watched_cache(self, cache, media_type, username=None, account=None):
    watched = db.get_user_data(
        data_type='watched',
        media_type=media_type,
        session=db.get_session(account),
        username=username,
    )
    for media in watched:
        media_id = media['ids']['trakt']
        cache[media_id] = media
        cache[media_id]['watched_at'] = dateutil_parse(
            media['last_watched_at'], ignoretz=True)
        cache[media_id]['plays'] = media['plays']
def test_you_have_been_added_for_adviser(self, settings):
    """
    Test the notification for when an adviser is added to an order.

    If the template variables have been changed in GOV.UK notifications,
    this is going to raise HTTPError (400 - Bad Request).
    """
    settings.OMIS_NOTIFICATION_API_KEY = settings.OMIS_NOTIFICATION_TEST_API_KEY
    notify = Notify()

    order = OrderFactory()

    notify.adviser_added(
        order=order,
        adviser=AdviserFactory(),
        by=AdviserFactory(),
        creation_date=dateutil_parse('2017-05-18'),
    )
def as_datetime_utc(datetime_string):
    """Returns `datetime` instance with UTC timezone

    This helper function centralizes converting "datetime as a string" to a
    datetime object in UTC.

    We use dateutil.parser.parse because it is convenient. However, these tests
    need to support multiple versions of Open edX and, with that, multiple
    versions of the dateutil package. While the dateutil package does handle
    timezone assignment, this approach is more portable.

    We may want to iterate on this to create a 'datetime_matches' function.
    However, then we have to consider type checking for the parameters, or
    enforce one type as string and the other as datetime, or do conversions.
    Basically, we might be making testing more complicated.
    """
    return dateutil_parse(datetime_string).replace(tzinfo=utc)
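# Hedged example of the helper's behaviour (assumes `utc` is the tzinfo object
# imported by the surrounding test module, e.g. pytz.utc):
#   as_datetime_utc('2019-10-29T12:00:00') == datetime(2019, 10, 29, 12, 0, tzinfo=utc)
#   # Note: .replace(tzinfo=utc) attaches UTC without converting, so an input that
#   # already carries an offset keeps its wall-clock time and is simply relabelled.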
def test_create(self, order_status):
    """Test a successful call to create a list of payments."""
    order = OrderFactory(status=order_status)

    url = reverse('api-v3:omis:payment:collection', kwargs={'order_pk': order.pk})
    response = self.api_client.post(
        url,
        [
            {
                'transaction_reference': 'some ref1',
                'amount': 1,
                'received_on': '2017-04-20',
            },
            {
                'transaction_reference': 'some ref2',
                'amount': order.total_cost - 1,
                'method': PaymentMethod.MANUAL,
                'received_on': '2017-04-21',
            },
        ],
    )
    assert response.status_code == status.HTTP_201_CREATED
    response_items = sorted(response.json(), key=itemgetter('transaction_reference'))
    assert response_items == [
        {
            'created_on': '2017-04-25T13:00:00Z',
            'reference': '201704250001',
            'transaction_reference': 'some ref1',
            'additional_reference': '',
            'amount': 1,
            'method': PaymentMethod.BACS,  # bacs is the default one
            'received_on': '2017-04-20',
        },
        {
            'created_on': '2017-04-25T13:00:00Z',
            'reference': '201704250002',
            'transaction_reference': 'some ref2',
            'additional_reference': '',
            'amount': order.total_cost - 1,
            'method': PaymentMethod.MANUAL,
            'received_on': '2017-04-21',
        },
    ]

    order.refresh_from_db()
    assert order.status == OrderStatus.PAID
    assert order.paid_on == dateutil_parse('2017-04-21T00:00:00Z')
def parse_date(s, is_begin):
    # Filter 'I' and 'II'
    s = s.strip('I ')
    DATE_MATCH = r'^(\d{1,2})\.(\d{1,2})\.(\d{4})$'
    m = re.match(DATE_MATCH, s)
    if not m:
        m = re.match(r'^(\d{4})$', s)
        if not m:
            return None
        if is_begin:
            day, mon = 1, 1
        else:
            day, mon = 31, 12
        year = int(m.groups()[0])
    else:
        day, mon, year = [int(x) for x in m.groups()]
    return dateutil_parse('-'.join([str(x) for x in (year, mon, day)])).date()
def convert_value(self, value):
    if isinstance(value, list):
        return datetime(*value)
    elif isinstance(value, dict):
        return datetime(**value)
    elif isinstance(value, int):
        return datetime.fromtimestamp(value, tz=self.default_timezone)
    elif isinstance(value, str):
        try:
            if not self.parse_format:
                return dateutil_parse(value)
            return self.get_parsed_value(value)
        except:
            return None
    elif isinstance(value, date):
        return datetime(year=value.year, month=value.month, day=value.day)
def images(self, name=None, quiet=False, all=False, viz=False, filters=None):  # pylint: disable=redefined-outer-name
    checks = (
        (not quiet, '"quiet" must be False'),
        (all or False, '"all" must be True'),
        (not viz, '"viz" must be False'),
        (filters is None, '"filters" must be None'),
    )

    for passed, err in checks:
        if not passed:
            raise NotImplementedError(err)

    if name:
        try:
            candidates = [self._findlayer(name)]
        except APIError:
            candidates = []
    else:
        candidates = self.layers

    images = []

    for candidate in candidates:
        images.append({
            'Created': int((dateutil_parse(candidate['Created']) - _EPOCH).total_seconds()),
            'Id': candidate['Id'],
            'ParentId': candidate['Parent'],
            'RepoTags': candidate['RepoTags'],
            'Size': candidate['Size'],
            'VirtualSize': candidate['VirtualSize'],
        })

    return images
def parse_datetime(value) -> datetime:
    """Parses a string as a datetime object.

    The reason for wrapping this functionality is to preserve compatibility
    and future exception handling.

    Another reason is to behave according to the configuration when parsing
    date and time.
    """
    timezone = configuredtimezone()

    # Parse and return if value is a unix timestamp
    if isinstance(value, float) or POSIX_TIME_PATTERN.match(value):
        value = float(value)
        if timezone is None:
            return datetime.fromtimestamp(value, tzutc()) \
                .replace(tzinfo=None)
        else:
            return datetime.fromtimestamp(value, timezone)

    parsed_value = dateutil_parse(value)

    if timezone is not None:
        # The application is configured to use UTC or another time zone:
        # Submitted without timezone: Reject and tell the user to specify the
        # timezone.
        if parsed_value.tzinfo is None:
            raise ValueError('You have to specify the timezone')

        # The parsed value is a timezone aware object.
        # If it ends with Z: accept and assume it is UTC,
        # then convert it to the configured timezone and continue the
        # rest of the process.
        parsed_value = parsed_value.astimezone(timezone)

    elif parsed_value.tzinfo:
        # The application is configured to use the system's local timezone
        # and the submitted value has tzinfo.
        # So convert it to the system's local timezone and remove the tzinfo
        # to obtain a naive object.
        parsed_value = parsed_value \
            .astimezone(localtimezone()) \
            .replace(tzinfo=None)

    return parsed_value
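# Hedged usage sketch (configuredtimezone() is assumed to return e.g. dateutil's
# tzutc() when the app is configured for UTC, and None for "system local"):
#   parse_datetime('1526203446')            -> epoch seconds, matched by POSIX_TIME_PATTERN
#   parse_datetime('2018-05-13T10:00:00Z')  -> aware datetime converted to the configured zone
#   parse_datetime('2018-05-13 10:00:00')   -> ValueError when a timezone is configured (naive input rejected)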
def convert_value(self, value):
    if isinstance(value, list):
        return date(*value)
    elif isinstance(value, dict):
        return date(**value)
    elif isinstance(value, int):
        return self.convert_value(datetime.fromtimestamp(value))
    elif isinstance(value, str):
        try:
            if not self.parse_format:
                value = dateutil_parse(value)
                return value.date()
            return self.convert_value(self.get_parsed_value(value))
        except:
            return None
    elif isinstance(value, datetime):
        return value.date()
def parse_datetime(datetime):
    if isinstance(datetime, (int, float)):
        pdt = py_datetime.fromtimestamp(datetime, tz=TimezoneUTC())
        return pdt
    if isinstance(datetime, py_datetime):
        return datetime
    if not isinstance(datetime, str):
        raise ValueError("datetime value not 'str' or 'datetime'")
    try:
        return dateutil_parse(datetime)
    except ValueError:
        raise ValueError('datetime value %r does not match known formats' % datetime)
def parse(self, raw_value):
    """If the input is already a datetime, pass it through. Otherwise, ensure
    that it is a str and use dateutil to parse it
    """
    value = self.empty_value
    if isinstance(raw_value, datetime) or raw_value is None:
        return raw_value
    self.assert_parse_received_correct_type(raw_value, str)
    raw_value = self.parse_as_text(raw_value)
    if raw_value:
        try:
            value = dateutil_parse(raw_value)
        except ValueError:
            self.add_error(self.parse_error_message.format(raw_value))
    else:
        value = None
    return value
def _get_up_mon_servers(self, fsid):
    # Resolve FSID to list of mon FQDNs
    servers = self.client.server_list_cluster(fsid)
    # Sort to get most recently contacted server first; drop any
    # for whom last_contact is None
    servers = [s for s in servers if s['last_contact']]
    servers = sorted(servers,
                     key=lambda t: dateutil_parse(t['last_contact']),
                     reverse=True)
    mon_fqdns = []
    for server in servers:
        for service in server['services']:
            service_id = ServiceId(*(service['id']))
            if service['running'] and service_id.service_type == MON and service_id.fsid == fsid:
                mon_fqdns.append(server['fqdn'])
    return mon_fqdns
def _update_ratings_cache(self, cache, media_type, username=None, account=None):
    ratings = db.get_user_data(
        data_type='ratings',
        media_type=media_type,
        session=db.get_session(account=account),
        username=username,
    )
    for media in ratings:
        # get the proper cache from the type returned by trakt
        media_id = media['ids']['trakt']
        cache[media_id] = media
        cache[media_id]['rated_at'] = dateutil_parse(media['rated_at'], ignoretz=True)
        cache[media_id]['rating'] = media['rating']
def update_feed(profile, provider, rss_url):
    Content.objects.filter(user=profile.user, provider=provider).delete()
    print rss_url
    feed = feedparser.parse(rss_url)
    for entry in feed.get('entries')[:10]:
        content = Content()
        content.user = profile.user
        content.provider = provider
        content.title = entry.title
        content.link = entry.link
        content.date = dateutil_parse(
            getattr(entry, 'published', entry.updated))
        content.mime_type, content.body = get_body_and_mime_type(entry)
        if content.mime_type == 'text/html':
            content.body = santize_and_hightlight_html(content.body)
        content.save()
def prediction2submit(prediction, cluster_map):
    res = []
    for district in prediction:
        res.append(
            pd.DataFrame({
                'prediction': prediction[district],
                'district': district
            }))
    res_1 = pd.concat(res)
    # res_1['district'] = res_1['district'].map(lambda x: cluster_map.ix[x,0])
    res_2 = pd.DataFrame()
    res_2['district'] = res_1['district'].map(
        lambda x: cluster_map.loc[x, 'district_id'])
    res_2['dts'] = res_1.index.map(lambda x: '{0}-{1}'.format(
        dateutil_parse(str(x / 1000)).date(), x % 1000))
    res_2['prediction'] = res_1['prediction']
    res_2['dts_sort'] = res_2.index
    return res_2.sort_values(['dts_sort', 'district']).drop('dts_sort', axis=1)
def parse_time(value) -> time:
    """Parses a string as a time object.

    .. note:: It ignores the date part of the value.

    The reason for wrapping this functionality is to preserve compatibility
    and future exception handling.
    """
    if isinstance(value, float):
        return datetime.utcfromtimestamp(value).time()

    # Parse and return if value is a unix timestamp
    if POSIX_TIME_PATTERN.match(value):
        value = float(value)
        return datetime.utcfromtimestamp(value).time()

    return dateutil_parse(value).time()
def change_creation_date(eml_fp, sent_date):
    """
    change file creation date to match sent_date

    Parameters:
    -----------
    eml_fp: str
        full path to the file
    sent_date: str
        date at which email was sent

    Returns:
    --------
    """
    emailtime = dateutil_parse(sent_date).timetuple()
    log_msg = "SENT ON: %s" % (str(emailtime))
    logging.debug(log_msg)
    atime = int(mktime(emailtime))
    times = (atime, atime)
    try:
        #new_fn_fp = abspath(join(dirname,new_filename))  #new filename full path
        #ChangeFileCreationTime(dirname+"\\"+new_filename,atime)
        #ChangeFileCreationTime(new_fn_fp,atime)
        wintime = pywintypes.Time(atime)
        try:
            #HERE WE TRY TO CHANGE the FILE CREATE DATE
            winfile = win32file.CreateFile(
                eml_fp, win32con.GENERIC_WRITE,
                win32con.FILE_SHARE_READ | win32con.FILE_SHARE_WRITE | win32con.FILE_SHARE_DELETE,
                None, win32con.OPEN_EXISTING,
                win32con.FILE_ATTRIBUTE_NORMAL, None)
            win32file.SetFileTime(winfile, wintime, None, None)
            winfile.close()
        except:
            log_msg = "179 error changing creation date for fname :%s" % (eml_fp)
            logging.info(log_msg)
            if stop_error:
                raise
        #NOW WE CHANGE THE LAST CHANGE TIME
        #setFileAttributes(eml_fp, from_sender, title,comments)
        os.utime(eml_fp, times)
    except:
        log_msg = "294 failed to change file flags for : %s" % (eml_fp)
        logging.info(log_msg)
        raise
def test_response_body(self, setup_es):
    """Tests the response body of a search query."""
    company = CompaniesHouseCompanyFactory(
        name='Pallas',
        company_number='111',
        incorporation_date=dateutil_parse('2012-09-12T00:00:00Z'),
        company_status='jumping',
    )
    sync_object(CompaniesHouseCompanySearchApp, company.pk)
    setup_es.indices.refresh()

    url = reverse('api-v4:search:companieshousecompany')
    response = self.api_client.post(url)

    assert response.status_code == status.HTTP_200_OK
    assert response.json() == {
        'count': 1,
        'results': [
            {
                'id': str(company.pk),
                'name': company.name,
                'company_category': company.company_category,
                'incorporation_date': company.incorporation_date.date().isoformat(),
                'company_number': company.company_number,
                'company_status': company.company_status,
                'registered_address': {
                    'line_1': company.registered_address_1,
                    'line_2': company.registered_address_2,
                    'town': company.registered_address_town,
                    'county': company.registered_address_county,
                    'postcode': company.registered_address_postcode,
                    'country': {
                        'id': str(company.registered_address_country.id),
                        'name': company.registered_address_country.name,
                    },
                },
                'sic_code_1': company.sic_code_1,
                'sic_code_2': company.sic_code_2,
                'sic_code_3': company.sic_code_3,
                'sic_code_4': company.sic_code_4,
                'uri': company.uri,
            },
        ],
    }
def wbdeb_parse(emit, html_file):
    doc = html.parse(html_file)
    for table in doc.findall('//table'):
        if 'List of Debarred' not in table.get('summary', ''):
            continue
        rows = table.findall('.//tr')
        print table.get('summary'), len(rows)
        for row in rows:
            tds = row.findall('./td')
            if len(tds) != 6:
                continue
            values = [clean_value(td) for td in tds]
            uid = sha1()
            for value in values:
                uid.update(value.encode('utf-8'))
            uid = uid.hexdigest()[:10]
            names = clean_name(values[0])
            if not len(names):
                log.warning("No name: %r", values)
                continue
            record = {
                'uid': make_id('wb', 'debarred', uid),
                'name': values[0],
                'nationality': normalize_country(values[2]),
                'program': values[5],
                'addresses': [{
                    'text': values[1],
                    'country': normalize_country(values[2])
                }],
                'other_names': [],
                'updated_at': dateutil_parse(values[3]).date().isoformat()
            }
            for name in names[1:]:
                record['other_names'].append({'other_name': name})
            record.update(SOURCE)
            emit.entity(record)
def test__update_an_event_with_naive_datetime(api_client, minimal_event_dict, user):
    # create an event
    api_client.force_authenticate(user=user)
    response = create_with_post(api_client, minimal_event_dict)
    assert_event_data_is_equal(minimal_event_dict, response.data)
    data2 = response.data

    # store updates
    event_id = data2.pop('@id')
    data2['start_time'] = (datetime.now() + timedelta(days=1)).isoformat()
    response2 = update_with_put(api_client, event_id, data2)

    # API should have assumed UTC datetime
    data2['start_time'] = pytz.utc.localize(dateutil_parse(data2['start_time'])).isoformat().replace('+00:00', 'Z')
    data2['event_status'] = 'EventRescheduled'

    # assert
    assert_event_data_is_equal(data2, response2.data)