def filter_timesteps(timesteps, aggregation="monthly", start=None, end=None):
    from dateutil.parser import parse as date_parser

    LOGGER.debug("aggregation: %s", aggregation)
    if timesteps is None or len(timesteps) == 0:
        return []
    timesteps.sort()
    work_timesteps = within_date_range(timesteps, start, end)
    new_timesteps = [work_timesteps[0]]
    for index in range(1, len(work_timesteps)):
        current = date_parser(new_timesteps[-1])
        candidate = date_parser(work_timesteps[index])
        if current.year < candidate.year:
            new_timesteps.append(work_timesteps[index])
        elif current.year == candidate.year:
            if aggregation == "daily":
                if current.timetuple()[7] == candidate.timetuple()[7]:
                    continue
            elif aggregation == "weekly":
                if current.isocalendar()[1] == candidate.isocalendar()[1]:
                    continue
            elif aggregation == "monthly":
                if current.month == candidate.month:
                    continue
            elif aggregation == "yearly":
                if current.year == candidate.year:
                    continue
            # all checks passed
            new_timesteps.append(work_timesteps[index])
        else:
            continue
    return new_timesteps
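# Hedged usage sketch for filter_timesteps above (assumes ISO-8601 timestep strings,
# a module-level LOGGER, and the within_date_range helper defined elsewhere in this
# collection; the values are made up for illustration):
#
#     steps = ["2020-01-05", "2020-01-20", "2020-02-10", "2021-03-01"]
#     filter_timesteps(steps, aggregation="monthly")
#     # -> ["2020-01-05", "2020-02-10", "2021-03-01"]  (first timestep kept per month)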
def get_plan_values(self, plan_column):
    """
    Get values from the Plans table and store in order matching mrn / study_instance_uid
    :param plan_column: name of the SQL column to be queried
    :type plan_column: str
    :return: values from the Plans table for the DVHs stored in this class
    :rtype: list
    """
    with DVH_SQL() as cnx:
        condition = "study_instance_uid in ('%s')" % "','".join(self.study_instance_uid)
        data = cnx.query('Plans', 'study_instance_uid, %s' % plan_column, condition)
        force_date = cnx.is_sqlite_column_datetime('Plans', plan_column)  # returns False for pgsql

    uids = [row[0] for row in data]
    values = [row[1] for row in data]

    if force_date:  # sqlite does not have date or time like variables
        for i, value in enumerate(values):
            try:
                if type(value) is int:
                    values[i] = str(date_parser(str(value)))
                else:
                    values[i] = str(date_parser(value))
            except Exception:
                values[i] = 'None'

    return [values[uids.index(uid)] for uid in self.study_instance_uid]
def check_data_response(self, params, expected, timedelta=None):
    '''
    Check an API call with a given set of parameters returns expected content
    '''
    response = requests.get(self.DATA_API_URL, params=params).json()
    if timedelta is None:
        self.assertEqual(response, expected)
    else:
        self.assertEqual(response['status'], expected['status'])
        self.assertEqual(response['errors'], expected['errors'])
        self.assertEqual(len(response['content']), len(expected['content']))
        for i, measurement in enumerate(response['content']):
            response_datetime = date_parser(response['content'][i]['datetime'])
            expected_datetime = date_parser(expected['content'][i]['datetime'])
            self.assertTrue(response_datetime >= expected_datetime)
            self.assertTrue(response_datetime < expected_datetime + timedelta)
            self.assertEqual(response['content'][i].keys(),
                             expected['content'][i].keys())
            for key, val in response['content'][i].iteritems():
                if key != 'datetime':
                    self.assertEqual(response['content'][i][key],
                                     expected['content'][i][key])
    return response
def suckdry_planlos():
    import urllib
    f = urllib.urlopen("http://planlosbremen.de/termine/service/monat")
    # https://planlosbremen.de/termine/service/location/5
    # jsondata = f.read()
    data = simplejson.load(f)
    u = db.User.find_one()
    for i in data:
        loc_id = i['fields']['location']
        url = urllib.urlopen("http://planlosbremen.de/termine/service/location/%s" % loc_id)
        loc = simplejson.load(url)
        location = loc_name = loc[0]['fields']['name']
        e = db.Event()
        f = i['fields']
        e.title = unicode(f['title'])
        e.author = u
        form_date = date_parser(f['datum']).date()
        form_time = date_parser(f['time']).time()
        e.eventdate = datetime.combine(form_date, form_time)
        e.short_desc = unicode(f['short_desc'])
        e.desc = unicode(f['desc'])
        e.url = unicode(f['exturl'])
        e.tags = [unicode(f['type'])]
        e.is_published = f['is_pub']
        e.save()
def _handle_slice_conversion(self, key: slice) -> slice:
    # handle slice start
    if type(key.start) == str:
        key = slice(nyc.localize(date_parser(key.start)), key.stop)
    elif type(key.start) == int:
        key = slice(self._convert_offset_to_datetime(key.start), key.stop)
    elif type(key.start) == date:
        key = slice(
            nyc.localize(datetime.combine(key.start, datetime.min.time())),
            key.stop,
        )
    # handle slice end
    if type(key.stop) == str:
        key = slice(
            key.start,
            nyc.localize(date_parser(key.stop)) + timedelta(days=1),
        )
    elif type(key.stop) == int:
        key = slice(key.start, self._convert_offset_to_datetime(key.stop))
    elif type(key.stop) == date:
        key = slice(
            key.start,
            nyc.localize(datetime.combine(key.stop, datetime.min.time()))
            + timedelta(days=1),
        )
    return key
def time_difference(start, end_list, unit='second'):
    """
    Calculate time differences between start and end_list items in the specified unit.

    :param start: start datetime (string or datetime-like value accepted by dateutil)
    :param end_list: iterable of end datetimes (strings or datetime-like values accepted by dateutil)
    :param unit: unit of the returned time differences (default = 'second')
    :return: list of time differences from start to each item in end_list, in the given unit
    """
    start = date_parser(start)
    end_list = list(map(lambda d: date_parser(d), end_list))
    differences = []
    for end in end_list:
        timedelta = (end - start).total_seconds()
        factor = 1
        if unit == 'minute':
            factor = 60
        elif unit == 'hour':
            factor = 3600
        elif unit == 'day':
            factor = 3600 * 24
        elif unit == 'year':
            factor = 3600 * 24 * 365
        differences.append(round(timedelta / factor, 1))
    return differences
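# Illustrative call for time_difference above (hypothetical values, assumes
# dateutil-parseable strings):
#
#     time_difference("2021-01-01 00:00:00",
#                     ["2021-01-01 00:01:00", "2021-01-01 01:00:00"],
#                     unit="minute")
#     # -> [1.0, 60.0]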
def create_users():
    User.objects.exclude(pk=1).delete()
    for pk, fields in users.iteritems():
        if pk != 1:
            if fields['email'] != '':
                existing = User.objects.filter(email=fields['email'])
                if existing.count() > 0:
                    ou = existing[0]
                    if ou.is_active == False and fields['is_active'] == True:
                        replace_users[ou.pk] = pk
                        for k, v in replace_users.iteritems():
                            if v == ou.pk:
                                replace_users[k] = pk
                        ou.delete()
                    elif ou.is_active == True and fields['is_active'] == False:
                        replace_users[pk] = ou.pk
                        for k, v in replace_users.iteritems():
                            if v == pk:
                                replace_users[k] = ou.pk
                        continue
                    else:
                        replace_users[ou.pk] = pk
                        for k, v in replace_users.iteritems():
                            if v == ou.pk:
                                replace_users[k] = pk
                        ou.delete()

            # print "email:", fields['email']
            nu = User(pk=pk)
            nu.username = fields['username']
            if fields['email']:
                nu.email = fields['email']
            nu.status = 1
            nu.password = fields['password']
            nu.full_name = fields['profile']['full_name']
            nu.message = fields['profile']['message']
            nu.is_active = fields['is_active']
            nu.is_staff = fields['is_staff']
            nu.is_superuser = fields['is_superuser']
            nu.comment_count = fields['profile']['comment_count']
            nu.dateo_count = fields['profile']['item_count']
            nu.vote_count = fields['profile']['vote_count']
            nu.client_domain = datea
            nu.save()

            joined = date_parser(fields['date_joined'])
            lastlog = date_parser(fields['last_login'])
            User.objects.filter(pk=nu.pk).update(date_joined=joined,
                                                 created=joined,
                                                 last_login=lastlog)

    for pk, fields in usersSocial.iteritems():
        if fields['user'] != 1:
            nusoc = UserSocialAuth(pk=pk)
            nusoc.provider = fields['provider']
            nusoc.uid = fields['uid']
            nusoc.user_id = get_user(int(fields['user']))
            nusoc.extra_data = fields['extra_data']
            nusoc.save()
def get_past_date(str_days_ago):
    """
    Converts arbitrary "updated at" strings to proper datetimes.
    """
    str_days_ago = str_days_ago.replace(' an ', ' 1 ')
    # When it's been <2 hours, Poshmark returns "an hour ago"
    # instead of "1 hour ago" - which, without this replacement,
    # screws up the later date parsing.
    today = datetime.datetime.today()
    split_str = str_days_ago.split()
    if 'Yesterday' in split_str:
        return datetime.datetime.now() - relativedelta(days=1)
    elif len(split_str) < 2:
        # Could all of this be replaced by date_parser?
        date = date_parser(str_days_ago[8:])
        return date
    elif len(split_str) == 2:
        date = date_parser(str_days_ago[8:])
        return date
    elif len(split_str) > 2:
        if 'a minute' in split_str[2]:
            return datetime.datetime.now() - \
                datetime.timedelta(minutes=1)
        elif 'seconds ago' in split_str[2]:
            return datetime.datetime.now() - \
                datetime.timedelta(minutes=1)
        elif 'minute' in split_str[2]:
            if split_str[1] == 'a':
                split_str[1] = 1
            return datetime.datetime.now() - \
                datetime.timedelta(minutes=int(split_str[1]))
        elif 'hour' in split_str[2]:
            date = datetime.datetime.now() - \
                relativedelta(hours=int(split_str[1]))
            return date
        elif 'day' in split_str[2]:
            date = today - relativedelta(days=int(split_str[1]))
            return date
        if 'seconds' in str_days_ago:
            return datetime.datetime.now() - \
                datetime.timedelta(minutes=1)
        else:
            return date_parser(str_days_ago[8:])
    else:
        raise ValueError(f'Supplied date str is {split_str}, ' +
                         'which doesn\'t match any supported formats.')
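# Hedged usage sketch for get_past_date above (results depend on the current time;
# the input strings are assumed to carry an 8-character prefix such as "Updated "
# because of the str_days_ago[8:] slicing):
#
#     get_past_date("Updated an hour ago")   # roughly now - 1 hour
#     get_past_date("Updated 3 days ago")    # roughly today - 3 days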
def parse_dates(start=None, end=None):
    try:
        start_date = date_parser(request.form.get("start_date", start))
        end_date = date_parser(request.form.get("end_date", end))
    except ValueError as e:
        raise ResourceInvalidInputException(str(e))

    if end_date < start_date:
        raise ResourceInvalidInputException("end date < start date")

    return start_date, end_date
def get_index_of_first_date(self):
    for i, row in enumerate(self.text):
        if are_all_strings_in_text(row, ['/', ':', 'M']) or \
                are_all_strings_in_text(row, ['.', ':', 'M']):
            try:
                date_parser(row.split(' ')[0].strip())
                return i
            except:
                pass
    return None
class XMLPRTransDataParser(XMLParser):
    """ Parse Trans Data """

    RequiredField = namedtuple('RequiredField', 'f_name f_type')
    RECORD_FIELDS = (
        RequiredField('ref', str),
        RequiredField('doc_sq', float),
        RequiredField('doc_datd', lambda _iso_date: date_parser(_iso_date)),
        RequiredField('doc_nam_a', str),
        RequiredField('doc_iban_a', str),
        RequiredField('doc_nam_b', str),
        RequiredField('doc_iban_b', str),
        RequiredField('msrprd_date', lambda _iso_date: date_parser(_iso_date)),
        RequiredField('id_contract', str),
        RequiredField('doc_status', int),
    )

    def __init__(self, _data):
        super().__init__(_data)

    def parse(self):
        _parsed_data = []
        records_obj = self.tree_obj.findall("record")
        if not records_obj:
            abort(XMLResponse(code="80", message=f"Empty PRTrans Data xml", status=400))
        for record in records_obj:
            parsed_record = {}
            for field in self.RECORD_FIELDS:
                field_name = field.f_name
                element = record.find(f".//{field_name}", namespaces=self.namespaces)
                if element is None or not element.text:
                    abort(XMLResponse(code="30", message=f"'{field_name}' is required", status=400))
                try:
                    # try to convert element to required type
                    validated_value = field.f_type(element.text)
                except ValueError:
                    abort(XMLResponse(code="30", message=f'{field.f_name} has incorrect data type', status=400))
                else:
                    parsed_record[field.f_name] = validated_value
                    if field_name == "doc_status":
                        self.validate_doc_status_value(validated_value, field_name)
            _parsed_data.append(parsed_record)
        return _parsed_data

    @staticmethod
    def validate_doc_status_value(value, field_name):
        if value not in (0, -1):
            abort(XMLResponse(code="30", message=f"'{field_name}' should be 0 or -1", status=400))
def compute_duration_of_a_track(a_track):
    features_from_a_track = a_track['features']
    time_from_a_track = list(
        map(lambda feature: feature['properties']['time'], features_from_a_track))
    a = date_parser(time_from_a_track[0])
    b = date_parser(time_from_a_track[-1])
    total_minutes = float((b - a).seconds) / 60
    return total_minutes
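# Usage sketch for compute_duration_of_a_track above (minimal, made-up GeoJSON-like
# track; only the 'properties'/'time' fields are required by the function):
#
#     track = {"features": [
#         {"properties": {"time": "2021-05-01T10:00:00Z"}},
#         {"properties": {"time": "2021-05-01T10:45:00Z"}},
#     ]}
#     compute_duration_of_a_track(track)  # -> 45.0 (minutes)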
def add_user_tweets_to_scrap():
    command_service.add_user_tweets_to_scrap(
        username=request.form['username'],
        since=date_parser(request.form['since']) if 'since' in request.form else None,
        until=date_parser(request.form['until']) if 'until' in request.form else None,
        queue_name=request.form['queue_name'],
        scrap_series=request.form['scrap_series'],
        interval_type=interval_utils.TimeIntervalType.get_from_string(
            request.form['interval_type']))
    return get_success_response()
def measured_date(self):
    index_of_first_date = self.get_index_of_first_date()
    date_candidate_1 = self.text[index_of_first_date].split(' ')[0]
    date_candidate_2 = self.text[index_of_first_date + 2].split(' ')[0]
    try:
        return str(date_parser(date_candidate_1)).split(' ')[0]
    except:
        try:
            return str(date_parser(date_candidate_2)).split(' ')[0]
        except:
            pass
    return None
def is_date(self, date_string, fuzzy=False):
    """
    Return whether the string can be interpreted as a date.

    :param date_string: str, string to check for date
    :param fuzzy: bool, ignore unknown tokens in string if True
    """
    try:
        date_parser(date_string, fuzzy=fuzzy)
        return True
    except ValueError:
        return False
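# Usage sketch for is_date above (hypothetical inputs; `obj` stands for an instance
# of the class this method belongs to):
#
#     obj.is_date("2021-03-04")                    # True
#     obj.is_date("not a date")                    # False
#     obj.is_date("due on March 4th", fuzzy=True)  # True - unknown tokens are skipped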
def parse_date_str(date_str):
    if date_str:
        try:
            return date_parser(date_str)
        except ParserError:
            # Adding exception to try fuzzy matching only if strict matching doesn't work.
            # Instigated by errors w/ RadioCanada date format
            # e.g. "Fri Oct 01 2021 21:36:43 GMT+0000 (Coordinated Universal Time)"
            return date_parser(date_str, fuzzy=True)
        except (ValueError, OverflowError, AttributeError, TypeError):
            # near all parse failures are due to URL dates without a day
            # specifier, e.g. /2014/04/
            return None
def create_dateos():
    Dateo.objects.all().delete()
    DateoStatus.objects.all().delete()

    for pk, fields in dateos.iteritems():
        d = Dateo(pk=pk)
        d.status = fields['status']
        d.content = fields['content']
        d.user_id = get_user(fields['user'])
        d.address = fields['address']
        if fields['position']:
            d.position = GEOSGeometry(fields['position'])
        d.vote_count = fields['vote_count']
        d.comment_count = fields['comment_count']
        d.follow_count = fields['follow_count']
        d.campaign_id = fields['action']
        d.client_domain = datea

        cid = fields['action']
        if cid in replace_mapeos:
            cid = replace_mapeos[cid]
        campaign = Campaign.objects.get(pk=cid)

        # carry over the category from the previous mapping
        d.category = campaign.category
        d.save()

        for i in fields['images']:
            d.images.add(Image.objects.get(pk=i))

        # category -> tags
        # print fields
        if fields['category']:
            new_tag = find_tag(fields['category'])
            d.tags.add(new_tag)
        d.tags.add(campaign.main_tag)

        created = date_parser(fields['created'])
        modified = date_parser(fields['modified'])
        Dateo.objects.filter(pk=d.pk).update(created=created, modified=modified)

        if d.status != 'new':
            ds = DateoStatus()
            ds.user = d.campaign.user
            ds.status = d.status
            ds.dateo = d
            ds.campaign = d.campaign
            ds.save()
def add_search_to_scrap():
    command_service.add_search_to_scrap(
        phrase=request.form['to_search'],
        since=date_parser(request.form['since']) if 'since' in request.form else None,
        until=date_parser(request.form['until']) if 'until' in request.form else None,
        language=request.form['language'] if 'language' in request.form else None,
        queue_name=request.form['queue_name'],
        scrap_series=request.form['scrap_series'],
        interval_type=interval_utils.TimeIntervalType.get_from_string(
            request.form['interval_type']))
    return get_success_response()
def insert_influx(data):
    points = []
    # Add status flag
    data['status'] = 1

    if data.has_key('timestamp'):
        t = data.pop('timestamp')
        if type(t) != datetime:
            t = date_parser().parse(t.decode('utf-8'))
    else:
        t = datetime.now()
    timestamp = t.isoformat()

    tags = {}
    if data.has_key('site_id'):
        tags["site_id"] = int(data.pop('site_id'))
    if data.has_key('site_name'):
        tags["site_name"] = data.pop('site_name')

    logger.debug("Prepping data for influx %s" % str(data))
    for key in data:
        point = {
            "measurement": key,
            "time": timestamp,
            "tags": tags,
            "fields": {
                "value": float(data[key])
            }
        }
        points.append(point)

    logger.debug('writing influx points: %s' % (str(points)))
    influx.write_points(points)
def parse_date_str(date_str):
    if date_str:
        try:
            return date_parser(date_str)
        except (ValueError, OverflowError, AttributeError, TypeError):
            # logger.error("error occurs when parse date str: %s url: %s ", date_str, url)
            return None
def get_context_data(self, **kwargs):
    """ Get Win data for use in the template """
    context = FormView.get_context_data(self, **kwargs)
    context.update({"win": self.win_dict})
    context['win']['date'] = date_parser(self.win_dict['date'])
    return context
def _cast_to_type(self, value):
    """Convert the value to a date and raise error on failures"""
    if isinstance(value, str) and not value:
        return None

    if isinstance(value, datetime.datetime):
        self.fail("datetime", value=value)

    if isinstance(value, datetime.date):
        return value

    try:
        value = date_parser(value)
        if not (
            value.hour == 0
            and value.minute == 0
            and value.second == 0
            and value.microsecond == 0
        ):
            self.fail("datetime", value=value)
        return value.date()
    except ValueError:
        self.fail("invalid", value=value)
def fetch_dataset_metadata(dataset):
    try:
        ds_entity = registry.action.package_show(id=dataset.name)
    except Exception:
        raise CouldNotFetchPackageList()

    dataset.last_modified = date_parser(ds_entity.get('metadata_modified', ""))

    new_urls = [
        resource['url']
        for resource in ds_entity.get('resources', [])
        if resource['url'] not in dataset.resource_urls
    ]
    dataset.resource_urls.extend(new_urls)

    urls = [resource['url'] for resource in ds_entity.get('resources', [])]
    for deleted in set(dataset.resource_urls) - set(urls):
        dataset.resource_urls.remove(deleted)

    try:
        dataset.license = ds_entity['license']
    except KeyError:
        pass
    dataset.is_open = ds_entity.get('isopen', False)

    db.session.add(dataset)
    try:
        db.session.commit()
    except sa.exc.IntegrityError:
        db.session.rollback()
    return dataset
def fetch_dataset_metadata(dataset):
    ds_reg = registry.action.package_show(id=dataset.name)
    if ds_reg.get('success', False):
        ds_entity = ds_reg['result']
        dataset.last_modified = date_parser(ds_entity.get('metadata_modified', ""))

        new_urls = [resource['url']
                    for resource in ds_entity.get('resources', [])
                    if resource['url'] not in dataset.resource_urls]
        dataset.resource_urls.extend(new_urls)

        urls = [resource['url'] for resource in ds_entity.get('resources', [])]
        for deleted in set(dataset.resource_urls) - set(urls):
            dataset.resource_urls.remove(deleted)

        try:
            dataset.license = ds_entity['license']
        except KeyError:
            pass
        dataset.is_open = ds_entity.get('isopen', False)

        db.session.add(dataset)
        try:
            db.session.commit()
        except sa.exc.IntegrityError:
            db.session.rollback()
        return dataset
    else:
        raise CouldNotFetchPackageList()
def date_to_utc(input):
    """Parse a date string and return it formatted as MM/DD/YYYY;
    return the input unchanged if it cannot be parsed."""
    try:
        date = date_parser(input)
        return date.strftime('%m/%d/%Y')
    except Exception:
        return input
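# Usage sketch for date_to_utc above (note: despite the name, the function only
# reformats the parsed date as MM/DD/YYYY and does not perform timezone conversion):
#
#     date_to_utc("2021-03-04T12:00:00Z")  # -> '03/04/2021'
#     date_to_utc("garbage")               # -> 'garbage' (returned unchanged)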
def test_label_create(mocker, emburse_client):
    label_id = str(uuid.uuid4())
    new_label = {
        "id": label_id,
        "url": "https://api.emburse.com/v1/labels/{0}".format(label_id),
        "name": "Storm Trooper #{0}".format(randint(1000, 9999)),
        "created_at": datetime.datetime.utcnow().isoformat()
    }
    label = emburse_client.Label
    mocker.patch.object(label, 'make_request')
    label.make_request.return_value = new_label
    label = label.create(**{'name': new_label['name']})
    assert isinstance(label, Label)
    assert isinstance(label.created_at, datetime.datetime)
    for key, value in new_label.items():
        assert hasattr(label, key)
        if isinstance(value, dict):
            obj = getattr(label, key)
            for sub_key, sub_value in value.items():
                assert hasattr(obj, sub_key)
                assert getattr(obj, sub_key) == sub_value
        elif key == 'created_at':
            assert getattr(label, key) == date_parser(value)
        else:
            assert getattr(label, key) == value
def _extract_date(line):
    """Return date that is in line"""
    for date_str in RE_DATE.finditer(line):
        try:
            return date_parser(date_str.group('date')).date()
        except:
            continue
def test_shared_link_create(mocker, emburse_client):
    link_id = str(uuid.uuid4())
    card_id = str(uuid.uuid4())
    new_link_data = {
        "id": link_id,
        "url": "https://api.emburse.com/v1/shared-links/{0}".format(link_id),
        "link": "https://app.emburse.com/c/{0}".format(card_id),
        "card": card_id
    }
    link = emburse_client.SharedLink
    mocker.patch.object(link, 'make_request')
    link.make_request.return_value = new_link_data
    link = link.create(**new_link_data)
    assert isinstance(link, SharedLink)
    for key, value in new_link_data.items():
        assert hasattr(link, key)
        if isinstance(value, dict):
            obj = getattr(link, key)
            for sub_key, sub_value in value.items():
                assert hasattr(obj, sub_key)
                assert getattr(obj, sub_key) == sub_value
        elif key == 'created_at':
            assert getattr(link, key) == date_parser(value)
        else:
            assert getattr(link, key) == value
def str_arr_to_date_arr(arr, date_parser_kwargs=None, force=False):
    """Convert an array of datetime strings to a list of datetime objects

    Parameters
    ----------
    arr : array-like
        Array of datetime strings compatible with dateutil.parser.parse
    date_parser_kwargs : dict, optional
        Keyword arguments to be passed into dateutil.parser.parse
    force : bool
        If true, failed parsings will result in original value. If false,
        dateutil.parser.parse's error will be raised on failures.

    Returns
    -------
    list
        list of datetime objects
    """
    kwargs = {} if date_parser_kwargs is None else date_parser_kwargs
    dates = []
    for date_str in arr:
        try:
            date = date_parser(date_str, **kwargs)
        except Exception as e:
            if force:
                date = date_str
            else:
                raise e
        dates.append(date)
    return dates
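# Usage sketch for str_arr_to_date_arr above (made-up input showing the force flag):
#
#     str_arr_to_date_arr(["2016-01-01", "not a date"], force=True)
#     # -> [datetime.datetime(2016, 1, 1, 0, 0), 'not a date']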
def _hg_log(repo_path, max_depth, since_date):
    repo = hglib.open(repo_path)
    commits = repo.log()

    since_date = date_parser(since_date)
    since_date = time.mktime(since_date.timetuple())

    ret = []
    for i, commit in enumerate(commits):
        (rev, node, tags, branch, author, desc, date) = commit
        # lets convert time to Unix timestamp
        date = int(time.mktime(date.timetuple()))

        if i > max_depth:
            break
        if date < since_date:
            break

        ret.append({
            'hash': node,
            'author': author,
            'time': date,
            'commit': desc
        })
    return ret
def _get_update_dates(section, docname, post_date_format):
    """ Return list of dates of updates found in the section. """
    update_nodes = list(section.traverse(UpdateNode))
    update_dates = []
    for update_node in update_nodes:
        try:
            update = datetime.strptime(update_node["date"], post_date_format)
        except ValueError:
            if date_parser:
                try:
                    update = date_parser(update_node["date"])
                except ValueError:
                    raise ValueError("invalid post date in: " + docname)
            else:
                raise ValueError(
                    "invalid post date (%s) in " % (update_node["date"])
                    + docname
                    + ". Expected format: %s" % post_date_format
                )
        # Insert a new title element which contains the `Updated on {date}` logic.
        substitute = nodes.title("", "Updated on " + update.strftime(post_date_format))
        update_node.insert(0, substitute)
        update_node["classes"] = ["note", "update"]
        update_dates.append(update)
    return update_dates
def _getitem(self, key):
    if type(key) == str:
        key = nyc.localize(date_parser(key))
    elif type(key) == int:
        key = self._convert_offset_to_datetime(key)
    elif type(key) == date:
        key = nyc.localize(datetime.combine(key, datetime.min.time()))

    if (
        not len(self.data.symbol_data)
        or key > self.data.symbol_data.index[-1]
    ):
        self.data.fetch_data_timestamp(key)
    if not len(self.data.symbol_data):
        raise ValueError(
            f"details for symbol {self.data.symbol} do not exist"
        )
    try:
        return self.data.symbol_data.iloc[
            self.data.symbol_data.index.get_loc(key, method="ffill")
        ][self.name]
    except KeyError:
        self.data.fetch_data_timestamp(key)
        return self.data.symbol_data.index.get_loc(
            key, method="nearest"
        )
def _custom_save(self, force_insert=False, validate=True, clean=False,
                 write_concern=None, cascade=None, cascade_kwargs=None,
                 _refs=None, username=None, **kwargs):
    """
    Override our core custom save. This will ensure if there is a "date"
    string available for the email that we generate a corresponding
    "isodate" field which is more useful for database sorting/searching.
    """
    if hasattr(self, 'date'):
        if self.date:
            if isinstance(self.date, datetime.datetime):
                self.isodate = self.date
                self.date = convert_datetimes_to_string(self.date)
            else:
                self.isodate = date_parser(self.date, fuzzy=True)
        else:
            if self.isodate:
                if isinstance(self.isodate, datetime.datetime):
                    self.date = convert_datetimes_to_string(self.isodate)
                else:
                    self.isodate = None
    return super(self.__class__, self)._custom_save(force_insert, validate,
                                                    clean, write_concern,
                                                    cascade, cascade_kwargs,
                                                    _refs, username)
def _get_update_dates(section, docname, post_date_format):
    """Return list of dates of updates found in the section."""
    update_nodes = list(section.traverse(UpdateNode))
    update_dates = []
    for update_node in update_nodes:
        try:
            update = datetime.strptime(update_node['date'], post_date_format)
        except ValueError:
            if date_parser:
                try:
                    update = date_parser(update_node['date'])
                except ValueError:
                    raise ValueError('invalid post date in: ' + docname)
            else:
                raise ValueError('invalid post date (%s) in ' % (update_node['date'])
                                 + docname + ". Expected format: %s" % post_date_format)
        substitute = nodes.title(u'', update_node[0][0].astext() + u' '
                                 + update.strftime(post_date_format))
        update_node[0].replace_self(substitute)
        # for now, let updates look like note
        update_node['classes'] = ['note', 'update']
        update_dates.append(update)
    return update_dates
def parseDate(self, dateString):
    try:
        return date_parser(dateString).date()
    except Exception as e:
        print(str(e))
        return None
def locked(win):
    if not win['sent']:
        return False
    sent = date_parser(win['sent'][0])
    sent_delta = timezone.now() - sent  # tz doesn't really matter
    return sent_delta.days >= settings.EDIT_TIMEOUT_DAYS
def test_label_update(mocker, emburse_client, label_dict):
    lab_data = label_dict
    lab_update_data = {"name": "Droid Repair Fleet #{0}".format(randint(1000, 9999))}
    updated_lab_dict = lab_data
    updated_lab_dict['name'] = lab_update_data['name']
    label = Label(
        auth_token='Testing123',
        **lab_data
    )
    mocker.patch.object(label, 'make_request')
    label.make_request.return_value = updated_lab_dict
    label.update(**lab_update_data)
    assert isinstance(label, Label)
    assert isinstance(label.created_at, datetime.datetime)
    for key, value in updated_lab_dict.items():
        assert hasattr(label, key)
        if isinstance(value, dict):
            obj = getattr(label, key)
            for sub_key, sub_value in value.items():
                assert hasattr(obj, sub_key)
                assert getattr(obj, sub_key) == sub_value
        elif key == 'created_at':
            assert getattr(label, key) == date_parser(value)
        else:
            assert getattr(label, key) == value
def parse_date_str(date_str):
    try:
        datetime_obj = date_parser(date_str)
        return datetime_obj
    except:
        # near all parse failures are due to URL dates without a day
        # specifier, e.g. /2014/04/
        return None
def parse_date_str(date_str):
    if date_str:
        try:
            return date_parser(date_str)
        except (ValueError, OverflowError, AttributeError, TypeError):
            # near all parse failures are due to URL dates without a day
            # specifier, e.g. /2014/04/
            return None
def get_context_data(self, **kwargs):
    context = TemplateView.get_context_data(self, **kwargs)

    # get all user's wins
    url = settings.WINS_AP + '?user__id=' + str(self.request.user.id)
    win_response = rabbit.get(url, request=self.request).json()
    wins = win_response['results']

    # parse dates
    for win in wins:
        win['created'] = date_parser(win['created'])
        win['date'] = date_parser(win['date'])
        if win['updated']:
            win['updated'] = date_parser(win['updated'])
        win['sent'] = [date_parser(d) for d in win['sent']]
        if win['responded']:
            win['responded']['created'] = (
                date_parser(win['responded']['created'])
            )
        win['last_modified'] = win['updated'] or win['created']

    # split wins up for user into unsent, sent and responded
    unsent = [w for w in wins if not w['complete']]
    context['unsent'] = sorted(unsent, key=lambda w: w['last_modified'])

    responded = [w for w in wins if w['responded']]
    context['responded'] = sorted(
        responded,
        key=lambda w: w['company_name'],
    )

    # skip ones that weren't yet sent as well as not responded yet
    # better to show not sent wins as a separate table in UI
    sent = [
        w for w in wins
        if w['complete'] and w['sent'] and w not in context['responded']
    ]
    context['sent'] = sorted(
        sent,
        key=lambda w: w['sent'][0],
        reverse=True,
    )

    return context
def fetch_dataset_metadata(dataset):
    ds_entity = registry.package_entity_get(dataset.name)
    dataset.last_modified = date_parser(ds_entity.get('metadata_modified', ""))
    new_urls = [resource['url']
                for resource in ds_entity.get('resources', [])
                if resource['url'] not in dataset.resource_urls]
    dataset.resource_urls.extend(new_urls)
    db.session.add(dataset)
    return dataset
def save_item(self, item):
    items = self.database.items
    item = dict(item)
    item['date_published'] = date_parser(item['date_published'])
    items.update_one(
        {self.uniq_key: item[self.uniq_key]},
        {'$set': item},
        upsert=True,
        bypass_document_validation=self.safe
    )
def within_date_range(timesteps, start=None, end=None):
    from dateutil.parser import parse as date_parser

    start_date = None
    if start is not None:
        start_date = date_parser(start)
    end_date = None
    if end is not None:
        end_date = date_parser(end)

    new_timesteps = []
    for timestep in timesteps:
        candidate = date_parser(timestep)
        # within time range?
        if start_date is not None and candidate < start_date:
            continue
        if end_date is not None and candidate > end_date:
            break  # timesteps are expected to be sorted, so we can stop here
        new_timesteps.append(timestep)
    return new_timesteps
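# Usage sketch for within_date_range above (hypothetical ISO strings; the input is
# assumed to be sorted ascending, which is why the function can break early):
#
#     within_date_range(["2020-01-01", "2020-06-01", "2021-01-01"],
#                       start="2020-03-01", end="2020-12-31")
#     # -> ["2020-06-01"]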
def edit(service_coupon_id):
    serviceCoupon = g.proxies.ServiceCoupons.from_id(service_coupon_id)
    if not serviceCoupon:
        abort(404)

    serviceCoupon["ServiceCoupon"]["starts"] = date_parser(serviceCoupon["ServiceCoupon"]["starts"])
    serviceCoupon["ServiceCoupon"]["expires"] = date_parser(serviceCoupon["ServiceCoupon"]["expires"])

    form = ServiceCouponForm(**serviceCoupon["ServiceCoupon"])
    facade = Facade(form=form, serviceCoupon=serviceCoupon)

    groups = g.proxies.ServiceGroups.list()
    form.service_group_id.choices = groups

    if form.validate_on_submit():
        serviceCoupon = g.proxies.ServiceCoupons.update(service_coupon_id, **form.data)
        facade["service_coupon_id"] = serviceCoupon["ServiceCoupon"]["id"]
        facade.successful = True

    return facade
def get_context_data(self, **kwargs):
    context = TemplateView.get_context_data(self, **kwargs)
    resp = get_win_details(kwargs['win_id'], self.request)
    if resp.status_code != 200:
        raise Http404
    context['win'] = resp.json()
    context['locked'] = locked(context['win'])
    context['edit_days'] = settings.EDIT_TIMEOUT_DAYS
    context['win']['date'] = date_parser(context['win']['date'])
    return context
def parse(self, fname, content=None):
    """ Read a single file, filter, and parse as one data matrix

    Inputs:
        fname - full file path to file for parsing
        content - file content (optional)
    """
    if content is None:
        self.ctime = np.datetime64(date_parser(time.ctime(os.stat(fname).st_ctime)))

    _fname = basename(fname)
    m = re.match(self.match_string, _fname, re.IGNORECASE)
    label = _fname
    if m is not None:
        label = m.group('label').lower()

    data_lst = []
    _name, extension = os.path.splitext(_fname.lower())
    try:
        if extension == '.json':
            data, label = self.parse_json(fname, label, content=content)
        elif extension == '.log':
            data, _ = self.parse_log(fname, content=content)
        elif extension == '.csv':
            data, _ = self.parse_csv(fname, content=content)
        elif extension == '.lst':
            data, _ = self.parse_lst(fname, content=content)
        elif extension == '.zip':
            data, label = self.parse_zip(fname, label, content=content)
        else:
            if content is None and not os.path.exists(fname):
                raise ParseErrorNotExist('File "%s" not found' % _fname)
            raise ParseErrorUnknownType('Unknown file type (%s)' % _fname)
        # aggregate all
        data_lst += data
    except (KeyboardInterrupt, SystemExit):
        raise
    except ParseErrorUnknownType:
        # don't bug about unknown file types
        pass
    except:
        if not os.path.isfile(fname):
            # Re-raise the exception if no such file
            exec_info = sys.exc_info()
            raise exec_info[0], exec_info[1], exec_info[2]
        if self.verbosity and not _fname.startswith('.'):
            # ignore normal exceptions and proceed to next file
            print 'Ignoring %s' % _fname

    # Keep the name
    self.fname = _name
    self.name = _name
    return data_lst, label, fname
def test_episodes(client):
    show = Show(client, 1396)
    episodes = show.seasons[1].episodes
    episode = episodes[1]
    assert isinstance(episode, Episode)
    assert episode.season == 1
    assert episode.number == 2
    assert episode.trakt == 74162
    assert episode.title == "Crocodile"
    assert isinstance(episode.first_aired, datetime)
    assert episode.first_aired == date_parser("2006-10-08T04:00:00.000Z")
    assert episode.images.screenshot.full.startswith("http")
def parse(self, xml, namespace):
    """
    :param xml: the etree.Element to search in
    :param namespace: not used yet
    :rtype: DateTime, may be timezone aware or naive
    """
    value = self._fetch_by_xpath(xml, namespace)
    if value:
        if self.date_format:
            return datetime.datetime.strptime(value, self.date_format)
        return date_parser(value)
    return self._default
def test_search_movie(client):
    result = client.movies("The Big Lebowski")
    assert result
    movie = result[0]
    assert isinstance(movie, Movie)
    assert movie.title == "The Big Lebowski"
    assert movie.year == 1998
    assert movie.id == 84
    assert isinstance(movie.released, datetime)
    assert movie.released == date_parser("1998-03-06")
    assert movie.images.poster.full.startswith("http")
    assert movie.images.fanart.full.startswith("http")
def save_from_form(self, form, author_id):
    self.author = author_id

    # directly usable values
    self.title = form.title.data
    self.short_desc = form.short_desc.data
    self.desc = form.desc.data
    self.location = form.location.data
    self.url = form.url.data

    # fields with special handling
    self.gender = config.GENDER_CHOICES[form.gender.data]
    # tags =
    form_date = date_parser(form.eventdate_date.data).date()
    form_time = date_parser(form.eventdate_time.data).time()
    self.eventdate = datetime.combine(form_date, form_time)

    if form.eventdate2_date and form.eventdate2_time:
        form_date = date_parser(form.eventdate2_date.data).date()
        form_time = date_parser(form.eventdate2_time.data).time()
        self.eventdate_end = datetime.combine(form_date, form_time)

    self.tags = form.tags.data
    self.save()
    return self._id
def __call__(self):
    self.install_upgrade_profile()

    # Skip if eCH-0147 imports are not enabled
    if not api.portal.get_registry_record("ech0147_import_enabled",
                                          interface=IECH0147Settings):
        return

    # Only query non-imported-from-bundle documents and dossiers
    affected_types = ("opengever.dossier.businesscasedossier",
                      "opengever.document.document")
    query = {"portal_type": affected_types, "bundle_guid": None}

    primitive_pyxb_types = tuple(pyxb.binding.datatypes.__dict__.get("_PrimitiveDatatypes"))
    if len(primitive_pyxb_types) != 19:
        raise InconsistentPyXBPrimitiveTypesException

    trivial_pyxb_types = (pyxb_int, pyxb_boolean, pyxb_string)
    trivial_types = (int, basestring)

    for obj in self.objects(query, "Ensure all fields of eCH-0147 imported objects are properly typed."):
        # Skip imported-from-bundle objects not on the index
        if IAnnotations(obj).get(BUNDLE_GUID_KEY):
            continue

        for schema in iterSchemata(obj):
            for name, field in getFieldsInOrder(schema):
                value = getattr(field.interface(obj), name, None)
                value_type = type(value)

                # Only touch pyxb typed values
                if field._type and value is not None and isinstance(value_type, primitive_pyxb_types):
                    object_path = "/".join(obj.getPhysicalPath())
                    logger.info(
                        "Found PyXB values in object %s field %s field type %s value type %s.",
                        object_path,
                        name,
                        repr(field._type),
                        repr(value_type),
                    )
                    if isinstance(value_type, trivial_pyxb_types) and field._type in trivial_types:
                        with writable(field) as wfield:
                            wfield.set(wfield.interface(obj), wfield._type(value))
                    elif isinstance(value_type, pyxb_date) and field._type is date:
                        with writable(field) as wfield:
                            wfield.set(wfield.interface(obj), wfield._type.fromordinal(wfield.toordinal()))
                    elif isinstance(value_type, pyxb_datetime) and field._type is datetime:
                        with writable(field) as wfield:
                            wfield.set(wfield.interface(obj), date_parser(value.ISO()))
                    else:
                        logger.warn(
                            "PyXB values in object %s field %s field type %s value type %s fell through!",
                            object_path,
                            name,
                            repr(field._type),
                            repr(value_type),
                        )
def fetch_dataset_metadata(dataset):
    ds_reg = registry.action.package_show_rest(id=dataset.name)
    if ds_reg.get('success', False):
        ds_entity = ds_reg['result']
        dataset.last_modified = date_parser(ds_entity.get('metadata_modified', ""))
        new_urls = [resource['url']
                    for resource in ds_entity.get('resources', [])
                    if resource['url'] not in dataset.resource_urls]
        dataset.resource_urls.extend(new_urls)
        db.session.add(dataset)
        return dataset
    else:
        raise CouldNotFetchPackageList()
def add_posts(self, legid, fb_post_list):
    """Takes a facebook post in the form of a dictionary, adds to database."""
    new_posts = []
    for fb_post in fb_post_list:
        if FbData.objects.filter(post_id=fb_post['id']).exists():
            pass
        else:
            new_post = FbData(legid=legid,
                              post=fb_post['message'].encode('utf-8'),
                              timestamp=date_parser(fb_post['created_time']),
                              post_id=fb_post['id'])
            new_posts.append(new_post)
    FbData.objects.bulk_create(new_posts)
def test_date_parser(client):
    client.request = Mock(return_value=[
        {"type": "show", "show": {
            "title": "foo",
            "ids": {"trakt": 1},
            "first_aired": None,
            "updated_at": "2006-10-08T04:00:00.000Z"
        }}
    ])
    show = client.search("foo")[0]
    assert show.first_aired is None
    assert show.updated_at == date_parser("2006-10-08T04:00:00.000Z")