def _format_url(self, template, source, last_updated, today=None):
    date_format = '%m-%d-%Y'
    lo = dateparser(last_updated).strftime(date_format)
    hi = datetime.now().strftime(date_format)
    if today:
        hi = dateparser(today).strftime(date_format)
    return template.format(source, lo, hi)
def datetime_parser(date_string):
    """
    Parse any time string. Use a custom timezone matching if the original
    matching does not pull one out.
    """
    try:
        date = dateparser(date_string)
        if date.tzinfo is None:
            date = dateparser(date_string, tzinfos=tzd)
        return date
    except:
        raise ValueError("Could not parse date string!")
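# Hedged usage sketch for datetime_parser above, not part of the original
# source. Assumptions: `dateparser` is dateutil.parser.parse and `tzd` is a
# custom tzinfos mapping; the example values below are hypothetical.
from dateutil.parser import parse as dateparser

tzd = {'PST': -8 * 3600, 'EST': -5 * 3600}

print(datetime_parser('2016-02-22 14:30:00+00:00'))  # offset parsed directly
print(datetime_parser('2016-02-22 14:30:00 PST'))    # falls back to the tzd mapping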
def __init__(self, *args, **kwargs):
    '''
    Expected keyword arguments:
    id: token id string
    expires: ISO-8601 string representation of expiry time
    issued_at: ISO-8601 string representation of time of issue
    tenant: tenant name this token is associated with
    '''
    # Fail if any expected parameter is not available.
    self._id = kwargs['id']
    self._expires = dateparser(kwargs['expires'])
    self._issued_at = dateparser(kwargs['issued_at'])
    self._tenant = kwargs['tenant']
def doImportNotes(request, sourceFile, tz, resource):
    dictreader = csv.DictReader(sourceFile)
    for row in dictreader:
        row['author'] = request.user
        if row['content'] or row['tags']:
            if 'first_name' in row and 'last_name' in row:
                if row['first_name'] and row['last_name']:
                    try:
                        row['author'] = User.objects.get(first_name=row['first_name'],
                                                         last_name=row['last_name'])
                        del row['first_name']
                        del row['last_name']
                    except:
                        pass
            if row['event_time']:
                event_time = dateparser(row['event_time'])
                if tz != pytz.utc:
                    localized_time = tz.localize(event_time)
                    event_time = TimeUtil.timeZoneToUtc(localized_time)
                row['event_time'] = event_time
            try:
                # TODO implement tags when ready
                del row['tags']
            except:
                pass
            NOTE_MODEL = Note.get()
            note = NOTE_MODEL(**row)
            note.creation_time = datetime.now(pytz.utc)
            note.modification_time = datetime.now(pytz.utc)
            if resource:
                note.position = getClosestPosition(timestamp=note.event_time,
                                                   resource=resource)
            note.save()
def getGpxTrackSet(docroot, ns):
    trackCollection = []
    for trk in docroot.findall("ns:trk", ns):
        trackName = trk.find("ns:name", ns).text
        trackSegPointList = trk.find("ns:trkseg", ns)
        trackSegment = {"name": trackName}
        trackSegPoints = []
        foundTimeInTrackData = True
        for point in trackSegPointList:
            if point.find("ns:time", ns) is not None:
                time = dateparser(point.find("ns:time", ns).text)
                time = time.replace(tzinfo=None)
            else:
                foundTimeInTrackData = False
                time = "<no time>"
            trackPoint = {"lat": float(point.attrib["lat"]),
                          "lon": float(point.attrib["lon"]),
                          "ele": float(point.find("ns:ele", ns).text),
                          "time": time}
            trackSegPoints.append(trackPoint)
        trackSegment["foundTimeInTrackData"] = foundTimeInTrackData
        trackSegment["trackPoints"] = trackSegPoints
        trackCollection.append(trackSegment)
    return trackCollection
def __parse_date_entry(self, data):
    if data is None:
        return None
    try:
        return dateparser(data['$'])
    except (ValueError, KeyError):
        raise WhoisError('Error parsing date from %s' % data)
def find_latest_weather_alert():
    try:
        request = urllib2.urlopen("http://alerts.weather.gov/cap/us.php?x=1")
        dom = xml.dom.minidom.parse(request)
        latest_update = datetime.datetime(1970, 1, 1)  ## for comparing against the latest weather entry
        ## this is all necessary because the government didn't feel like sorting the XML feed
        ## so the timestamps are all over the place. We have to find the latest entry ourselves
        weather_entries = dom.getElementsByTagName('entry')
        cur_entry = 0
        for entry in weather_entries:
            urgency = entry.getElementsByTagName('cap:urgency')[0].childNodes[0].data
            severity = entry.getElementsByTagName('cap:severity')[0].childNodes[0].data
            updated = entry.getElementsByTagName('updated')[0].childNodes[0].data
            updated = dateparser(updated)
            updated = (updated - updated.utcoffset()).replace(tzinfo=None)
            cur_entry += 1
            ## only get the latest immediate and severe alerts, too much spam otherwise
            if latest_update < updated and urgency == "Immediate" and severity == "Severe":
                latest_update = updated
                latest_entry = cur_entry
                alert_url = entry.getElementsByTagName('id')[0].childNodes[0].data
        return latest_update, latest_entry, alert_url
    except Exception as inst:
        print "find_latest_weather_alert: " + str(inst)
        pass
def process(self):
    report = self.receive_message()
    self.event_date = None
    raw_report = utils.base64_decode(report.get("raw"))
    for row in raw_report.splitlines():
        row = row.strip()
        if row.startswith('; Last-Modified:'):
            self.event_date = row.split('; Last-Modified: ')[1].strip()
            self.event_date = dateparser(self.event_date)
        if row == "" or row.startswith(';'):
            continue
        row_splitted = row.split(';')
        network = row_splitted[0].strip()
        event = Event(report)
        event.add('source.network', network)
        event.add('extra', {'blocklist': row_splitted[1].strip()})
        if self.event_date:
            event.add('time.source', self.event_date.isoformat())
        event.add('classification.type', 'spam')
        event.add('raw', row)
        self.send_message(event)
    self.acknowledge_message()
def simplify_message(message):
    'transforms a message instance into built-in types (json encodable)'
    assert not message.is_multipart()
    headers = NS((k.replace('-', '_').lower(), decode(v))
                 for k, v in message.items())
    # if the @ is not found or after the first space
    sender = headers.get('from', '')
    at = sender.find('@')
    if sender and (at == -1 or sender.find(' ') < at):
        sender = sender.replace(' at ', '@')
    # do not make it any easier to spam?
    # headers['from'] = sender
    if sender:
        headers['from_hash'] = md5(sender.encode('utf-8')).hexdigest()
    date = headers.get('date')
    if date:
        utc = dateparser(date).astimezone(UTC)
        headers['date_utc'] = int(time.mktime(utc.timetuple()))
    message_id = headers.get('message_id')
    if message_id:
        message_id_hash = sha1(message_id.encode('utf-8')).hexdigest()
        headers['message_id_hash'] = message_id_hash
    return NS(headers=headers, payload=message.get_payload())
def main():
    from dateutil.parser import parse as dateparser
    reporter_date = datetime.today() - timedelta(2)
    if len(sys.argv) >= 2:
        reporter_date = dateparser(sys.argv[1])
    dbConnPool = _get_user_db_pool()
    dbStstcPool = _get_ststc_db_pool()
    reConn = _getReConn()
    toDate = date.today() - timedelta(2)
    toDate = reporter_date
    try:
        cacheCatReporter(dbStstcPool, reConn, toDate)
    except UnicodeEncodeError:
        pass
    schedule_info(dbConnPool, reConn)
    # page_index(dbStstcPool, reConn)
    reConn.set('ibbd-ststc-date', (toDate + timedelta(1)).strftime('%Y/%m/%d'))
    del reConn
    dbConnPool.disconnect()
    dbStstcPool.disconnect()
    del dbConnPool
def _clean(self, value, instance):
    if not isinstance(value, date):
        try:
            value = dateparser(value)
        except Exception:
            raise ValidationError(
                self.validation_error.format(value))
    return self.todate(value)
def get_object(self): page = Page(slug=self.kwargs["slug"]) version = self.kwargs.get("version") date = self.kwargs.get("date") if version: return page.versions.as_of(version=int(version)) if date: return page.versions.as_of(date=dateparser(date))
def get_object(self):
    page = Page(slug=self.kwargs['slug'])
    version = self.kwargs.get('version')
    date = self.kwargs.get('date')
    if version:
        return page.versions.as_of(version=int(version))
    if date:
        return page.versions.as_of(date=dateparser(date))
def to_timestamp(dt):
    if isinstance(dt, str):
        dt = dateparser(dt)
    if isinstance(dt, date):
        return time.mktime(dt.timetuple())
    elif isinstance(dt, NUMBER):
        return dt
    elif dt is not None:
        raise ValueError(dt)
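# Hedged usage sketch for to_timestamp above, not part of the original source.
# Assumptions: NUMBER is a tuple of numeric types such as (int, float), and
# `dateparser`, `date`, and `time` are imported as in the surrounding module.
from datetime import date

print(to_timestamp('2017-01-02 03:04:05'))  # string -> parsed -> mktime()
print(to_timestamp(date(2017, 1, 2)))       # date/datetime handled via timetuple()
print(to_timestamp(3.5))                    # numbers pass through unchanged
print(to_timestamp(None))                   # no branch matches; returns None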
def get_redirect_url(self, slug, file, **kwargs):
    page_file = PageFile(slug=slug, name=file)
    version = self.kwargs.get('version')
    date = self.kwargs.get('date')
    if version:
        page_file = page_file.versions.as_of(version=int(version))
    if date:
        page_file = page_file.versions.as_of(date=dateparser(date))
    return page_file.file.url
def checktime(tweet, time1, time2):
    # Get the time this tweet was tweeted
    if "postedTime" in tweet:
        tweettime = tweet['postedTime']
        tweettime = dateparser(tweettime)  # Get the date
        td = timedelta(hours=-5)  # Normalize times to EST (-5 hours to GMT as reported from GNIP)
        tweettime += td  # Add the time delta to tweet time to get time of tweet in EST
        # If tweet time is between or the same as time1 and time2 then it is good
        if time1.date() <= tweettime.date() and tweettime.date() <= time2.date():
            return True
    return False
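# Hedged usage sketch for checktime above, not part of the original source;
# the tweet dict and window below are hypothetical GNIP-style values.
from datetime import datetime

tweet = {'postedTime': '2014-03-10T15:04:05.000Z'}
window_start = datetime(2014, 3, 9)
window_end = datetime(2014, 3, 11)
print(checktime(tweet, window_start, window_end))  # True: adjusted date falls in window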
def __init__(self, fname_pattern=None, timestr=None, fnames_only=False):
    self.fnames_only = fnames_only
    if fname_pattern and os.path.exists(fname_pattern):
        if os.path.isfile(fname_pattern):
            self.get_df(fname_pattern)
        elif os.path.isdir(fname_pattern):
            pass
    self.timestr = timestr
    self.current_time = dateparser(timestr)
    self.fname = os.path.join(datapath, self.current_time.strftime("%Y%m%d%H"))
    self.increment = timedelta(hours=1)
def get_object(self):
    page_slug = self.kwargs.get('slug')
    try:
        page = Page.objects.get(slug=page_slug)
        tags = page.pagetagset
        version = self.kwargs.get('version')
        date = self.kwargs.get('date')
        if version:
            return tags.versions.as_of(version=int(version))
        if date:
            return tags.versions.as_of(date=dateparser(date))
    except (Page.DoesNotExist, PageTagSet.DoesNotExist):
        return None
def get_timerange(timevar='time_ago', time_ago='1_days', date1=None, date2=None):
    """returns 2 unix timestamps for a time range, based either on a
    "time_ago" string or on a pair of date strings.

    Options
    --------
    timevar (string): one of 'time_ago' [default] or 'date_range'
    time_ago (string): time ago string (see Note 1)
    date1 (string): string for initial date (see Note 2)
    date2 (string): string for final date (see Note 2)

    Notes:
    ------
    1. The time_ago string has the form '%f_%s' with the string one of
       ('minutes', 'hours', or 'days'), like
           time_ago='1.5_days'
           time_ago='15_minutes'
    2. 'date1' and 'date2' are strings of the form "%Y-%m-%d %H:%M:%S"
    """
    if (timevar.lower().startswith('date') and
            date1 is not None and date2 is not None):
        tmin = mktime(dateparser(date1).timetuple())
        tmax = mktime(dateparser(date2).timetuple())
        if tmin > tmax:
            tmin, tmax = tmax, tmin
    else:
        tmax = time()
        tval, tunit = time_ago.split('_')
        opts = {}
        opts[tunit] = float(tval)
        dt_max = datetime.fromtimestamp(tmax)
        dt_min = dt_max - timedelta(**opts)
        tmin = mktime(dt_min.timetuple())
    return tmin, tmax
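# Hedged usage sketch for get_timerange above, not part of the original source
# (relies on the same mktime/time/datetime/timedelta/dateparser imports).

# last 1.5 days, ending now
tmin, tmax = get_timerange(timevar='time_ago', time_ago='1.5_days')

# explicit date range; the bounds are swapped if given out of order
tmin, tmax = get_timerange(timevar='date_range',
                           date1='2016-03-02 12:00:00',
                           date2='2016-03-01 00:00:00')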
def get_object(self): page = Page(slug=slugify(self.kwargs["slug"])) # A dummy page object. latest_page = page.history.most_recent() # Need to set the pk on the dummy page for correct MapData lookup. page.pk = latest_page.id page.name = latest_page.name self.page = page mapdata = MapData(page=page) version = self.kwargs.get("version") date = self.kwargs.get("date") if version: return mapdata.history.as_of(version=int(version)) if date: return mapdata.history.as_of(date=dateparser(date))
def set_metadata(self, obj, item, tags=[]):
    schema = obj.Schema()
    schema['title'].set(obj, item['title'])
    if schema.has_key('text'):
        schema['text'].set(obj, item['bodyText'])
    schema['creators'].set(obj, [item['creator']])
    effective_date = dateparser().parse(item['effective_date'])
    schema['effectiveDate'].set(obj, effective_date)
    schema['creation_date'].set(obj, effective_date)
    schema['modification_date'].set(obj, effective_date)
    schema['subject'].set(obj, item['tags'] + tags)
    if item.has_key('remote_url') and schema.has_key('remoteUrl'):
        schema['remoteUrl'].set(obj, item['remote_url'])
    if item.has_key('author'):
        schema['contributors'].set(obj, [item['author']])
    obj.reindexObject()
def get_context_data(self, **kwargs):
    context = super(CompareView, self).get_context_data(**kwargs)
    if self.kwargs.get('date1'):
        # Using datetimes to display diff.
        date1 = self.kwargs.get('date1')
        date2 = self.kwargs.get('date2')
        # Query parameter list used in history compare view.
        dates = self.request.GET.getlist('date')
        if not dates:
            dates = [v for v in (date1, date2) if v]
        dates = [dateparser(v) for v in dates]
        old = min(dates)
        new = max(dates)
        new_version = get_versions(self.object).as_of(date=new)
        prev_version = new_version.version_info.version_number() - 1
        if len(dates) == 1 and prev_version > 0:
            old_version = get_versions(self.object).as_of(
                version=prev_version)
        elif prev_version <= 0:
            old_version = None
        else:
            old_version = get_versions(self.object).as_of(date=old)
    else:
        # Using version numbers to display diff.
        version1 = self.kwargs.get('version1')
        version2 = self.kwargs.get('version2')
        # Query parameter list used in history compare view.
        versions = self.request.GET.getlist('version')
        if not versions:
            versions = [v for v in (version1, version2) if v]
        if not versions:
            raise Http404("Versions not specified")
        versions = [int(v) for v in versions]
        old = min(versions)
        new = max(versions)
        if len(versions) == 1:
            old = max(new - 1, 1)
        if old > 0:
            old_version = get_versions(self.object).as_of(version=old)
        else:
            old_version = None
        new_version = get_versions(self.object).as_of(version=new)
    context.update({'old': old_version, 'new': new_version})
    return context
def getGpxWaypointList(docroot, ns):
    wptList = []
    for wpt in docroot.findall("ns:wpt", ns):
        if wpt.find("ns:time", ns) is not None:
            time = dateparser(wpt.find("ns:time", ns).text)
        else:
            time = None
        wptData = {"time": time,
                   "name": wpt.find("ns:name", ns).text,
                   "lat": float(wpt.attrib["lat"]),
                   "lon": float(wpt.attrib["lon"]),
                   "ele": float(wpt.find("ns:ele", ns).text),
                   "desc": wpt.find("ns:desc", ns).text,
                   "cmt": wpt.find("ns:cmt", ns).text,
                   "markerAndColor": [s.strip() for s in
                                      wpt.find("ns:sym", ns).text.split(",")]}
        wptList.append(wptData)
    return wptList
def get_object(self):
    region = self.get_region()
    # A dummy page object.
    page = Page(
        slug=slugify(self.kwargs['slug']),
        region=region
    )
    latest_page = page.versions.most_recent()
    # Need to set the pk on the dummy page for correct MapData lookup.
    page.pk = latest_page.id
    page.name = latest_page.name
    self.page = page
    mapdata = MapData(page=page, region=region)
    version = self.kwargs.get('version')
    date = self.kwargs.get('date')
    if version:
        return mapdata.versions.as_of(version=int(version))
    if date:
        return mapdata.versions.as_of(date=dateparser(date))
def get_weather_alert_data(alert_url):
    try:
        request = urllib2.urlopen(alert_url)
        dom = xml.dom.minidom.parse(request)
        msgType = dom.getElementsByTagName('msgType')[0].childNodes[0].data
        note = dom.getElementsByTagName('description')[0].childNodes[0].data
        note = note.replace("\n", " ")
        pattern = re.compile("\s+")
        note = pattern.sub(" ", note)
        ## turning off the text for now because its too much spam
        note = ""
        event = dom.getElementsByTagName('event')[0].childNodes[0].data
        urgency = dom.getElementsByTagName('urgency')[0].childNodes[0].data
        severity = dom.getElementsByTagName('severity')[0].childNodes[0].data
        certainty = dom.getElementsByTagName('certainty')[0].childNodes[0].data
        senderName = dom.getElementsByTagName('senderName')[0].childNodes[0].data
        ## Use the "effective" value because "sent" changes every time
        ## the document is retrieved
        updated = dom.getElementsByTagName('effective')[0].childNodes[0].data
        updated = dateparser(updated)
        updated = (updated - updated.utcoffset()).replace(tzinfo=None)
        ago = (datetime.datetime.utcnow() - updated).seconds / 60
        short_url = tools.shorten_url(alert_url)
        ## old text, too verbose
        ## alert_text = "[%s] %s: %s Urgency: %s Severity: %s Certainty: %s | %s (%s minutes ago)" % (senderName, msgType, event, urgency, severity, certainty, note[0:170], ago)
        ## new text is self limiting to the IRC limit of 428 characters
        alert_text_start = "[%s] %s: %s" % (senderName, msgType, event)
        alert_text_end = "(%s minutes ago) [ %s ]" % (ago, short_url)
        alert_text = "%s | %s %s" % (alert_text_start,
                                     note[:425 - (len(alert_text_start + alert_text_end))],
                                     alert_text_end)
        return alert_text
    except Exception as inst:
        print "get_weather_alert_data: " + str(inst)
        pass
def planExport(request, uuid, name, time=None, outputDirectory=None, isAjax=False):
    """
    Normally plan export urls are built up by the planExporter.url
    but some exporters (pml) can take a time.
    """
    dbPlan = getDbPlan(uuid)
    formatCode = request.GET.get('format')
    if formatCode is not None:
        # user explicitly specified e.g. '?format=kml'
        exporterClass = choosePlanExporter.PLAN_EXPORTERS_BY_FORMAT.get(formatCode)
        if exporterClass is None:
            return HttpResponseBadRequest('invalid export format %s' % formatCode)
    else:
        # filename ends with e.g. '.kml'
        exporterClass = None
        for entry in choosePlanExporter.PLAN_EXPORTERS:
            if name.endswith(entry.extension):
                exporterClass = entry.exporterClass
        if exporterClass is None:
            return HttpResponseBadRequest('could not infer export format to use: "format" query parameter not set and extension not recognized for filename "%s"' % name)
    exporter = exporterClass()
    if time:
        try:
            thetime = dateparser(time)
            context = DotDict({'startTime': thetime.astimezone(pytz.utc)})
            exporter.initPlan(dbPlan, context)
        except:
            pass
    if outputDirectory:
        # output the exported file to a directory
        exporter.exportDbPlanToPath(dbPlan, os.path.join(outputDirectory, name))
        return True
    elif not isAjax:
        return exporter.getHttpResponse(dbPlan, attachmentName=name)
    else:
        return exporter.getHttpResponse(dbPlan, attachmentName=None)
def process(self):
    report = self.receive_message()
    self.event_date = None
    if report is None or not report.contains("raw"):
        self.acknowledge_message()
        return
    raw_report = utils.base64_decode(report.value("raw"))
    for row in raw_report.split('\n'):
        row = row.strip()
        if row.startswith('; Last-Modified:'):
            self.event_date = row.split('; Last-Modified: ')[1].strip()
            self.event_date = dateparser(self.event_date)
        if row == "" or row.startswith(';'):
            continue
        row_splitted = row.split(';')
        network = row_splitted[0].strip()
        extra = json.dumps({'blocklist': row_splitted[1].strip()})
        event = Event(report)
        event.add('source.network', network, sanitize=True)
        event.add('extra', extra, sanitize=True)
        if self.event_date:
            event.add('time.source', self.event_date.isoformat(), sanitize=True)
        event.add('classification.type', u'spam')
        event.add('raw', row, sanitize=True)
        self.send_message(event)
    self.acknowledge_message()
def extractFromJson(self, overWriteDateModified=True, overWriteUuid=True):
    if overWriteUuid:
        if not self.uuid:
            self.uuid = makeUuid()
            self.jsonPlan.uuid = self.uuid
            self.jsonPlan.serverId = self.pk
    if overWriteDateModified:
        self.jsonPlan.dateModified = (datetime.datetime
                                      .now(pytz.utc)
                                      .replace(microsecond=0)
                                      .isoformat())
        self.jsonPlan.dateModified = self.jsonPlan.dateModified[:-6] + 'Z'
    self.name = self.jsonPlan.name
    self.jsonPlan.url = self.get_absolute_url()
    self.jsonPlan.serverId = self.pk
    self.dateModified = dateparser(self.jsonPlan.dateModified).replace(tzinfo=pytz.utc)
    plannerUsers = User.objects.filter(username=self.jsonPlan.creator)
    if plannerUsers:
        self.creator = plannerUsers[0]
    else:
        self.creator = None
    # fill in stats
    try:
        exporter = statsPlanExporter.StatsPlanExporter()
        # print ' about to do stats'
        stats = exporter.exportDbPlan(self)
        for f in ('numStations', 'numSegments', 'numCommands',
                  'lengthMeters', 'estimatedDurationSeconds'):
            setattr(self, f, stats[f])
        self.stats.numCommandsByType = stats["numCommandsByType"]
        self.summary = statsPlanExporter.getSummary(stats)
    except:
        logging.warning('extractFromJson: could not extract stats from plan %s', self.uuid)
        raise  # FIX
    return self
def handlegeocsv(geo, tweet):
    if geo is None:
        return ""
    tweettime = ""
    tweetuser = ""
    bodytext = ""
    if "postedTime" in tweet:
        tweettime = tweet['postedTime']
        tweettime = dateparser(tweettime)  # Get the date
        td = timedelta(hours=-5)  # Normalize times to EST (-5 hours to GMT as reported from GNIP)
        tweettime += td  # Add the time delta to tweet time to get time of tweet in EST
        tweettime = tweettime.strftime("%Y%m%d")
    if "actor" in tweet and "displayName" in tweet['actor']:
        tweetuser = tweet['actor']['displayName']
    if "body" in tweet:
        bodytext = tweet['body']
        # bodytext = re.sub(wspattern, ' ', bodytext)
    # endstr = "," + tweettime + "," + tweetuser + "," + bodytext
    endstr = "," + tweettime
    if geo['type'] == "Point":
        lat = str(geo['coordinates'][1])
        lon = str(geo['coordinates'][0])
        return lat.decode('utf-8') + "," + lon.decode('utf-8') + endstr + "\n"
    elif geo['type'] == "Polygon":
        coordinates = geo['coordinates']
        # Sanity check polygon is simple (only a bounding box)
        if len(coordinates) != 1 or len(coordinates[0]) != 4:
            error("polygon size is not correct")
        coordinates = coordinates[0]  # Enter the simple polygon
        lllist = zip(coordinates[0], coordinates[1], coordinates[2], coordinates[3])
        lonavg = sum(lllist[0]) / float(len(lllist[0]))
        latavg = sum(lllist[1]) / float(len(lllist[1]))
        return (str(round(lonavg, 5)).decode('utf-8') + "," +
                str(round(latavg, 5)).decode('utf-8') + endstr + "\n")
    else:
        return ""
def process(self):
    report = self.receive_message()
    self.event_date = None
    if report is None or not report.contains("raw"):
        self.acknowledge_message()
        return
    raw_report = utils.base64_decode(report.value("raw"))
    for row in raw_report.split("\n"):
        row = row.strip()
        if row.startswith("; Last-Modified:"):
            self.event_date = row.split("; Last-Modified: ")[1].strip()
            self.event_date = dateparser(self.event_date)
        if row == "" or row.startswith(";"):
            continue
        row_splitted = row.split(";")
        network = row_splitted[0].strip()
        event = Event(report)
        event.add("source.network", network)
        event.add("extra", {"blocklist": row_splitted[1].strip()})
        if self.event_date:
            event.add("time.source", self.event_date.isoformat())
        event.add("classification.type", "spam")
        event.add("raw", row)
        self.send_message(event)
    self.acknowledge_message()
def schedulePlans(request, redirect=True):
    flight = None
    lastPlanExecution = None
    pe = None
    if request.method == 'POST':
        try:
            pids = request.POST['planIds']
            planIds = []
            for item in pids.split(","):
                planIds.append(int(item))
            plans = PLAN_MODEL.get().objects.filter(id__in=planIds)
            if 'planExecutionId' in request.POST and request.POST['planExecutionId'] != '':
                pe = PLAN_EXECUTION_MODEL.get().objects.get(pk=int(request.POST['planExecutionId']))
            schedule_date_string = request.POST['schedule_date']
            original_schedule_date = None
            prefix = None
            if schedule_date_string:
                # read the date; it comes in as UTC
                original_schedule_date = dateparser(schedule_date_string)
                original_schedule_date = pytz.utc.localize(original_schedule_date)
                schedule_date = original_schedule_date
                if pe:
                    firstPlan = pe.plan
                else:
                    firstPlan = plans[0]
                local_date = utcToTimeZone(original_schedule_date,
                                           str(firstPlan.jsonPlan.site.alternateCrs.properties.timezone))
                prefix = "%04d%02d%02d" % (local_date.year, local_date.month, local_date.day)
            flight_name = request.POST['flight']
            # see if flight name matches prefix, if not go by the date
            if prefix and flight_name:
                if not flight_name.startswith(prefix):
                    flight_name = None
            try:
                flight = FLIGHT_MODEL.get().objects.get(name=flight_name)
            except FLIGHT_MODEL.get().DoesNotExist:
                # see if we can look it up by date
                if original_schedule_date:
                    flights = FLIGHT_MODEL.get().objects.filter(name__startswith=prefix)
                    if flights:
                        # pick the first one
                        flight = flights[0]
                    else:
                        # it does not exist we better make one
                        prefix = prefix + "A"
                        groupFlight, created = GROUP_FLIGHT_MODEL.get().objects.get_or_create(name=prefix)
                        for vehicle in VEHICLE_MODEL.get().objects.all():
                            newFlight = FLIGHT_MODEL.get()()
                            newFlight.group = groupFlight
                            newFlight.vehicle = vehicle
                            newFlight.name = prefix + "_" + vehicle.name
                            newFlight.locked = False
                            newFlight.uuid = uuid4()
                            newFlight.save(force_insert=True)
                            if not flight:
                                flight = newFlight
            if flight:
                for plan in plans:
                    if not pe:
                        pe = PLAN_EXECUTION_MODEL.get()()
                    pe.planned_start_time = schedule_date
                    pe.flight = flight
                    pe.plan = plan
                    if settings.XGDS_PLANNER2_SCHEDULE_EXTRAS_METHOD:
                        pe = getClassByName(settings.XGDS_PLANNER2_SCHEDULE_EXTRAS_METHOD)(request, pe)
                    pe.save()
                    lastPlanExecution = pe
        except:
            traceback.print_exc()
            return HttpResponse(json.dumps({'Success': "False", 'msg': 'Plan not scheduled'}),
                                content_type='application/json',
                                status=406)
            pass
    if redirect:
        return HttpResponseRedirect(reverse('planner2_index'))
    else:
        if lastPlanExecution:
            return HttpResponse(json.dumps(lastPlanExecution.toSimpleDict(), cls=DatetimeJsonEncoder),
                                content_type='application/json')
        return HttpResponse(json.dumps({'Success': "True", 'msg': 'Plan scheduled'}),
                            content_type='application/json')
def wdmtoswmm5rdii(wdmpath, *dsns, **kwds):
    """Print out DSN data to the screen in SWMM5 RDII format.

    Parameters
    ----------
    wdmpath
        Path and WDM filename.
    dsns
        The Data Set Numbers in the WDM file.
    {start_date}
    {end_date}
    """
    start_date = kwds.setdefault("start_date", None)
    end_date = kwds.setdefault("end_date", None)
    # Need to make sure that all DSNs are the same interval and all are
    # within start and end dates.
    collect_tcodes = {}
    collect_tsteps = {}
    collect_keys = []
    for dsn in dsns:
        dsn_desc = _describedsn(wdmpath, dsn)
        collect_tcodes[dsn_desc["tcode"]] = 1
        collect_tsteps[dsn_desc["tstep"]] = 1
        if start_date:
            assert dateparser(start_date) >= dateparser(dsn_desc["start_date"])
        if end_date:
            assert dateparser(end_date) <= dateparser(dsn_desc["end_date"])
        collect_keys.append((dsn_desc["dsn"], dsn_desc["location"]))
    assert len(collect_tcodes) == 1
    assert len(collect_tsteps) == 1
    collect_tcodes = list(collect_tcodes.keys())[0]
    collect_tsteps = list(collect_tsteps.keys())[0]
    collected_start_dates = []
    collected_end_dates = []
    collected_ts = {}
    for dsn, location in collect_keys:
        tmp = WDM.read_dsn(wdmpath, int(dsn), start_date=start_date, end_date=end_date)
        collected_start_dates.append(tmp.index[0])
        collected_end_dates.append(tmp.index[-1])
        collected_ts[(dsn, location)] = tmp.values
    maptcode = {1: 1, 2: 60, 3: 3600, 4: 86400}
    print("SWMM5")
    print("RDII dump of DSNS {0} from {1}".format(dsns, wdmpath))
    print(maptcode[collect_tcodes] * collect_tsteps)
    print(1)
    print("FLOW CFS")
    print(len(dsns))
    for dsn, location in collect_keys:
        print("{0}_{1}".format(dsn, location))
    print("Node Year Mon Day Hr Min Sec Flow")
    # Can pick any time series because they should all have the same interval
    # and start and end dates.
    for dex, date in enumerate(tmp.index):
        for dsn, location in collect_keys:
            print("{0}_{1} {2} {3:02} {4:02} {5:02} {6:02} {7:02} {8}".format(
                dsn,
                location,
                date.year,
                date.month,
                date.day,
                date.hour,
                date.minute,
                date.second,
                collected_ts[(dsn, location)][dex],
            ))
def __init__(self, d):
    self.__dict__ = d
    self.resources = Resource(d.get("resources"))
    self.mentions = [Mention(i) for i in d.get("mentions", [])] or None
    self.created_at = dateparser(d.get("created_at"))
    self.updated_at = dateparser(d.get("updated_at"))
def wdmtoswmm5rdii(wdmpath, *dsns, **kwds):
    """Print out DSN data to the screen in SWMM5 RDII format.

    :param wdmpath: Path and WDM filename.
    :param dsns: The Data Set Numbers in the WDM file.
    :param start_date: If not given defaults to start of data set.
    :param end_date: If not given defaults to end of data set.
    """
    start_date = kwds.setdefault('start_date', None)
    end_date = kwds.setdefault('end_date', None)
    # Need to make sure that all DSNs are the same interval and all are
    # within start and end dates.
    collect_tcodes = {}
    collect_tsteps = {}
    collect_keys = []
    for dsn in dsns:
        dsn_desc = _describedsn(wdmpath, dsn)
        collect_tcodes[dsn_desc['tcode']] = 1
        collect_tsteps[dsn_desc['tstep']] = 1
        if start_date:
            assert dateparser(start_date) >= dateparser(dsn_desc['start_date'])
        if end_date:
            assert dateparser(end_date) <= dateparser(dsn_desc['end_date'])
        collect_keys.append((dsn_desc['dsn'], dsn_desc['location']))
    assert len(collect_tcodes) == 1
    assert len(collect_tsteps) == 1
    collect_tcodes = list(collect_tcodes.keys())[0]
    collect_tsteps = list(collect_tsteps.keys())[0]
    collected_start_dates = []
    collected_end_dates = []
    collected_ts = {}
    for dsn, location in collect_keys:
        tmp = WDM.read_dsn(wdmpath, int(dsn), start_date=start_date, end_date=end_date)
        collected_start_dates.append(tmp.index[0])
        collected_end_dates.append(tmp.index[-1])
        collected_ts[(dsn, location)] = tmp.values
    maptcode = {
        1: 1,
        2: 60,
        3: 3600,
        4: 86400,
    }
    print('SWMM5')
    print('RDII dump of DSNS {0} from {1}'.format(dsns, wdmpath))
    print(maptcode[collect_tcodes] * collect_tsteps)
    print(1)
    print('FLOW CFS')
    print(len(dsns))
    for dsn, location in collect_keys:
        print(str(dsn) + '_' + location)
    print('Node Year Mon Day Hr Min Sec Flow')
    # Can pick any time series because they should all have the same interval
    # and start and end dates.
    for dex, date in enumerate(tmp.index):
        for dsn, location in collect_keys:
            print('{0}_{1} {2} {3:02} {4:02} {5:02} {6:02} {7:02} {8}'.format(
                dsn, location,
                date.year, date.month, date.day,
                date.hour, date.minute, date.second,
                collected_ts[(dsn, location)][dex]))
data_start = invoice_text.find('usage details')  # month of invoice is listed here
month_str = invoice_text[:data_start].split(' ')[-3:-1]
month = month_str[0]
year = month_str[1]
invoice_text = invoice_text[data_start:]  # no ride data before this
i = find_next_entry(invoice_text, month)  # find the standard form of a Modo statement
while i != -1:
    invoice_text = invoice_text[i:]
    month_pos = invoice_text.find(month)
    year_pos = invoice_text.find(year)
    time_pos = invoice_text[year_pos + 4].find(':')
    date = dateparser(invoice_text[month_pos:year_pos + 4] + ' ' +
                      invoice_text[time_pos - 2:time_pos + 3])
    date = date.date()
    split = invoice_text.find('PST') + 12
    if 'PVRT' in invoice_text[split:split + 65]:
        split = invoice_text[split:split + 65].find('PVRT') + 18
    readline = invoice_text[:split]  # collect only the line that this statement is for
    invoice_text = invoice_text[split - 3:]  # remove the statement we've already found
    cost = get_money(readline)
    cost = "%.2f" % round(cost, 2)
    if "$" + cost not in search_text:
        print(cost + " seems to be wrong in " + f + " on " + str(date))
# The IHDR chunk must appear FIRST. It contains:
#     Width:              4 bytes (0:3)
#     Height:             4 bytes (4:7)
#     Bit depth:          1 byte  (8)
#     Color type:         1 byte  (9)
#     Compression method: 1 byte  (10)
#     Filter method:      1 byte  (11)
#     Interlace method:   1 byte  (12)
self.header['Width'] = to_int(data[0:4])
self.header['Height'] = to_int(data[4:8])
self.header['Bit Depth'] = to_int(data[8])
self.header['Color Type'] = to_int(data[9])
self.header['Compression Method'] = to_int(data[10])
self.header['Filter Method'] = to_int(data[11])
self.header['Interlace Method'] = to_int(data[12])


if __name__ == '__main__':
    pnginfo = PNGinfo(sys.argv[1])
    text = pnginfo.get_text()
    header = pnginfo.get_header()
    print 'Header:'
    print json.dumps(header, indent=4)
    print 'Comments:'
    for t in text:
        print str(t)
        match = re.search('date:(\D+)([\d\-T\:]+)', t)
        if match:
            timestamp = dateparser(match.group(2))
            print timestamp.astimezone(pytz.utc).isoformat()
def _normalize_datetime_field(self, item: Dict) -> Dict:
    datetime_field = item['properties']['datetime']
    item['properties']['datetime'] = dateparser(datetime_field).strftime(
        DATETIME_FORMAT)
    return item
def handle_gpsposition0(self, topic, body):
    # example: 2:$GPRMC,225030.00,A,3725.1974462,N,12203.8994696,W,,,220216,0.0,E,A*2B
    serverTimestamp = datetime.datetime.now(pytz.utc)
    if body == 'NO DATA':
        logging.info('NO DATA')
        return

    # parse record
    resourceIdStr, trackName, content = body.split(":", 2)
    resourceId = int(resourceIdStr)
    if not checkDataQuality(resourceId, content):
        logging.info('UNRECOGNIZED OR CORRUPT GPS SENTENCE: %s', content)
        return
    sentenceType, utcTime, activeVoid, lat, latHemi, lon,\
        lonHemi, speed, heading, date, declination, declinationDir,\
        modeAndChecksum = content.split(",")
    if OVERRIDE_GPS_DATE:
        serverTime = datetime.datetime.now(pytz.utc)
        overrideDate = serverTime.strftime("%d%m%y")
        sourceTimestamp = datetime.datetime.strptime(
            '%s %s' % (overrideDate, utcTime), '%d%m%y %H%M%S.%f')
    else:
        sourceTimestamp = datetime.datetime.strptime(
            '%s %s' % (date, utcTime), '%d%m%y %H%M%S.%f')
    sourceTimestamp = sourceTimestamp.replace(tzinfo=pytz.utc)
    lat = parseTracLinkDM(lat, latHemi)
    lon = parseTracLinkDM(lon, lonHemi)

    # Get compass heading from compass record
    # TODO this clobbers heading read from GPS every time. but this is for basalt. do we care?
    heading = None
    compassCacheKey = 'compass.%s' % resourceId
    compassInfoString = cache.get(compassCacheKey)
    try:
        if compassInfoString:
            compassInfo = json.loads(compassInfoString)
            compassRecord = compassInfo["compassRecord"]
            # sanity check the timestamp in the compass record
            compassTimeString = compassInfo['timestamp']
            compassTimestamp = dateparser(compassTimeString)
            tdelta = serverTimestamp - compassTimestamp
            if tdelta.total_seconds() <= MAX_COMPASS_TIME_SECONDS:
                heading = float(compassRecord["compass"])
    except:
        traceback.print_exc()

    # save subsystem status to cache
    myKey = "telemetryCleanup"
    status = {
        'name': myKey,
        'displayName': 'Telemetry Cleanup',
        'statusColor': OKAY_COLOR,
        'lastUpdated': datetime.datetime.utcnow().isoformat(),
        'elapsedTime': ''
    }
    cache.set(myKey, json.dumps(status))

    # calculate which track record belongs to
    cacheKey = 'gpstrack.%s' % resourceId
    pickledTrack = cache.get(cacheKey)
    if pickledTrack:
        # cache hit, great
        track = pickle.loads(pickledTrack)
    else:
        # check db for a track matching this resourceId
        try:
            basaltVehicle = BasaltVehicle.objects.get(resourceId=resourceId)
        except ObjectDoesNotExist:
            logging.warning('%s', traceback.format_exc())
            raise KeyError(
                'Received GPS position for the EV with resourceId %s. Please ensure there is a vehicle with that id in the BasaltVehicle table.'
                % resourceId)

        # Check for track name. We use explicit name if specified, otherwise
        # we check for an active flight and finally use the resourceId
        if len(trackName):
            logging.info("Using track name from listener: %s" % trackName)
        if len(trackName) == 0:  # i.e. we were not given a name for track already
            try:
                activeFlight = BasaltActiveFlight.objects.get(
                    flight__vehicle=basaltVehicle)
                trackName = activeFlight.flight.name
                logging.info(
                    "Using track name from BasaltActiveFlight: %s" % trackName)
            except ObjectDoesNotExist:
                trackName = basaltVehicle.name
                logging.info("Using track name from EV arg: %s" % trackName)

        tracks = BasaltTrack.objects.filter(name=trackName)
        assert len(tracks) in (0, 1)
        if tracks:
            # we already have a valid track, use that
            track = tracks[0]
        else:
            # must start a new track
            track = BasaltTrack(name=trackName,
                                vehicle=basaltVehicle,
                                iconStyle=DEFAULT_ICON_STYLE,
                                lineStyle=DEFAULT_LINE_STYLE,
                                dataType=RAW_DATA_TYPE)
            track.save()

        # set cache for next time
        pickledTrack = pickle.dumps(track, pickle.HIGHEST_PROTOCOL)
        cache.set(cacheKey, pickledTrack, TRACK_CACHE_TIMEOUT)

    ######################################################################
    # asset position
    ######################################################################

    # create a NewAssetPosition row
    params = {
        'track': track,
        'timestamp': sourceTimestamp,
        'serverTimestamp': serverTimestamp,
        'latitude': lat,
        'longitude': lon,
        'heading': heading,
        'altitude': None,
    }
    pos = PastPosition(**params)
    pos.save()  # note: could queue for bulk save instead
    cpos = CurrentPosition(**params)
    cpos.saveCurrent()
    pos.broadcast()
    self.publisher.sendDjango(cpos)