def get(self):
    # Yup, we are just using Seattle's web UI for 911 calls. There is
    # an API available for this
    # (https://data.seattle.gov/Public-Safety/Seattle-Real-Time-Fire-911-Calls/kzjm-xkqj),
    # but it lacks timestamps, and it doesn't really have all the data.
    url = (
        'http://www2.seattle.gov/fire/realtime911/getRecsForDatePub.asp?'
        'action=Today&incDate=&rad1=des')
    res = urlfetch.fetch(url)
    if res.status_code != httplib.OK:
        raise ValueError(
            'The request failed with error code {0}: {1}'.format(
                res.status_code, res.content))

    lines = res.content.splitlines()
    bulk_insertions = []
    for i in xrange(len(lines)):
        if '<tr id=row_' not in lines[i]:
            continue

        time_string = extract_td_text(lines[i + 1])
        incident_id = extract_td_text(lines[i + 2])
        units = extract_td_text(lines[i + 4]).split()
        address = extract_td_text(lines[i + 5])
        type = extract_td_text(lines[i + 6])

        # TODO: Make this code more defensive, so we don't fail due to
        # one bad record.

        # TODO: This datetime object does not have any timezone data.
        # As of this writing, the offset is PDT. At some point, this
        # code should be amended to add timezone information.
        time = datetime.datetime.strptime(time_string,
                                          '%m/%d/%Y %I:%M:%S %p')

        incident = models.Incident.get_by_id(incident_id)
        if incident:
            # Have more units been dispatched for the incident? If so,
            # update the stored units. (ndb repeated properties expect
            # lists, so convert the deduplicated set back to a list.)
            for current_unit in units:
                if current_unit not in incident.units:
                    incident.units = list(set(incident.units + units))
                    incident.put()
                    break
        else:
            bulk_insertions.append(
                models.Incident(
                    id=incident_id,
                    address=address,
                    units=units,
                    type=type,
                    time=time,
                    original_text=lines[i:i + 8],
                ))

    if bulk_insertions:
        ndb.put_multi(bulk_insertions)
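# The handler above depends on an extract_td_text() helper that is not shown
# in this listing. Below is a minimal sketch of what such a helper might look
# like, assuming each scraped line holds a single <td>...</td> cell; the regex
# and tag-stripping are guesses, not the project's actual implementation.
import re

_TD_RE = re.compile(r'<td[^>]*>(.*?)</td>', re.IGNORECASE | re.DOTALL)


def extract_td_text(line):
    """Return the text content of the first <td> cell found in `line`."""
    match = _TD_RE.search(line)
    if not match:
        return ''
    # Strip any nested tags (e.g. anchors) and surrounding whitespace.
    return re.sub(r'<[^>]+>', '', match.group(1)).strip()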
def index():
    """
    Handler for the main page. This displays the cached (hopefully) results.
    """
    p = Pinger.load()
    i = models.Incident().fetch()
    return render_template('index.html', pinger=p, incidents=i)
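# index() above, and cron(), clear_all_mongo() and dump_pinger() below, all go
# through a models.Incident wrapper whose definition is not part of this
# listing. The sketch below shows the interface those handlers appear to
# expect, assuming a pymongo backend; the class, database and collection names
# are illustrative guesses, not the project's real models module.
from pymongo import MongoClient


class Incident:
    """Hypothetical thin wrapper around an `incidents` Mongo collection."""

    def __init__(self, uri='mongodb://localhost:27017', db_name='pinger'):
        self._collection = MongoClient(uri)[db_name]['incidents']

    def fetch(self):
        # Cursor over every stored incident document; iterated by
        # index() (via the template) and by dump_pinger().
        return self._collection.find()

    def insert(self, document):
        # `document` is a dict snapshot of a service, e.g. the `freeze`
        # written by cron() on up/down transitions.
        return self._collection.insert_one(document)

    def clear_all(self):
        # Remove every document; the DeleteResult is truthy, which is
        # what clear_all_mongo() checks.
        return self._collection.delete_many({})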
def cron():
    """
    The primary checker. This is the endpoint run each time cron runs the
    checker. We will check all services, and then send notifications if
    necessary, as well as insert details into Mongo.
    """
    p = Pinger.load()
    for s in p.services:
        s.check()
        alert = s.check_notifications()

        if alert.get('just_up'):
            m = models.Incident()
            m.insert(s.freeze)
            # Send the back-up notification.
            body = f'{s.pretty_name} is BACK UP after {s.last_n} pings.'
            msg = notification.Notification(s.pretty_name, body)
            try:
                msg.send()
            except Exception as e:
                app.logger.error(e)

        if alert.get('just_down'):
            # Write to Mongo.
            m = models.Incident()
            m.insert(s.freeze)
            # Now send the message.
            body = f'{s.pretty_name} just went down. {s.response}'
            msg = notification.Notification(s.pretty_name, body)
            try:
                msg.send()
            except Exception as e:
                app.logger.error(e)

    p.save()
    return '<html>cron complete</html>'
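# cron() assumes check_notifications() returns a dict of transition flags and
# that notification.Notification(title, body) exposes a send() method. Neither
# is defined in this listing; the stand-in below satisfies the same interface
# (it only logs instead of delivering anything) and is a sketch, not the real
# notification module.
import logging


class Notification:
    """Hypothetical stand-in for the real notification transport."""

    def __init__(self, title, body):
        self.title = title
        self.body = body

    def send(self):
        # The real transport presumably delivers email/SMS/chat messages;
        # logging here is enough to exercise cron() locally.
        logging.getLogger('pinger').info('[%s] %s', self.title, self.body)


# Shape of the alert dict that cron() branches on, for reference:
#   {'just_up': False, 'just_down': True}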
def clear_all_mongo():
    """
    Endpoint to initiate the deletion of all Mongo documents.
    """
    m = models.Incident()
    if m.clear_all():
        return '<html>clear complete</html>'
    else:
        h = 'Error locating cache'
        d = 'Could not load the cache OR load obj from file. Serious issue.'
        return render_template('error.html', status_code=400, headline=h,
                               description=d)
def dump_pinger():
    """
    Return a pretty print of all the service objects. This endpoint was
    intended for debugging only.
    """
    p = Pinger.load()
    if p:
        svcs = p.services
        serialized = []
        for svc in svcs:
            txt = svc.to_dict()
            serialized.append(txt)

        # Mongo documents.
        m = []
        cursor = models.Incident().fetch()
        for row in cursor:
            i = {
                '_id': str(row['_id']),
                'alive': str(row['alive']),
                'response': str(row['response']),
                'timestamp': str(row['timestamp']),
            }
            m.append(i)

        pinger = {
            'version': __version__,
            'created': p.created,
            'updated': p.updated,
            'services': serialized,
            'mongo': m
        }
        return pinger, 200  # Will be returned with jsonify.
    else:
        h = 'Error dumping'
        d = 'Error finding cache or creating obj via file'
        return render_template('error.html', status_code=400, headline=h,
                               description=d)
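# dump_pinger() returns a bare (dict, status) tuple on success and a rendered
# error page otherwise, relying on Flask (1.1+) to JSON-encode dict return
# values. If that behaviour is not available, an explicit jsonify() call is
# equivalent; the route path below is illustrative, not the app's actual URL
# map.
from flask import jsonify


@app.route('/pinger.json')
def dump_pinger_json():
    result = dump_pinger()
    if isinstance(result, tuple) and isinstance(result[0], dict):
        payload, status = result
        return jsonify(payload), status
    # Error branch: dump_pinger() already produced a rendered template.
    return result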