def _fromGraphData(graph, uri): rows = list( graph.query(""" SELECT ?t ?mail WHERE { { ?uri exif:dateTime ?t } UNION { ?uri pho:fileTime ?t } . OPTIONAL { ?mail dcterms:hasPart ?uri } } ORDER BY ?t LIMIT 1""", initBindings={'uri': uri})) # wrong: exif datetime is preferred over email time, but this is # still picking email time. if not rows or rows[0]['mail']: rows = graph.query("""SELECT ?t WHERE { ?email a pho:Email ; dcterms:created ?t ; dcterms:hasPart ?uri . }""", initBindings={'uri': uri}) if not rows: return None photoDate = rows[0]['t'] try: sec = iso8601.parse(str(photoDate)) except Exception: # i think this is the 1-hour error bug on the site. incoming # dates might not have any zone, but we can make a guess about # their local time sec = iso8601.parse(str(photoDate) + '-0800') # todo: this is losing the original tz unnecessarily return datetime.datetime.fromtimestamp(sec, tzlocal())
def __init__(self, id=None, flow_id=None, flow_name=None, post_type=None,
             reply_ids=None, file_ids=None, user_id=-1, content=None,
             star=None, created_at=None, updated_at=None):
    """Build a post record, parsing the ISO8601 timestamp strings when given."""
    self.id = id
    self.flow_id = flow_id
    self.flow_name = flow_name
    self.post_type = post_type
    self.content = content
    self.star = star
    # Timestamps arrive as ISO8601 strings (or None); keep None as-is.
    self.created_at = None if not created_at else iso8601.parse(created_at)
    self.updated_at = None if not updated_at else iso8601.parse(updated_at)
    # Missing id collections default to fresh empty lists.
    self.reply_ids = reply_ids or []
    self.file_ids = file_ids or []
    self.user_id = user_id
    self.files = []
    self.comments = []
def _extract_response(self, method, soap_response):
    """Extract tokens from a SOAP response.

    For RequestMultipleSecurityTokens, builds one SecurityToken per
    RequestSecurityTokenResponse element and returns them (preceded by
    any previously accumulated tokens and the raw response).  Every
    other method is delegated to the SOAPService base implementation.
    """
    if method == "RequestMultipleSecurityTokens":
        # Relative paths into each response element, resolved against
        # the NS_SHORTHANDS prefix map by SOAPUtils.find_ex.
        paths =("./wst:RequestSecurityTokenResponseCollection",
                "./wst:TokenType",
                "./wsp:AppliesTo/wsa:EndpointReference/wsa:Address",
                "./wst:LifeTime/wsu:Created",
                "./wst:LifeTime/wsu:Expires",
                "./wst:RequestedSecurityToken/wsse:BinarySecurityToken",
                "./wst:RequestedSecurityToken/xmlenc:EncryptedData/xmlenc:CipherData/xmlenc:CipherValue",
                "./wst:RequestedProofToken/wst:BinarySecret")
        result = self.__response_tokens + [soap_response]
        soap_utils = SOAPUtils(NS_SHORTHANDS)
        responses = soap_utils.find_ex(soap_response.body, paths[0])
        for response in responses:
            token = SecurityToken()
            token.type = soap_utils.find_ex(response, paths[1]).text
            token.service_address = soap_utils.find_ex(response, paths[2]).text
            # lifetime is a [created, expires] pair of epoch seconds.
            token.lifetime[0] = iso8601.parse(soap_utils.find_ex(response, paths[3]).text)
            token.lifetime[1] = iso8601.parse(soap_utils.find_ex(response, paths[4]).text)
            # The token body is either a plain BinarySecurityToken or an
            # encrypted CipherValue; prefer the plain form when present.
            t = soap_utils.find_ex(response, paths[5])
            if t is not None:
                token.security_token = t.text
            else:
                token.security_token = soap_utils.find_ex(response, paths[6]).text
            # Proof token is optional.
            proof_token = soap_utils.find_ex(response, paths[7])
            if proof_token is not None:
                token.proof_token = proof_token.text
            # Cache tokens by the service they apply to.
            self.__storage[token.service_address] = token
            result.append(token)
        # Accumulated tokens are consumed once returned.
        self.__response_tokens = []
        return result
    else:
        return SOAPService._extract_response(self, method, soap_response)
def __init__(self, requester, id=None, file_name=None, file_size=0,
             post_id=None, content_type=None, is_image=False, meta_data=None,
             width=None, height=None, url=None, thumbnail_url=None,
             created_at=None, updated_at=None):
    """Build a file record; decodes the JSON metadata and parses timestamps."""
    self.__requester = requester
    self.__url = url
    self.id = id
    self.file_name = file_name
    self.file_size = file_size
    self.post_id = post_id
    self.content_type = content_type
    self.is_image = is_image
    # meta_data arrives as a JSON string (or None).
    self.meta_data = json.loads(meta_data) if meta_data is not None else None
    # Dimensions may be absent; coerce real values to int.
    self.width = None if not width else int(width)
    self.height = None if not height else int(height)
    self.thumbnail_url = thumbnail_url
    # Timestamps arrive as ISO8601 strings (or None).
    self.created_at = None if not created_at else iso8601.parse(created_at)
    self.updated_at = None if not updated_at else iso8601.parse(updated_at)
def personAgeString(isoBirthday, photoDate):
    """Return a person's age at ``photoDate`` as "N.N months" (under a
    year) or "N.N years", given an ISO8601 birthday string."""
    try:
        taken = iso8601.parse(str(photoDate))
    except Exception:
        # Zone-less dates fail to parse; guess a -0700 offset.
        taken = iso8601.parse(str(photoDate) + '-0700')
    born = iso8601.parse(isoBirthday)
    days = (taken - born) / 86400
    months = days / 30
    if months < 12:
        return "%.1f months" % months
    return "%.1f years" % (days / 365)
def getUserDict(user):
    """Flatten a YouTube user entry into a plain dict for templates.

    Optional attributes are fetched defensively because gdata entries
    omit elements freely.
    """
    try:
        date = datetime.fromtimestamp(parse(user.published.text)).strftime("%d.%m.%Y")
    except:
        date = ""
    try:
        description = user.description.text
    except:
        description = ""
    try:
        thumbnail = user.thumbnail.url
    except:
        thumbnail = "ADD_DEFAULT_VIDEO_ICON_URL"
    subscribers = user.statistics.subscriber_count
    return {
        'name': user.username.text,
        'date': date,
        'description': description,
        'thumbnail': thumbnail,
        'subscribed': subscribers,
        'subscribed_formated': locale.format('%d', int(subscribers), True),
    }
def do_search(narrative):
    """Run every Guardian search attached to ``narrative`` and return
    de-duplicated article dicts, newest first.

    Each dict carries the parsed publication date, today/this-week
    flags, a first-paragraph excerpt and the raw article payload.
    """
    all_results = []
    for search in narrative.guardiansearch_set.all():
        results = search.do()
        for article in results:
            a = {}
            a['date'] = datetime.fromtimestamp(iso8601.parse(article['webPublicationDate']))
            a['istoday'] = (date.today() == a['date'].date())
            a['thisweek'] = (a['date'].date() > date.today() - timedelta(7))
            # Track which searches matched this article; merged below
            # when the same article appears in several searches.
            a['search'] = [search]
            if 'fields' in article and 'body' in article['fields']:
                # The API returns this sentinel when the body may not be
                # redistributed; treat it like an empty body.
                if article['fields']['body'] == '<!-- Redistribution rights for this field are unavailable -->' or len(article['fields']['body']) == 0:
                    a['firstpara'] = ""
                else:
                    a['firstpara'] = strip_html(article['fields']['body']).split('\n')[0]
            else:
                a['firstpara'] = ""
            a['data'] = article
            all_results.append(a)
    all_results = sorted(all_results, key=lambda article: article['date'], reverse=True)
    # Remove duplicates
    # NOTE(review): groupby only merges *adjacent* items; duplicates of
    # one article share a publication date so sorting by date normally
    # makes them adjacent, but distinct articles with identical dates
    # could in principle interleave -- confirm this is acceptable.
    results = []
    grouped = itertools.groupby(all_results, lambda article: article['data']['id'])
    for group, articles in grouped:
        articles = list(articles)
        # Fold the matching searches of later duplicates into the first.
        for a in articles[1:]:
            articles[0]['search'] += a['search']
        results.append(articles[0])
    return results
def iso_8601_parse(time_string):
    """Parse a ``Y-m-dTH:M:S`` ISO8601 string into a naive local datetime."""
    from xml.utils.iso8601 import parse
    epoch_seconds = parse(time_string)
    return datetime.datetime.fromtimestamp(epoch_seconds)
def parse_atom(atom, mode):
    """Turn an Atom feed element into a list of entry dicts.

    ``mode == "search"`` feeds encode the entry id and author
    differently from timeline feeds, hence the branching below.
    Each dict carries id, userid, username, title, time and any
    alternate/image links.
    """
    entries = []
    for entry in atom.findall("{http://www.w3.org/2005/Atom}entry"):
        e = {}
        id = entry.findtext("{http://www.w3.org/2005/Atom}id")
        if mode == "search":
            # Search ids look like "tag:<domain>:<id>".
            id = id.split(":")[2]
        else:
            # Timeline ids are URLs; the numeric id is path segment 5.
            id = id.split("/")[5]
        e["id"] = long(id)
        title = entry.findtext("{http://www.w3.org/2005/Atom}title")
        if mode != "search":
            # Timeline titles are "userid: text"; author holds the name.
            (userid, title) = title.split(": ", 1)
            e["userid"] = userid
            e["username"] = entry.findtext("{http://www.w3.org/2005/Atom}author/{http://www.w3.org/2005/Atom}name")
        else:
            # Search authors look like "userid (username)".
            userid = entry.findtext("{http://www.w3.org/2005/Atom}author/{http://www.w3.org/2005/Atom}name")
            (userid, username) = userid.split(" (", 1)
            e["userid"] = userid
            e["username"] = username[:-1]
        e["title"] = title
        e["time"] = parse(entry.findtext("{http://www.w3.org/2005/Atom}published"))
        links = entry.findall("{http://www.w3.org/2005/Atom}link")
        for link in links:
            rel = link.get("rel")
            if rel == "alternate":
                e["alternate"] = link.get("href")
            elif rel == "image":
                e["image"] = link.get("href")
        entries.append(e)
    return entries
def __parseCityNode(cityNode):
    """Build a City from an XML <city> element.

    Fields are initialised to None up front so a missing child element
    yields None instead of raising NameError (the previous behaviour
    when any expected tag was absent).
    """
    id = name = country = latitude = longitude = None
    creationDateTime = updateDateTime = None
    for subNode in cityNode:
        if subNode.tag == "id":
            id = subNode.text
        elif subNode.tag == "name":
            name = subNode.text
        elif subNode.tag == "country":
            country = subNode.text
        elif subNode.tag == "latitude":
            latitude = subNode.text
        elif subNode.tag == "longitude":
            longitude = subNode.text
        elif subNode.tag == "creationDateTime":
            creationDateTime = parse(subNode.text)
        elif subNode.tag == "updateDateTime":
            updateDateTime = parse(subNode.text)
    return City(id, name, country, latitude, longitude,
                creationDateTime, updateDateTime)
def __parseProviderNode(node):
    """Build a Provider from an XML <provider> element.

    Fields are initialised to None up front so a missing child element
    yields None instead of raising NameError (the previous behaviour
    when any expected tag was absent).
    """
    id = cityId = shortName = fullName = locationsUpdated = None
    creationDateTime = updateDateTime = None
    for subNode in node:
        if subNode.tag == "id":
            id = subNode.text
        elif subNode.tag == "cityId":
            cityId = subNode.text
        elif subNode.tag == "shortName":
            shortName = subNode.text
        elif subNode.tag == "fullName":
            fullName = subNode.text
        elif subNode.tag == "locationsUpdated":
            locationsUpdated = subNode.text
        elif subNode.tag == "creationDateTime":
            creationDateTime = parse(subNode.text)
        elif subNode.tag == "updateDateTime":
            updateDateTime = parse(subNode.text)
    return Provider(id, cityId, shortName, fullName, locationsUpdated,
                    creationDateTime, updateDateTime)
def _decode_date(self, str):
    """Parse a date string into a naive datetime.

    A 10-character value is treated as a bare 'YYYY-MM-DD' date
    (midnight); anything else is parsed as a full ISO8601 timestamp.
    (The parameter shadows the ``str`` builtin; the name is kept for
    interface compatibility.)
    """
    if len(str) == 10:
        # Date-only form: no zone information involved.
        result = datetime.datetime.strptime(str, '%Y-%m-%d')
    else:
        result = datetime.datetime.fromtimestamp(iso8601.parse(str))
    # Always hand back a naive datetime.
    return result.replace(tzinfo=None)
def __init__(self, id=None, flow_id=None, flow_name=None, reply_to=None,
             content=None, user_id=-1, created_at=None, updated_at=None):
    """Build a message record, parsing the ISO8601 timestamps when given."""
    self.id = id
    self.flow_id = flow_id
    self.flow_name = flow_name
    self.reply_to = reply_to
    self.content = content
    # Timestamps may be absent; only parse real values.
    self.created_at = None if not created_at else iso8601.parse(created_at)
    self.updated_at = None if not updated_at else iso8601.parse(updated_at)
    self.user_id = user_id
def date_parse(time_string):
    """Return a datetime object for the given ISO8601 string.

    Args:
      time_string: An ISO8601 timestamp.

    Returns:
      A naive datetime.datetime object in UTC.
    """
    epoch_seconds = iso8601.parse(time_string)
    return datetime.datetime.utcfromtimestamp(epoch_seconds)
def format_date(date, to_format="%Y/%m/%d %H:%M:%S"):
    """
    Format standard rails timestamp to more human readable format

    @type date: string
    @param date: ISO8601 timestamp string (may be empty/None)

    @return string, formatted date ("" for empty input)
    """
    # Guard against empty/None input: iso8601.parse would raise on "".
    # This matches the other format_date variant in this codebase.
    if not date:
        return ""
    t = iso8601.parse(date)
    return time.strftime(to_format, time.localtime(t))
def date_parse(time_string):
    """Return a datetime object for the given ISO8601 string.

    Args:
      time_string: An ISO8601 timestamp.

    Returns:
      A naive datetime.datetime object in UTC.
    """
    seconds_since_epoch = iso8601.parse(time_string)
    return datetime.datetime.utcfromtimestamp(seconds_since_epoch)
def photoCreated(graph, uri):
    """datetime of the photo's creation time. Cached for the life of
    this process.

    Raises ValueError when no date can be found for ``uri``; the
    failure itself is cached so repeat lookups stay cheap.
    """
    try:
        # The cache stores either a datetime or, as a negative-cache
        # sentinel, the ValueError to re-raise.
        ret = _photoCreated[uri]
        if isinstance(ret, ValueError):
            raise ret
        return ret
    except KeyError:
        pass
    rows = list(graph.query("""
        SELECT ?t ?mail WHERE {
          { ?uri exif:dateTime ?t } UNION { ?uri pho:fileTime ?t } .
          OPTIONAL { ?mail dcterms:hasPart ?uri }
        } ORDER BY ?t LIMIT 1""", initBindings={'uri' : uri}))
    # wrong: exif datetime is preferred over email time, but this is
    # still picking email time.
    if not rows or rows[0]['mail']:
        # No direct time (or the photo came from an email): fall back
        # to the creation time of the email that carried it.
        rows = graph.query("""SELECT ?t WHERE {
          ?email a pho:Email ; dcterms:created ?t ; dcterms:hasPart ?uri .
        }""", initBindings={'uri' : uri})
        if not rows:
            # also look up the :alternate tree for source images with times
            _photoCreated[uri] = ValueError("can't find a date for %s" % uri)
            raise _photoCreated[uri]
    photoDate = rows[0]['t']
    try:
        # iso8601.parse yields seconds since the epoch.
        sec = iso8601.parse(str(photoDate))
    except Exception:
        # i think this is the 1-hour error bug on the site. incoming
        # dates might not have any zone, but we can make a guess about
        # their local time
        sec = iso8601.parse(str(photoDate) + '-0700')
    # todo: this is losing the original tz unnecessarily
    ret = datetime.datetime.fromtimestamp(sec, tzlocal())
    _photoCreated[uri] = ret
    return ret
def format_date(date, to_format="%Y/%m/%d %H:%M:%S"):
    """Render a rails-style ISO8601 timestamp in the local timezone.

    @type date: string
    @param date: ISO8601 timestamp (may be empty/None)

    @return string, formatted date ("" for empty input)
    """
    if not date:
        return ""
    epoch_seconds = iso8601.parse(date)
    local_time = time.localtime(epoch_seconds)
    return time.strftime(to_format, local_time)
def __parseStationNode(stationNode):
    """Build a Station from an XML <station> element.

    Fields are initialised to None up front so a missing child element
    yields None instead of raising NameError (the previous behaviour
    when any expected tag was absent).
    """
    stationId = providerId = externalId = name = description = None
    latitude = longitude = creationDateTime = updateDateTime = None
    for subNode in stationNode:
        if subNode.tag == "id":
            stationId = subNode.text
        elif subNode.tag == "providerId":
            providerId = subNode.text
        elif subNode.tag == "externalId":
            externalId = subNode.text
        elif subNode.tag == "name":
            name = subNode.text
        elif subNode.tag == "description":
            description = subNode.text
        elif subNode.tag == "latitude":
            latitude = subNode.text
        elif subNode.tag == "longitude":
            longitude = subNode.text
        elif subNode.tag == "creationDateTime":
            creationDateTime = parse(subNode.text)
        elif subNode.tag == "updateDateTime":
            updateDateTime = parse(subNode.text)
    return Station(stationId, providerId, externalId, name, description,
                   latitude, longitude, creationDateTime, updateDateTime)
def __init__(self, id=None, name=None, email_address=None, created_at=None,
             updated_at=None, default_channel=False, owner_name=None,
             quota_percentage=0.0, quota_count=0, rss_url=None):
    """Build a channel record, parsing the ISO8601 timestamps when given."""
    self.id = id
    self.name = name
    self.email_address = email_address
    # Timestamps arrive as ISO8601 strings (or None); keep None as-is.
    self.created_at = None if not created_at else iso8601.parse(created_at)
    self.updated_at = None if not updated_at else iso8601.parse(updated_at)
    self.is_default = default_channel
    self.owner_name = owner_name
    self.quota_percentage = quota_percentage
    self.quota_count = int(quota_count)
    self.rss_url = rss_url
    self.invitations = []
    # Owner id is filled in later by the caller, not by the API payload.
    self.owner_id = None
def getVideoDict(entry):
    """Flatten a YouTube video entry into a plain dict for templates.

    Optional attributes are fetched defensively: gdata entries omit
    elements freely, so each lookup falls back to a safe default.
    """
    try:
        duration = datetime.fromtimestamp(int(entry.media.duration.seconds)).strftime("%M:%S")
    except Exception:
        duration = "FAIL"
    try:
        description = entry.content.text
    except Exception:
        description = ""
    try:
        thumbnail = entry.media.thumbnail[0].url.replace('0.jpg', 'default.jpg', 1)
    except Exception:
        thumbnail = "ADD_DEFAULT_VIDEO_ICON_URL"
    try:
        keywords = entry.media.keywords.text
    except Exception:
        keywords = ""
    try:
        author = entry.author[0].name.text
    except Exception:
        author = ""
    try:
        player = entry.media.player.url
    except Exception:
        player = ""
    try:
        published = datetime.fromtimestamp(parse(entry.published.text)).strftime("%d.%m.%Y")
    except Exception:
        published = ""
    try:
        # bug fix: a stray trailing comma used to turn this into a
        # 1-tuple; the fallback below is a plain string, so the scalar
        # count is what callers expect.
        viewed = entry.statistics.view_count
    except Exception:
        viewed = ""
    return {'title': entry.media.title.text,
            'date': entry.published.text,
            'description': description,
            'thumbnail': thumbnail,
            'keywords': keywords,
            'player': player,
            'author': author,
            'duration': duration,
            'id': entry.id.text.split('/').pop(),
            'published': published,
            'viewed': viewed}
def getVideoDict(entry):
    """Collapse a YouTube video entry into a template-friendly dict."""
    def attempt(getter, fallback):
        # gdata entries omit elements freely; fall back on any failure.
        try:
            return getter()
        except:
            return fallback

    duration = attempt(lambda: datetime.fromtimestamp(int(entry.media.duration.seconds)).strftime("%M:%S"), "FAIL")
    description = attempt(lambda: entry.content.text, "")
    thumbnail = attempt(lambda: entry.media.thumbnail[0].url.replace("0.jpg", "default.jpg", 1), "ADD_DEFAULT_VIDEO_ICON_URL")
    keywords = attempt(lambda: entry.media.keywords.text, "")
    author = attempt(lambda: entry.author[0].name.text, "")
    player = attempt(lambda: entry.media.player.url, "")
    published = attempt(lambda: datetime.fromtimestamp(parse(entry.published.text)).strftime("%d.%m.%Y"), "")
    viewed = attempt(lambda: (entry.statistics.view_count,), "")
    return {
        "title": entry.media.title.text,
        "date": entry.published.text,
        "description": description,
        "thumbnail": thumbnail,
        "keywords": keywords,
        "player": player,
        "author": author,
        "duration": duration,
        "id": entry.id.text.split("/").pop(),
        "published": published,
        "viewed": viewed,
    }
def __parseStationStatusNode(stationStatusNode):
    """Build a StationStatus from an XML element.

    Fields are initialised to None up front so a missing child element
    yields None instead of raising NameError (the previous behaviour
    when any expected tag was absent).  creationDateTime is not part of
    StationStatus, so that tag is deliberately ignored.
    """
    cityId = stationId = availableBikes = None
    freeSlots = totalSlots = updateDateTime = None
    for subNode in stationStatusNode:
        if subNode.tag == "cityId":
            cityId = subNode.text
        elif subNode.tag == "stationId":
            stationId = subNode.text
        elif subNode.tag == "availableBikes":
            availableBikes = subNode.text
        elif subNode.tag == "freeSlots":
            freeSlots = subNode.text
        elif subNode.tag == "totalSlots":
            totalSlots = subNode.text
        elif subNode.tag == "updateDateTime":
            updateDateTime = parse(subNode.text)
    return StationStatus(cityId, stationId, availableBikes, freeSlots,
                         totalSlots, updateDateTime)
def __init__(self, xml_node):
    """Populate a membership record from an <ab:...> XML node.

    Distinguishes MSN passport accounts from external (email-only)
    accounts and derives a display name when none is given.
    """
    soap_utils = SOAPUtils(NS_SHORTHANDS)
    self.membership_id = soap_utils.find_ex(xml_node, "./ab:MembershipId").text
    self.type = soap_utils.find_ex(xml_node, "./ab:Type").text
    self.state = soap_utils.find_ex(xml_node, "./ab:State").text
    self.deleted = SOAPUtils.bool_type(soap_utils.find_ex(xml_node, "./ab:Deleted").text)
    # LastChanged is an ISO8601 string; iso8601.parse yields epoch seconds.
    self.last_changed = iso8601.parse(soap_utils.find_ex(xml_node, "./ab:LastChanged").text)
    # A PassportName marks an MSN account; otherwise fall back to Email
    # and tag the contact as external.
    passport = soap_utils.find_ex(xml_node, "./ab:PassportName")
    if passport is not None:
        self.account = passport.text
        self.network_id = NetworkID.MSN
    else:
        self.account = soap_utils.find_ex(xml_node, "./ab:Email").text
        self.network_id = NetworkID.EXTERNAL
    # DisplayName is optional; default to the account's local part.
    display_name = soap_utils.find_ex(xml_node, "./ab:DisplayName")
    if display_name is not None:
        self.display_name = display_name.text
    else:
        self.display_name = self.account.split("@", 1)[0]
def getUserDict(user):
    """Collapse a YouTube user entry into a template-friendly dict."""
    def attempt(getter, fallback):
        # gdata entries omit elements freely; fall back on any failure.
        try:
            return getter()
        except:
            return fallback

    date = attempt(lambda: datetime.fromtimestamp(parse(user.published.text)).strftime("%d.%m.%Y"), "")
    description = attempt(lambda: user.description.text, "")
    thumbnail = attempt(lambda: user.thumbnail.url, "ADD_DEFAULT_VIDEO_ICON_URL")
    count = user.statistics.subscriber_count
    return {
        'name': user.username.text,
        'date': date,
        'description': description,
        'thumbnail': thumbnail,
        'subscribed': count,
        'subscribed_formated': locale.format('%d', int(count), True),
    }
def set_read_to_narrative(request, slug):
    """AJAX endpoint: record how far ``request.user`` has read the
    narrative identified by ``slug``.  Always responds with an empty body."""
    when = datetime.fromtimestamp(iso8601.parse(request.POST['date']))
    if request.user.is_anonymous():
        # Nothing to record for anonymous visitors.
        return HttpResponse('')
    narrative = Narrative.objects.get(slug=slug)
    try:
        marker = ReadTo.objects.get(user=request.user, narrative=narrative)
    except ReadTo.DoesNotExist:
        marker = None
    if marker is None:
        ReadTo.objects.create(user=request.user, narrative=narrative,
                              date=when)
    else:
        marker.date = when
        marker.save()
    return HttpResponse('')
def unixFromLiteral(x):
    """Convert an RDF literal holding an ISO8601 time into unix epoch
    seconds via iso8601.parse."""
    literal_text = str(x)
    return iso8601.parse(literal_text)
# Setup the GData service g_client = gdata.docs.service.DocsService() g_client.ClientLogin(GOOGLE_LOGIN, GOOGLE_PWD, account_type=GOOGLE_ACT_TYPE, source=SOURCE) query = gdata.docs.service.DocumentQuery() query['title'] = options.doc feed = g_client.Query(query.ToUri()) if feed.entry: entry = feed.entry[0] type = entry.GetDocumentType() # Get the relevant document attributes link = entry.GetAlternateLink().href timestamp = iso8601.parse(entry.updated.text) updated = datetime.fromtimestamp(timestamp).strftime("%b %m %Y, %I:%M %p") modified_by_name = entry.lastModifiedBy.name.text modified_by_email = entry.lastModifiedBy.email.text title = entry.title.text print "Found Google Doc: {0}".format(title) path = os.path.join(tempfile.gettempdir(), '{0}.{1}'.format(title.lower().replace(' ', '_'), EXTENSIONS[type])) if type == 'spreadsheet': # Get an auth token for the spreadsheet service and swap if # for the one we currently use ss_client = gdata.spreadsheet.service.SpreadsheetsService()
def __init__(self, filename): #self.db = db self.useless = ('', 'created_by', 'source', 'editor', 'ele', 'time', 'editor', 'author', 'hdop', 'pdop', 'sat', 'speed', 'fix', 'course', 'converted_by', 'attribution', 'upload_tag', 'history') self.User = {} self.currentId = 0 self.goodUser = ('') self.TilesCreated = 0 self.Nodes = {} self.NodesToWays = {} self.NodesToWaysR = {} self.Ways = {} self.Borders = {} self.Places = {} self.DatesGraph = {} self.RoutableWays = set([]) self.BPlaces = {} self.currentUser = "" # user who created latest obj self.currentType = "" # latest obj type self.NodesCount = 0 self.PlacesCount = 0 self.RelationsCount = 0 self.Address = {} self.currentMembers = [] self.currentRelID = 0 self.WaysCount = 0 self.prevtime = time.time() self.prevcount = 0 self.BorderList = [] self.BordersCount = 0 self.MinLaTile = 100000000 self.MaxLaTile = -100000000 self.MinLoTile = 100000000 self.MaxLoTile = -10000000 self.FillTile = 0 self.TagsList = {} self.ZoomLevel = 200.0 #130 self.Tiles = {} self.FixMe = _('<b>FixMe</b>') self.AddrRelLinks = '' self.MaxTagsElems = 20 self.LastChange = '' self.FirstChange = 'z' self.WaysPassed = set([]) self.CountryName = CountryName self.packlatlon = lambda lat, lon: lon / abs(lon) * (int( abs(lat * 1000000)) * 2 + ( (lat / abs(lat) + 1) / 2) + abs(lon / 360)) self.unpacklatlon = lambda n: (-(-1)**(int(n) % 2) * abs( int(n / 2) / 1000000.), (n / abs(n)) * (abs(n) % 1) * 360) htmlheader = "<html><head><title>%%s%s %s: %%s</title><meta http-equiv=Content-Type content=\"text/html; charset=UTF-8\" /><script src=\"/stat/sorttable.js\"></script></head><body>" % ( _("OSM Stats"), self.CountryName) htmltablestart = "<table class=\"sortable\" style=\"width: 100%; border: 1px solid gray\" border=1 width=100%>" def htmltablerow(cols): tr = "<tr>" for col in cols: if type(col) == type(float()): tr = tr + "<td align=\"right\">%.3f</td>" % (col, ) else: tr = tr + "<td>%s</td>" % (col, ) return tr + "</tr>\n" if ifmod('warnings'): 
self.warningsFile = open('warnings.html', 'w') self.warningsFile.write(htmlheader % (" ", _("Warnings"))) self.warningsFile.write(htmltablestart) self.warningsFile.write(htmltablerow( (_("Warning type"), _("Way")))) try: parser = make_parser() parser.setContentHandler(self) parser.parse(filename) except xml.sax._exceptions.SAXParseException: sys.stderr.write(_("Error loading %s\n") % filename) ## Dealing with BorderList if ifmod('borders'): self.BorderList.sort((lambda x, y: (int(y[0] > x[0]) * 2) - 1)) self.unnamedBordersFile = open('unnamed.html', 'w') in_unnamed = 0 in_named = 0 perpage = 500 for tt in self.BorderList: dens, area, nodesInWay, bbox = tt if (in_unnamed % perpage) == 0: self.unnamedBordersFile.write('</table></body></html>') self.unnamedBordersFile.close() self.unnamedBordersFile = open( 'unnamed%s.html' % (int(in_unnamed / perpage) + 1), 'w') self.unnamedBordersFile.write(htmlheader % (" ", _("Unnamed borders"))) self.unnamedBordersFile.write(htmltablestart) self.unnamedBordersFile.write( htmltablerow( (_("Area [km<sup>2</sup>]"), _("Nodes"), _("Density [nodes/km<sup>2</sup>]"), _("Link")))) in_unnamed += 1 bf = self.unnamedBordersFile bf.write(htmltablerow( (area, nodesInWay, dens, linkBbox(bbox)))) self.unnamedBordersFile.write('</table></body></html>') self.unnamedBordersFile.close() filename = open('unnamed.html', 'w') filename.write(htmlheader % (" ", _("Unnamed borders pages list"))) filename.write("<body><h2>%s</h2><ul>" % _("Unnamed borders pages list")) for i in range(1, int(in_unnamed / perpage) + 2): filename.write('<li><a href=unnamed%s.html>%s %s</a>' % (i, _("Page"), i)) filename.write("</ul></body></html>") filename.close() if ifmod('addr'): dirname = "addr" if not os.path.isdir("./" + dirname + "/"): os.mkdir("./" + dirname + "/") filename = open('addr/index.html', 'w') filesStarted = ['addr/index.html'] filename.write(htmlheader % (" ", _("Postal address relations"))) def AddrTreeRecurse(top, wayback="» "): if top in 
self.WaysPassed: if ifmod('warnings'): self.warningsFile.write( htmltablerow( (_("double-pass on address relation"), top))) return wayback = wayback + "%s<a href=%s.html>%s</a> » " % ( linkRelationIcon(top), top, self.Address[top]["tags"].get( "name", top)) filename = open('addr/%s.html' % (top, ), 'w') filename.write(htmlheader % ("", self.Address[top]["tags"].get( "name", "Unknown address"))) filename.write(" %s<p>" % wayback) nodes = 0 ways = 0 rels = 0 self.WaysPassed.add(top) for ctype, child in self.Address[top]["child"]: if ctype == 'node': if nodes == 0: filename.write(htmltablestart) filename.write( htmltablerow( (_("Name"), _("Density"), _("Area"), _("Nodes"), _("Type"), _("Link")))) nodes += 1 if int(child) in self.BPlaces: area, nodesIn, bbox, tags = self.BPlaces[int( child)] filename.write(htmltablerow((\ tags["name"], \ 1.* nodesIn / area, \ area, \ nodesIn, \ tags["place"], \ linkBboxMarker(bbox, (tags["lat"],tags["lon"])), \ ))) elif ifmod('places') and (int(child) in self.Places): filename.write(htmltablerow((\ self.Places[int(child)].get("name", child), \ " ", \ " ", \ " ", \ self.Places[int(child)].get("place", _("??? 
unknown")), \ linkNode(child), \ ))) else: filename.write(htmltablerow((linkNode(child), ))) filename.write("</table>") for ctype, child in self.Address[top]["child"]: if ctype == 'way': filename.write(linkWay(child) + " ") for ctype, child in self.Address[top]["child"]: if ctype == 'relation': if rels == 0: filename.write(_("<h3>Relations</h3>")) rels += 1 if child in self.Address: filename.write("<br>%s<a href=%s.html>%s</a> " % (linkRelationIcon(child), child, self.Address[child]["tags"].get( "name", child))) AddrTreeRecurse(child, wayback) else: filename.write(linkRelation(child)) #print self.BPlaces ##filename.write(str(self.Address[top]["child"])) filename.write("</body></html>") filename.close() while self.Address: top = self.Address.keys()[0] while self.Address[top]["parent"] in self.Address: top = self.Address[top]["parent"] AddrTreeRecurse(top) filename.write( "<a href=%s.html>%s</a> " % (top, self.Address[top]["tags"].get("name", top))) self.AddrRelLinks = self.AddrRelLinks + "<a href=addr/%s.html>%s</a> " % ( top, self.Address[top]["tags"].get("name", top)) #print self.Address[top] for aaaaa in self.WaysPassed: del self.Address[aaaaa] self.WaysPassed = set([]) filename.close() for toClose in filesStarted: filename = open(toClose, 'a') filename.write("</body></html>") filename.close() # end ifmod('addr') if ifmod('tags'): filename = open("tags.html", 'w') taglist = self.TagsList.keys() taglist.sort() alphabet = [] for k in taglist: filename.close() if k: alpha = k[0] else: alpha = "" if alpha not in ("a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"): alpha = "other" tt = _("other") if alpha not in alphabet: alphabet.append(alpha) filename = open("tags-%s.html" % (alpha, ), 'w') filename.write(htmlheader % ("[%s]" % _(alpha), _("Tags"))) filename.write(""" <script type="text/javascript"> var d = document; var offsetfromcursorY=15 // y offset of tooltip var ie=d.all && 
!window.opera; var ns6=d.getElementById && !d.all; var tipobj,op=0; var left,top=0; function tt(el,txt) { if (d.getElementById('mess').style.visibility=='hidden'){ tipobj=d.getElementById('mess'); e = el; tipobj.innerHTML = '<div style="float:right;align:right;cursor:pointer;cursor:hand;border-bottom:1px solid grey;padding:2px;border-left:1px solid black;" onclick="hide_info(this)">x</div>'+ txt; el.onmousemove=positiontip;};} function hide_info(el) {op=0;tipobj.style.opacity = op; d.getElementById('mess').style.visibility='hidden';} function ietruebody(){return (d.compatMode && d.compatMode!="BackCompat")? d.documentElement : d.body} function positiontip(e) { if (d.getElementById('mess').style.visibility=='hidden'){ var curX=(ns6)?e.pageX : event.clientX+ietruebody().scrollLeft; var curY=(ns6)?e.pageY : event.clientY+ietruebody().scrollTop; var winwidth=ie? ietruebody().clientWidth : window.innerWidth-20 var winheight=ie? ietruebody().clientHeight : window.innerHeight-20 var rightedge=ie? winwidth-event.clientX : winwidth-e.clientX; var bottomedge=ie? 
winheight-event.clientY-offsetfromcursorY : winheight-e.clientY-offsetfromcursorY; ol = left; ot = top; left=0;top=0; if (rightedge < tipobj.offsetWidth) left=curX-tipobj.offsetWidth; else left=curX; if (bottomedge < tipobj.offsetHeight) top=curY-tipobj.offsetHeight-offsetfromcursorY else top=curY+offsetfromcursorY; if (left <= 0){left = 0;}; if (top <= 0){top = 0;}; if ( ((ol-left)*(ol-left)+(ot-top)*(ot-top)) >400){ tipobj.style.left = left; tipobj.style.top = top; } else{ol=left;ot=top;} };} function ap(el) {el.onmousemove='';op=tipobj.style.opacity;if (op==0){ op = 1; tipobj.style.opacity = op; tipobj.style.visibility="visible";}; if(op < 1) {op += 0.1; tipobj.style.opacity = op;tipobj.style.filter = 'alpha(opacity='+op*100+')';t = setTimeout('appear()', 30);};}</script> <style>span{cursor:pointer;cursor:hand;} span:hover{color:navy}</style> </head><body><div id="mess" style="visibility: hidden; position:absolute; background-color:white; border:1px dotted red; width:400px; height: 350px; overflow:auto "></div><table class="sortable" style="width: 100%; border: 1px solid gray" border=1 width=100%> <tr><td>k<td>num<td>v's""") else: filename = open("tags-%s.html" % (alpha, ), 'a') filename.write( "<tr><td>%s<td>%s<td>" % \ (k, self.TagsList[k][1] )) vallist = self.TagsList[k].keys() vallist.sort() for v in vallist: if v is not 1: uslist = self.TagsList[k][v].keys() uslist.sort() z = " " if self.MaxTagsElems > len( self.TagsList[k][v][objN("way")]): if self.TagsList[k][v][objN("way")]: z = z + _("Ways: ") for p in self.TagsList[k][v][objN("way")]: z = z + linkWay(p) + " " if self.MaxTagsElems > len( self.TagsList[k][v][objN("node")]): if self.TagsList[k][v][objN("node")]: z = z + "<br>" + _("Nodes: ") for p in self.TagsList[k][v][objN("node")]: z = z + linkNode(p) + " " if self.MaxTagsElems > len( self.TagsList[k][v][objN("relation")]): if self.TagsList[k][v][objN("relation")]: z = z + "<br>" + _("Relations: ") for p in self.TagsList[k][v][objN("relation")]: z = z 
+ linkRelation(p) + " " for p in uslist: if p not in (1, 2, 3, 5): z = z + "<br>%s: %s " % ( linkUser(self.User[p]["Name"]), self.TagsList[k][v][p]) filename.write( """<span onmouseover="tt(this,'%s<br>')" onclick="ap(this)">%s (%s)</span>, """ % \ (z, v, self.TagsList[k][v][1] )) filename.close() for alpha in alphabet: filename = open("tags-%s.html" % (alpha, ), 'a') filename.write("</table>") for link in alphabet: filename.write("\n<a href=tags-%s.html> %s </a>| " % (link, _(link))) filename.write("</body></html>") filename.close() ## end ifmod('tags') userlist = self.User.keys() userlist.sort( lambda x, y: self.User[y]["Nodes"] - self.User[x]["Nodes"]) if ifmod('users'): kml = open('users.kml', 'w') kml.write("""<?xml version="1.0" encoding="UTF-8" standalone="no"?> <kml xmlns="http://earth.google.com/kml/2.0"> <Document> <Style id="rss"> <IconStyle> <scale>0.5</scale> <Icon> <href>http://komzpa.net/favicon.ico</href> </Icon> </IconStyle> </Style> <Folder>""") kml.write("<name>%s users</name>" % (self.CountryName)) usersFile = open('users.html', 'w') usersFile.write(htmlheader % (" ", _("Users"))) usersFile.write(htmltablestart) usersFile.write( htmltablerow( (_("User"), _("Nodes"), _("Ways"), _("Relations"), _("%"), _("Speed [obj/day]"), _("First known edit"), _("Latest known edit"), _("Links")))) if ifmod('users-csv'): usersFileCsv = open('users.csv', 'w') for user in self.User.values(): if user["LastDate"] > self.LastChange: self.LastChange = user["LastDate"] ttt = """<Placemark> <name>%s</name> <description><h2><a href="http://openstreetmap.org/user/%s">%s</a></h2><br> Nodes: %s Ways: %s Relations: %s Since: %s </description> <styleUrl>#rss</styleUrl> <Point><coordinates>%s, %s</coordinates></Point> </Placemark>""" if ifmod('users-csv'): usersFileCsv.write("uid;name;nodes;ways;relations\r\n") ## end ifmod('users') for user in userlist: if self.FirstChange > self.User[user]["FirstDate"]: self.FirstChange = self.User[user]["FirstDate"] CHLink = " " if 
ifmod('calchome'): CSList = [] for i in self.User[user]["changesets"].values(): CSList.append([i["lat"], i["lon"], i["num"]]) if CSList: lat, lon = tuple(calcHome(CSList)) escapedName = cgi.escape(self.User[user]["Name"]) escapedName = escapedName.replace("+", "%20") CHLink = "<a href=\"http://openstreetmap.org/?mlat=%f&mlon=%f&minlat=%s&minlon=%s&maxlat=%s&maxlon=%s&box=yes\">%s</a>" % \ (lat, lon, self.User[user]["minlat"], self.User[user]["minlon"], self.User[user]["maxlat"], self.User[user]["maxlon"], _("CalcedHome")) kml.write(ttt % (escapedName, escapedName, escapedName, self.User[user]["Nodes"], \ self.User[user]["Ways"], \ self.User[user]["Relations"], \ self.User[user]["FirstDate"], \ lon, lat) ) speed = ((self.User[user]["Nodes"] + self.User[user]["Ways"] + self.User[user]["Relations"]) / (parse(self.LastChange) - parse(self.User[user]["FirstDate"]) + 1)) * 86400 if ifmod('users'): usersFile.write(htmltablerow(((linkUser(self.User[user]["Name"]), \ self.User[user]["Nodes"], \ self.User[user]["Ways"], \ self.User[user]["Relations"], \ 100. * (1.* self.User[user]["Nodes"] / self.NodesCount + 1. 
* self.User[user]["Ways"] / self.WaysCount) / 2,\ speed,\ self.User[user]["FirstDate"], \ self.User[user]["LastDate"], \ CHLink , \ )))) if ifmod('users-csv'): usersFileCsv.write("\"%s\";\"%s\";\"%d\";\"%d\";\"%d\"\r\n" % \ (user, self.User[user]["Name"], self.User[user]["Nodes"], self.User[user]["Ways"], self.User[user]["Relations"])) if ifmod('users'): kml.write("""</Folder> </Document> </kml>""") kml.close() usersFile.write("</table></body></html>") usersFile.close() if ifmod('users-csv'): usersFileCsv.close() ## end ifmod('users') if ifmod('places'): localLangs = _("name:be,name:en,name:ru") localLangs = localLangs.split(",") localLangs = tuple(localLangs) pointsFile = open('points.html', 'w') pointsFile.write(htmlheader % ("", _("Localities"))) pointsFile.write(htmltablestart) pointsFile.write( htmltablerow((_("Name"), ) + localLangs + (_("Type"), _("Link")))) for PointNum in self.Places.keys(): place = self.Places[PointNum] localLangsT = [] for langtag in localLangs: localLangsT.append(place.get(langtag, self.FixMe)) localLangsT = tuple(localLangsT) pointsFile.write(htmltablerow( (place.get("name",self.FixMe),\ ) + localLangsT + ( \ place['place'], linkMarker(place['lat'], place['lon'], _("goto") ) ) )) pointsFile.write("</table></body></html>") pointsFile.close() ## end ifmod('places') indexFile = open('index.html', 'w') indexFile.write(htmlheader % (_("Welcome to :"), "")) if ifmod('density'): indexFile.write( "<a href=density.png><img src='density.small.jpg' align='right'></a>" ) indexFile.write(_("<h3>OSM Statistics for %s</h3>") % self.CountryName) indexFile.write("<ul>") if ifmod('users'): indexFile.write(_("<li><a href=users.html>users</a></li>")) if ifmod('places'): indexFile.write(_("<li><a href=points.html>points</a></li>")) if ifmod('addr'): indexFile.write( _("<li>address relations: %s</li>") % self.AddrRelLinks) if ifmod('warnings'): indexFile.write(_("<li><a href=warnings.html>warnings</a></li>")) # FIXME what is it? 
#if ifmod('borders'): # indexFile.write(_("<li><a href=borders.html>borders</a></li>")) if ifmod('borders'): indexFile.write( _("<li><a href=unnamed.html>unnamed borders</a></li>")) if ifmod('routing'): indexFile.write( _("<li><a href=route.html>routing subgraphs</a></li>")) if ifmod('tags'): indexFile.write(_("<li>tags: ")) for alpha in alphabet: indexFile.write("\n<a href=tags-%s.html>%s</a> | " % (alpha, _(alpha))) indexFile.write("</ul>") if ifmod('density'): best = (0, 0) for i in self.Tiles.keys(): for j in self.Tiles[i].keys(): if len(self.Tiles[i][j]) > self.FillTile: self.FillTile = len(self.Tiles[i][j]) best = (i, j) self.DatesCSV = open('graph.csv', 'w') daysSorted = self.DatesGraph.keys() daysSorted.sort() lastDaysActivity = 0 for day in daysSorted: self.DatesCSV.write( "%s\t%s\t%s\t%s\n" % (day, self.DatesGraph[day][0], self.DatesGraph[day][1], self.DatesGraph[day][2])) lastDay = daysSorted[-1] for day in daysSorted[-15:]: if (abs(parse(lastDay) - parse(day))) <= (2 * 604800.0): lastDaysActivity += self.DatesGraph[day][0] + self.DatesGraph[ day][1] + self.DatesGraph[day][2] lastDaysActivity = lastDaysActivity / 14. 
self.DatesCSV.close() speed = (self.NodesCount + self.WaysCount + self.RelationsCount) / ( parse(self.LastChange) - parse(self.FirstChange)) * 86400 indexFile.write( _("<p>Average mapping speed: <b>%f</b> obj/day</p>") % (speed, )) if ifmod('density'): indexFile.write( _("<p>Tiles fill(avg/max): <b>%f</b>/<b>%s</b></p>") % ( self.NodesCount / (self.TilesCreated * 1.), self.FillTile, )) indexFile.write( _("<p>Number of Nodes: <b>%s</b></p>") % (self.NodesCount, )) indexFile.write( _("<p>Number of Ways: <b>%s</b></p>") % (self.WaysCount, )) indexFile.write( _("<p>Number of Relations: <b>%s</b></p>") % (self.RelationsCount, )) if ifmod('places') or ifmod('borders'): # FIXME FIXME FIXME indexFile.write( _("<p>Number of Borders/Places: <b>%s</b>/<b>%s</b></p>") % (self.BordersCount, self.PlacesCount)) indexFile.write( _("<p>First known update: <b>%s</b></p>") % (self.FirstChange, )) indexFile.write( _("<p>Last update: <b>%s</b></p>") % (self.LastChange, )) indexFile.write("</body></html>") indexFile.close() filename = open('oneline.inc.html', 'w') if ifmod('density'): filename.write("%s</a><td>%s</td><td>%s</td><td>%.3f</td><td>%s</td><td>%.3f</td><td>%s</td></tr>" % (self.CountryName, self.LastChange, len(self.User), speed, \ " ",lastDaysActivity,\ " ", \ )) else: filename.write("%s</a><td>%s</td><td>%s</td><td>%.3f</td><td>%s</td><td>%.3f</td><td>%s</td></tr>" % (self.CountryName, self.LastChange, len(self.User), speed, \ " ",lastDaysActivity,\ " ", \ )) filename.close() ### Making routing if ifmod('routing'): self.routeFile = open('route.html', 'w') self.routeFile.write(htmlheader % ("", _("Routes"))) self.routeFile.write(htmltablestart) self.routeFile.write( htmltablerow((_("Number of ways"), _("First way in group")))) wayQ = set([]) wayP = set([]) numRoute = 0 self.WayGroups = { 0: "", } self.WayGroupsId = { 0: "", } numR = len(self.RoutableWays) numP = 0 numPrev = 0 starttime = time.time() prevtime = starttime while self.RoutableWays: if time.time() - prevtime >= 
6: print "still %s %s, %s, %s%%, ETA:%ss (%s/s)" % ( len(wayQ), numP, numR - numP, 1. * numP / numR * 100., (numR - numP) * (time.time() - prevtime) / (numP - numPrev), (numP - numPrev) / (time.time() - prevtime)) prevtime = time.time() time.sleep(1) numPrev = numP if not wayQ: popped = self.RoutableWays.pop() wayQ.add(popped) wayP.update(wayQ) sys.stderr.write("NewRoutable %d\n" % numRoute) numRoute += 1 self.WayGroups[numRoute] = 1 self.WayGroupsId[numRoute] = popped way = self.Ways[wayQ.pop()] #print way, wayQ for node in way: if len(self.NodesToWays[node]) != 1: for tway in self.NodesToWays[node]: if tway not in wayP: if tway in self.RoutableWays: wayQ.add(tway) wayP.add(tway) self.WayGroups[numRoute] += 1 self.RoutableWays.remove(tway) numP += 1 sys.stderr.write("WayGroups = %s\n" % repr(self.WayGroups)) for i in range(1, numRoute): self.routeFile.write( htmltablerow( (self.WayGroups[i], linkWayMap(self.WayGroupsId[i])))) self.routeFile.write('</table></body></html>') self.routeFile.close() ## Making pretty image if ifmod('density'): try: from PIL import Image, ImageDraw density = self.NodesCount / self.TilesCreated * 1. sys.stderr.write('Generating picture') width = self.MaxLoTile - self.MinLoTile height = self.MaxLaTile - self.MinLaTile img = Image.new("RGB", (self.MaxLoTile - self.MinLoTile + 1, self.MaxLaTile - self.MinLaTile + 2), (255, 255, 255)) gamma = 0.5 #draw = ImageDraw.Draw(img) #draw.text((0,0), "%s"%(self.CountryName) ,fill= (0,0,0)) png = img.load() # for i in self.Tiles.keys(): # for j in self.Tiles[i].keys(): # if len(self.Tiles[i][j]) > self.FillTile: # self.FillTile = len(self.Tiles[i][j]) for i in self.Tiles.keys(): for j in self.Tiles[i].keys(): c = len(self.Tiles[i][j]) * 1. / self.FillTile c = c**gamma t = int(c * 255) png[j - self.MinLoTile, -i + self.MaxLaTile] = (256 - t, 256 - t, t) for i in range(0, width + 1): c = i / (width + 1 * 1.) 
c = c**gamma t = int(c * 255) png[i, height + 1] = (256 - t, 256 - t, t) if height > width: oh = height height = 300 width = int(width * (1. * height / oh)) else: ow = width width = 300 height = int(height * (1. * width / ow)) img.save("density.png") print width, height img = img.resize((width, height), Image.ANTIALIAS) img.save("density.small.jpg") sys.stderr.write('.\n') except ImportError: # PIL import error sys.stderr.write("PIL import error, skip picture generation\n") pass ## end ifmod('density') if ifmod('warnings'): self.warningsFile.write('</table></body></html>') self.warningsFile.close()
def parse_iso_date(iso_date):
    """Convert an ISO 8601 date string into a naive local-time datetime.

    ``iso8601.parse`` yields seconds since the epoch; ``fromtimestamp``
    then interprets those seconds in the machine's local timezone,
    discarding whatever zone the input string carried.
    """
    epoch_seconds = iso8601.parse(iso_date)
    return datetime.datetime.fromtimestamp(epoch_seconds)
def __init__(self, date):
    """Store *date*, first parsing ISO 8601 strings into epoch seconds.

    Non-string values are stored unchanged; strings are run through
    PyXML's ``xml.utils.iso8601`` parser (imported lazily so the
    dependency is only needed when a string is actually given).
    """
    if not isinstance(date, str):
        self.date = date
    else:
        from xml.utils.iso8601 import parse
        self.date = parse(date)
def worker(job_json):
    """
    For every incoming message, this worker function is called. Be extremely
    careful not to do anything CPU-intensive here, or you will see blocking.
    Sockets are async under gevent, so those are fair game.

    Flow: decompress the zlib-packed market JSON, skip messages older than
    the cached copy, tally sell/buy order volumes per price, compute the
    average price of the cheapest (sell) / dearest (buy) 5% of volume, and
    write the merged result back to memcache.
    """
    '''
    todo: look into putting it into mysql, loading mysql into memcache look into logging to files per type id recurse into rowsets: every feed is not necessarily 1 typeID (though it usually is)
    '''
    # Module-level counters: f counts processed feeds, s counts skipped
    # ones (presumably — their definitions are outside this view).
    global f;
    global s;
    # REGIONS, when set, names a JSON file acting as a region whitelist.
    # NOTE(review): it is re-read on every message — could be hoisted.
    if REGIONS is not False:
        json_data = open(REGIONS)
        regionDict = json.load(json_data)
        json_data.close()
    else:
        pass
    # Receive raw market JSON strings.
    market_json = zlib.decompress(job_json);
    # Un-serialize the JSON data to a Python dict.
    market_data = simplejson.loads(market_json);
    # Gather some useful information
    name = market_data.get('generator');
    name = name['name'];
    resultType = market_data.get('resultType');
    rowsets = market_data.get('rowsets')[0]; # recurse into others, not just [0]
    typeID = rowsets['typeID'];
    columns = market_data.get('columns');
    # Convert str time to int (epoch seconds via the module-level parse()).
    currentTime = parse(market_data.get('currentTime'));
    generatedAt = parse(rowsets['generatedAt']);
    numberOfSellItems = 0;
    numberOfBuyItems = 0;
    # price -> total remaining volume at that price
    sellPrice = {}
    buyPrice = {}
    data = { # set defaults, will be overwritten during parsing, or use old cache?
        'orders': { 'generatedAt': False, 'sell': False, 'buy': False },
        'history': { 'generatedAt': False}
    }
    if DEBUG: # write raw json to file
        try:
            file = open("type-"+str(typeID)+".txt", "w")
            try:
                file.write(market_json) # Write a string to a file
            finally:
                file.close()
        except IOError:
            pass
    '''
    Cache is in this format:
    emdr-VERSION-REGIONID-TYPEID = {'orders': { 'generatedAt': timestamp,
    'sell': [fiveAverageSellPrice, numberOfSellItems],
    'buy': [fiveAverageBuyPrice, numberOfBuyItems] }
    'history': [] }
    '''
    # Process only whitelisted regions (or everything when REGIONS is off).
    if (REGIONS == False or (REGIONS != False and str(rowsets['regionID']) in regionDict)):
        cached = mc.get('emdr-'+str(VERSION)+'-'+str(rowsets['regionID'])+'-'+str(typeID));
        # If data has been cached for this item, check the dates. If dates match, skip
        # todo: TEST TO MAKE SURE this is not deleting data, and only overwriting old cache
        if (cached != None):
            #if we have data in cache, split info into list
            cache = simplejson.loads(cached);
            if DEBUG:
                print "\n\n(",resultType,") Cache:",cache
            # parse date
            if cache[resultType]['generatedAt'] is not False:
                cachedate = cache[resultType]['generatedAt'];
                if (DEBUG):
                    print "\nCached data found (result type: ",resultType,")!\n\tNew date: "+str(datetime.fromtimestamp(generatedAt))+"\n\tCached date: "+ str(datetime.fromtimestamp(cachedate));
                # Incoming message is staler than the cache: drop it.
                if (generatedAt < cachedate):
                    s += 1
                    if (DEBUG):
                        print "\t\tSKIPPING";
                    return '';
                data = cache # set default data to cached data
        if (ORDERS and resultType == 'orders'):
            data['orders']['generatedAt'] = generatedAt
            if (DEBUG):
                print "\n\n\n\n======== New record ========";
            # Start putting pricing info (keys) into dicts with volume (values)
            for row in rowsets['rows']:
                order = dict(zip(columns, row))
                # bid == False means a sell order; True means a buy order.
                if (order['bid'] == False):
                    if (DEBUG):
                        print "Found sell order for "+str(order['price']) + "; vol: "+str(order['volRemaining']);
                    if (sellPrice.get(order['price']) != None):
                        sellPrice[order['price']] += order['volRemaining'];
                    else:
                        sellPrice[order['price']] = order['volRemaining'];
                    numberOfSellItems += order['volRemaining'];
                else:
                    if (DEBUG):
                        print "Found buy order for "+str(order['price']) + "; vol: "+str(order['volRemaining']);
                    if (buyPrice.get(order['price']) != None):
                        buyPrice[order['price']] += order['volRemaining'];
                    else:
                        buyPrice[order['price']] = order['volRemaining'];
                    numberOfBuyItems += order['volRemaining'];
            #end loop
            if (DEBUG):
                print "\nSell dict:",sellPrice
                print "\nBuy dict:",buyPrice
                print "\nTotal volume on market: ",numberOfSellItems," Sell + ",numberOfBuyItems
            # Volume-weighted average price of the cheapest 5% of sell volume:
            # walk prices ascending, "buying" until 5% of total volume is met.
            if (numberOfSellItems > 0):
                prices = sorted(sellPrice.items(), key=lambda x: x[0]);
                fivePercentOfTotal = max(int(numberOfSellItems*0.05),1);
                fivePercentPrice=0;
                bought=0;
                boughtPrice=0;
                if (DEBUG):
                    print "Sell Prices (sorted):\n",prices
                    print "Start buying process!"
                while (bought < fivePercentOfTotal):
                    pop = prices.pop(0)
                    fivePercentPrice = pop[0]
                    if (DEBUG):
                        print "\tBought: ",bought,"/",fivePercentOfTotal
                        print "\t\tNext pop: ",fivePercentPrice," ISK, vol: ",pop[1]
                    if (fivePercentOfTotal > ( bought + sellPrice[fivePercentPrice])):
                        boughtPrice += sellPrice[fivePercentPrice]*fivePercentPrice;
                        bought += sellPrice[fivePercentPrice];
                        if (DEBUG):
                            print "\t\tHave not met goal. Bought:",bought
                    else:
                        # Partial fill of the last price level to hit the goal exactly.
                        diff = fivePercentOfTotal - bought;
                        boughtPrice += fivePercentPrice*diff;
                        bought = fivePercentOfTotal;
                        if (DEBUG):
                            print "\t\tGoal met. Bought:",bought
                fiveAverageSellPrice = boughtPrice/bought;
                if (DEBUG):
                    print "Average selling price (first 5% of volume):",fiveAverageSellPrice
                data['orders']['sell'] = [ "%.2f" % fiveAverageSellPrice, numberOfSellItems]
            # Same 5% average for buy orders, but walking prices descending
            # (the best buy orders are the highest bids).
            if (numberOfBuyItems > 0):
                prices = sorted(buyPrice.items(), key=lambda x: x[0], reverse=True);
                fivePercentOfTotal = max(int(numberOfBuyItems*0.05),1);
                fivePercentPrice=0;
                bought=0;
                boughtPrice=0;
                if (DEBUG):
                    print "Buy Prices (sorted):\n",prices
                    print "Start buying process!"
                while (bought < fivePercentOfTotal):
                    pop = prices.pop(0)
                    fivePercentPrice = pop[0]
                    if (DEBUG):
                        print "\tBought: ",bought,"/",fivePercentOfTotal
                        print "\t\tNext pop: ",fivePercentPrice," ISK, vol: ",pop[1]
                    if (fivePercentOfTotal > ( bought + buyPrice[fivePercentPrice])):
                        boughtPrice += buyPrice[fivePercentPrice]*fivePercentPrice;
                        bought += buyPrice[fivePercentPrice];
                        if (DEBUG):
                            print "\t\tHave not met goal. Bought:",bought
                    else:
                        diff = fivePercentOfTotal - bought;
                        boughtPrice += fivePercentPrice*diff;
                        bought = fivePercentOfTotal;
                        if (DEBUG):
                            print "\t\tGoal met. Bought:",bought
                fiveAverageBuyPrice = boughtPrice/bought;
                if (DEBUG):
                    print "Average buying price (first 5% of volume):",fiveAverageBuyPrice
                data['orders']['buy'] = [ "%.2f" % fiveAverageBuyPrice, numberOfBuyItems]
        # Persist the merged (cached + freshly computed) record.
        mc.set('emdr-'+str(VERSION)+'-'+str(rowsets['regionID'])+'-'+str(typeID), simplejson.dumps(data));
        if (DEBUG):
            print 'SUCCESS: emdr-'+str(VERSION)+'-'+str(rowsets['regionID'])+'-'+str(typeID),simplejson.dumps(data)
        f += 1
def __init__(self, filename): #self.db = db self.useless = ('', 'created_by', 'source', 'editor', 'ele', 'time', 'editor', 'author', 'hdop', 'pdop', 'sat', 'speed', 'fix', 'course', 'converted_by', 'attribution', 'upload_tag', 'history') self.User = {} self.currentId = 0 self.goodUser = ('') self.TilesCreated = 0 self.Nodes = {} self.NodesToWays = {} self.NodesToWaysR = {} self.Ways = {} self.Borders = {} self.Places = {} self.DatesGraph = {} self.RoutableWays = set([]) self.BPlaces = {} self.currentUser = "" # user who created latest obj self.currentType = "" # latest obj type self.NodesCount = 0 self.PlacesCount = 0 self.RelationsCount = 0 self.Address = {} self.currentMembers = [] self.currentRelID = 0 self.WaysCount = 0 self.prevtime = time.time() self.prevcount = 0 self.BorderList = [] self.BordersCount = 0 self.MinLaTile = 100000000 self.MaxLaTile = -100000000 self.MinLoTile = 100000000 self.MaxLoTile = -10000000 self.FillTile = 0 self.TagsList = {} self.ZoomLevel = 200.0 #130 self.Tiles = {} self.FixMe = _('<b>FixMe</b>') self.AddrRelLinks = '' self.MaxTagsElems = 20 self.LastChange = '' self.FirstChange = 'z' self.WaysPassed = set([]) self.CountryName = CountryName self.packlatlon = lambda lat,lon: lon/abs(lon)*(int(abs(lat*1000000))*2+((lat/abs(lat)+1)/2)+abs(lon/360)) self.unpacklatlon = lambda n: ( -(-1)**(int(n)%2)* abs(int(n/2)/1000000.), (n/abs(n))*(abs(n)%1)*360) htmlheader = "<html><head><title>%%s%s %s: %%s</title><meta http-equiv=Content-Type content=\"text/html; charset=UTF-8\" /><script src=\"/stat/sorttable.js\"></script></head><body>" % (_("OSM Stats"),self.CountryName) htmltablestart = "<table class=\"sortable\" style=\"width: 100%; border: 1px solid gray\" border=1 width=100%>" def htmltablerow (cols): tr = "<tr>" for col in cols: if type(col) == type(float()): tr = tr + "<td align=\"right\">%.3f</td>" % (col,) else: tr = tr + "<td>%s</td>" % (col,) return tr+"</tr>\n" if ifmod('warnings'): self.warningsFile = open('warnings.html','w') 
self.warningsFile.write(htmlheader % (" ", _("Warnings"))) self.warningsFile.write(htmltablestart); self.warningsFile.write(htmltablerow((_("Warning type"), _("Way")))) try: parser = make_parser() parser.setContentHandler(self) parser.parse(filename) except xml.sax._exceptions.SAXParseException: sys.stderr.write( _("Error loading %s\n") % filename) ## Dealing with BorderList if ifmod('borders'): self.BorderList.sort((lambda x,y: (int(y[0]>x[0])*2)-1)) self.unnamedBordersFile = open('unnamed.html','w') in_unnamed = 0 in_named = 0 perpage = 500 for tt in self.BorderList: dens, area, nodesInWay, bbox = tt if (in_unnamed%perpage)==0: self.unnamedBordersFile.write('</table></body></html>') self.unnamedBordersFile.close() self.unnamedBordersFile = open('unnamed%s.html'%(int(in_unnamed/perpage)+1),'w') self.unnamedBordersFile.write(htmlheader % (" ", _("Unnamed borders")) ) self.unnamedBordersFile.write(htmltablestart) self.unnamedBordersFile.write(htmltablerow(( _("Area [km<sup>2</sup>]"),_("Nodes"),_("Density [nodes/km<sup>2</sup>]"),_("Link")))) in_unnamed += 1 bf = self.unnamedBordersFile bf.write(htmltablerow((area, nodesInWay, dens, linkBbox(bbox)))) self.unnamedBordersFile.write('</table></body></html>') self.unnamedBordersFile.close() filename = open('unnamed.html','w') filename.write(htmlheader % (" ", _("Unnamed borders pages list")) ) filename.write("<body><h2>%s</h2><ul>" % _("Unnamed borders pages list")) for i in range(1,int(in_unnamed/perpage)+2): filename.write('<li><a href=unnamed%s.html>%s %s</a>'%(i,_("Page"),i)) filename.write("</ul></body></html>") filename.close() if ifmod('addr'): dirname = "addr" if not os.path.isdir("./" + dirname + "/"): os.mkdir("./" + dirname + "/") filename = open('addr/index.html','w') filesStarted = ['addr/index.html'] filename.write(htmlheader % (" ", _("Postal address relations")) ) def AddrTreeRecurse (top, wayback="» "): if top in self.WaysPassed: if ifmod('warnings'): self.warningsFile.write 
(htmltablerow((_("double-pass on address relation"),top))) return wayback = wayback + "%s<a href=%s.html>%s</a> » " % (linkRelationIcon(top),top, self.Address[top]["tags"].get("name",top) ) filename = open('addr/%s.html'%(top,),'w') filename.write(htmlheader % ("", self.Address[top]["tags"].get("name", "Unknown address"))) filename.write(" %s<p>" % wayback) nodes = 0 ways = 0 rels = 0 self.WaysPassed.add(top) for ctype, child in self.Address[top]["child"]: if ctype == 'node': if nodes == 0: filename.write(htmltablestart) filename.write(htmltablerow((_("Name"),_("Density"),_("Area"),_("Nodes"),_("Type"),_("Link")))) nodes += 1 if int(child) in self.BPlaces: area,nodesIn, bbox, tags = self.BPlaces[int(child)] filename.write(htmltablerow((\ tags["name"], \ 1.* nodesIn / area, \ area, \ nodesIn, \ tags["place"], \ linkBboxMarker(bbox, (tags["lat"],tags["lon"])), \ ))) elif ifmod('places') and (int(child) in self.Places): filename.write(htmltablerow((\ self.Places[int(child)].get("name", child), \ " ", \ " ", \ " ", \ self.Places[int(child)].get("place", _("??? 
unknown")), \ linkNode(child), \ ))) else: filename.write(htmltablerow((linkNode(child),) )) filename.write("</table>") for ctype, child in self.Address[top]["child"]: if ctype == 'way': filename.write(linkWay(child)+" ") for ctype, child in self.Address[top]["child"]: if ctype == 'relation': if rels == 0: filename.write(_("<h3>Relations</h3>")) rels += 1 if child in self.Address: filename.write("<br>%s<a href=%s.html>%s</a> " %(linkRelationIcon(child),child, self.Address[child]["tags"].get("name",child) )) AddrTreeRecurse(child,wayback) else: filename.write(linkRelation(child)) #print self.BPlaces ##filename.write(str(self.Address[top]["child"])) filename.write("</body></html>") filename.close() while self.Address: top = self.Address.keys()[0] while self.Address[top]["parent"] in self.Address: top = self.Address[top]["parent"] AddrTreeRecurse (top) filename.write("<a href=%s.html>%s</a> " %(top, self.Address[top]["tags"].get("name",top) )) self.AddrRelLinks = self.AddrRelLinks + "<a href=addr/%s.html>%s</a> " %(top, self.Address[top]["tags"].get("name",top) ) #print self.Address[top] for aaaaa in self.WaysPassed: del self.Address[aaaaa] self.WaysPassed = set([]) filename.close() for toClose in filesStarted: filename = open(toClose,'a') filename.write("</body></html>") filename.close() # end ifmod('addr') if ifmod('tags'): filename = open("tags.html",'w') taglist = self.TagsList.keys() taglist.sort() alphabet = [] for k in taglist: filename.close() if k: alpha = k[0] else: alpha = "" if alpha not in ("a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z"): alpha = "other" tt = _("other") if alpha not in alphabet: alphabet.append(alpha) filename = open("tags-%s.html"%(alpha,),'w') filename.write(htmlheader % ("[%s]" % _(alpha), _("Tags"))) filename.write(""" <script type="text/javascript"> var d = document; var offsetfromcursorY=15 // y offset of tooltip var ie=d.all && !window.opera; var ns6=d.getElementById && 
!d.all; var tipobj,op=0; var left,top=0; function tt(el,txt) { if (d.getElementById('mess').style.visibility=='hidden'){ tipobj=d.getElementById('mess'); e = el; tipobj.innerHTML = '<div style="float:right;align:right;cursor:pointer;cursor:hand;border-bottom:1px solid grey;padding:2px;border-left:1px solid black;" onclick="hide_info(this)">x</div>'+ txt; el.onmousemove=positiontip;};} function hide_info(el) {op=0;tipobj.style.opacity = op; d.getElementById('mess').style.visibility='hidden';} function ietruebody(){return (d.compatMode && d.compatMode!="BackCompat")? d.documentElement : d.body} function positiontip(e) { if (d.getElementById('mess').style.visibility=='hidden'){ var curX=(ns6)?e.pageX : event.clientX+ietruebody().scrollLeft; var curY=(ns6)?e.pageY : event.clientY+ietruebody().scrollTop; var winwidth=ie? ietruebody().clientWidth : window.innerWidth-20 var winheight=ie? ietruebody().clientHeight : window.innerHeight-20 var rightedge=ie? winwidth-event.clientX : winwidth-e.clientX; var bottomedge=ie? 
winheight-event.clientY-offsetfromcursorY : winheight-e.clientY-offsetfromcursorY; ol = left; ot = top; left=0;top=0; if (rightedge < tipobj.offsetWidth) left=curX-tipobj.offsetWidth; else left=curX; if (bottomedge < tipobj.offsetHeight) top=curY-tipobj.offsetHeight-offsetfromcursorY else top=curY+offsetfromcursorY; if (left <= 0){left = 0;}; if (top <= 0){top = 0;}; if ( ((ol-left)*(ol-left)+(ot-top)*(ot-top)) >400){ tipobj.style.left = left; tipobj.style.top = top; } else{ol=left;ot=top;} };} function ap(el) {el.onmousemove='';op=tipobj.style.opacity;if (op==0){ op = 1; tipobj.style.opacity = op; tipobj.style.visibility="visible";}; if(op < 1) {op += 0.1; tipobj.style.opacity = op;tipobj.style.filter = 'alpha(opacity='+op*100+')';t = setTimeout('appear()', 30);};}</script> <style>span{cursor:pointer;cursor:hand;} span:hover{color:navy}</style> </head><body><div id="mess" style="visibility: hidden; position:absolute; background-color:white; border:1px dotted red; width:400px; height: 350px; overflow:auto "></div><table class="sortable" style="width: 100%; border: 1px solid gray" border=1 width=100%> <tr><td>k<td>num<td>v's""") else: filename = open("tags-%s.html"%(alpha,),'a') filename.write( "<tr><td>%s<td>%s<td>" % \ (k, self.TagsList[k][1] )) vallist = self.TagsList[k].keys() vallist.sort() for v in vallist: if v is not 1: uslist = self.TagsList[k][v].keys() uslist.sort() z = " " if self.MaxTagsElems > len(self.TagsList[k][v][objN("way")]): if self.TagsList[k][v][objN("way")]: z = z + _("Ways: ") for p in self.TagsList[k][v][objN("way")]: z = z + linkWay(p)+" " if self.MaxTagsElems > len(self.TagsList[k][v][objN("node")]): if self.TagsList[k][v][objN("node")]: z = z +"<br>"+ _("Nodes: ") for p in self.TagsList[k][v][objN("node")]: z = z + linkNode(p)+" " if self.MaxTagsElems > len(self.TagsList[k][v][objN("relation")]): if self.TagsList[k][v][objN("relation")]: z = z + "<br>"+_("Relations: ") for p in self.TagsList[k][v][objN("relation")]: z = z + 
linkRelation(p)+" " for p in uslist: if p not in (1, 2, 3, 5): z = z + "<br>%s: %s "% (linkUser(self.User[p]["Name"]), self.TagsList[k][v][p]) filename.write( """<span onmouseover="tt(this,'%s<br>')" onclick="ap(this)">%s (%s)</span>, """ % \ (z, v, self.TagsList[k][v][1] )) filename.close() for alpha in alphabet: filename = open("tags-%s.html"%(alpha,),'a') filename.write("</table>") for link in alphabet: filename.write("\n<a href=tags-%s.html> %s </a>| " % (link,_(link))) filename.write("</body></html>") filename.close() ## end ifmod('tags') userlist = self.User.keys() userlist.sort(lambda x,y: self.User[y]["Nodes"]-self.User[x]["Nodes"]) if ifmod('users'): kml = open('users.kml','w') kml.write("""<?xml version="1.0" encoding="UTF-8" standalone="no"?> <kml xmlns="http://earth.google.com/kml/2.0"> <Document> <Style id="rss"> <IconStyle> <scale>0.5</scale> <Icon> <href>http://komzpa.net/favicon.ico</href> </Icon> </IconStyle> </Style> <Folder>""") kml.write("<name>%s users</name>" % (self.CountryName) ) usersFile = open('users.html','w') usersFile.write(htmlheader % (" ",_("Users"))) usersFile.write(htmltablestart) usersFile.write(htmltablerow((_("User"), _("Nodes"), _("Ways"), _("Relations"), _("%"), _("Speed [obj/day]"), _("First known edit"), _("Latest known edit"), _("Links")))) if ifmod('users-csv'): usersFileCsv = open('users.csv','w') for user in self.User.values(): if user["LastDate"] > self.LastChange: self.LastChange = user["LastDate"] ttt = """<Placemark> <name>%s</name> <description><h2><a href="http://openstreetmap.org/user/%s">%s</a></h2><br> Nodes: %s Ways: %s Relations: %s Since: %s </description> <styleUrl>#rss</styleUrl> <Point><coordinates>%s, %s</coordinates></Point> </Placemark>""" if ifmod('users-csv'): usersFileCsv.write("uid;name;nodes;ways;relations\r\n") ## end ifmod('users') for user in userlist: if self.FirstChange > self.User[user]["FirstDate"]: self.FirstChange = self.User[user]["FirstDate"] CHLink = " " if ifmod('calchome'): CSList = 
[] for i in self.User[user]["changesets"].values(): CSList.append([i["lat"],i["lon"],i["num"]]) if CSList: lat, lon = tuple(calcHome(CSList)) escapedName = cgi.escape (self.User[user]["Name"]) escapedName = escapedName.replace("+","%20") CHLink = "<a href=\"http://openstreetmap.org/?mlat=%f&mlon=%f&minlat=%s&minlon=%s&maxlat=%s&maxlon=%s&box=yes\">%s</a>" % \ (lat, lon, self.User[user]["minlat"], self.User[user]["minlon"], self.User[user]["maxlat"], self.User[user]["maxlon"], _("CalcedHome")) kml.write(ttt % (escapedName, escapedName, escapedName, self.User[user]["Nodes"], \ self.User[user]["Ways"], \ self.User[user]["Relations"], \ self.User[user]["FirstDate"], \ lon, lat) ) speed = ((self.User[user]["Nodes"]+self.User[user]["Ways"]+self.User[user]["Relations"])/(parse(self.LastChange)-parse(self.User[user]["FirstDate"])+1))*86400 if ifmod('users'): usersFile.write(htmltablerow(((linkUser(self.User[user]["Name"]), \ self.User[user]["Nodes"], \ self.User[user]["Ways"], \ self.User[user]["Relations"], \ 100. * (1.* self.User[user]["Nodes"] / self.NodesCount + 1. 
* self.User[user]["Ways"] / self.WaysCount) / 2,\ speed,\ self.User[user]["FirstDate"], \ self.User[user]["LastDate"], \ CHLink , \ )))) if ifmod('users-csv'): usersFileCsv.write("\"%s\";\"%s\";\"%d\";\"%d\";\"%d\"\r\n" % \ (user, self.User[user]["Name"], self.User[user]["Nodes"], self.User[user]["Ways"], self.User[user]["Relations"])) if ifmod('users'): kml.write("""</Folder> </Document> </kml>""") kml.close() usersFile.write( "</table></body></html>") usersFile.close() if ifmod('users-csv'): usersFileCsv.close() ## end ifmod('users') if ifmod('places'): localLangs = _("name:be,name:en,name:ru") localLangs = localLangs.split(",") localLangs = tuple (localLangs) pointsFile = open('points.html','w') pointsFile.write(htmlheader % ("",_("Localities"))) pointsFile.write(htmltablestart) pointsFile.write(htmltablerow((_("Name"),) + localLangs + ( _("Type"), _("Link")))) for PointNum in self.Places.keys(): place = self.Places[PointNum] localLangsT = [] for langtag in localLangs: localLangsT.append(place.get(langtag, self.FixMe)) localLangsT = tuple (localLangsT) pointsFile.write(htmltablerow( (place.get("name",self.FixMe),\ ) + localLangsT + ( \ place['place'], linkMarker(place['lat'], place['lon'], _("goto") ) ) )) pointsFile.write( "</table></body></html>") pointsFile.close() ## end ifmod('places') indexFile = open('index.html','w') indexFile.write(htmlheader % (_("Welcome to :"),"")) if ifmod('density'): indexFile.write("<a href=density.png><img src='density.small.jpg' align='right'></a>"); indexFile.write(_("<h3>OSM Statistics for %s</h3>") % self.CountryName) indexFile.write("<ul>") if ifmod('users'): indexFile.write(_("<li><a href=users.html>users</a></li>")) if ifmod('places'): indexFile.write(_("<li><a href=points.html>points</a></li>")) if ifmod('addr'): indexFile.write(_("<li>address relations: %s</li>") % self.AddrRelLinks ) if ifmod('warnings'): indexFile.write(_("<li><a href=warnings.html>warnings</a></li>")) # FIXME what is it? 
#if ifmod('borders'): # indexFile.write(_("<li><a href=borders.html>borders</a></li>")) if ifmod('borders'): indexFile.write(_("<li><a href=unnamed.html>unnamed borders</a></li>")) if ifmod('routing'): indexFile.write(_("<li><a href=route.html>routing subgraphs</a></li>")) if ifmod('tags'): indexFile.write(_("<li>tags: ")) for alpha in alphabet: indexFile.write("\n<a href=tags-%s.html>%s</a> | " % (alpha,_(alpha))) indexFile.write( "</ul>") if ifmod('density'): best = (0,0) for i in self.Tiles.keys(): for j in self.Tiles[i].keys(): if len(self.Tiles[i][j]) > self.FillTile: self.FillTile = len(self.Tiles[i][j]) best = (i,j) self.DatesCSV = open('graph.csv','w') daysSorted = self.DatesGraph.keys() daysSorted.sort() lastDaysActivity = 0 for day in daysSorted: self.DatesCSV.write("%s\t%s\t%s\t%s\n"%(day,self.DatesGraph[day][0],self.DatesGraph[day][1],self.DatesGraph[day][2]) ) lastDay = daysSorted[-1] for day in daysSorted[-15:]: if (abs(parse(lastDay) - parse(day))) <= (2* 604800.0): lastDaysActivity += self.DatesGraph[day][0] + self.DatesGraph[day][1] + self.DatesGraph[day][2] lastDaysActivity = lastDaysActivity / 14. 
self.DatesCSV.close() speed = (self.NodesCount+self.WaysCount+self.RelationsCount)/(parse(self.LastChange)-parse(self.FirstChange))*86400 indexFile.write( _("<p>Average mapping speed: <b>%f</b> obj/day</p>") % (speed, )) if ifmod('density'): indexFile.write( _("<p>Tiles fill(avg/max): <b>%f</b>/<b>%s</b></p>") % (self.NodesCount/(self.TilesCreated*1.),self.FillTile, )) indexFile.write( _("<p>Number of Nodes: <b>%s</b></p>") % (self.NodesCount, )) indexFile.write( _("<p>Number of Ways: <b>%s</b></p>") % (self.WaysCount, )) indexFile.write( _("<p>Number of Relations: <b>%s</b></p>") % (self.RelationsCount, )) if ifmod('places') or ifmod('borders'): # FIXME FIXME FIXME indexFile.write( _("<p>Number of Borders/Places: <b>%s</b>/<b>%s</b></p>") % (self.BordersCount, self.PlacesCount)) indexFile.write( _("<p>First known update: <b>%s</b></p>") % (self.FirstChange, )) indexFile.write( _("<p>Last update: <b>%s</b></p>") % (self.LastChange, )) indexFile.write("</body></html>") indexFile.close() filename = open('oneline.inc.html','w') if ifmod('density'): filename.write("%s</a><td>%s</td><td>%s</td><td>%.3f</td><td>%s</td><td>%.3f</td><td>%s</td></tr>" % (self.CountryName, self.LastChange, len(self.User), speed, \ " ",lastDaysActivity,\ " ", \ )) else: filename.write("%s</a><td>%s</td><td>%s</td><td>%.3f</td><td>%s</td><td>%.3f</td><td>%s</td></tr>" % (self.CountryName, self.LastChange, len(self.User), speed, \ " ",lastDaysActivity,\ " ", \ )) filename.close() ### Making routing if ifmod('routing'): self.routeFile = open('route.html','w') self.routeFile.write(htmlheader % ("",_("Routes"))) self.routeFile.write(htmltablestart) self.routeFile.write(htmltablerow((_("Number of ways"),_("First way in group")))) wayQ = set([]) wayP = set([]) numRoute = 0 self.WayGroups = {0:"",} self.WayGroupsId = {0:"",} numR = len(self.RoutableWays) numP = 0 numPrev = 0 starttime = time.time() prevtime = starttime while self.RoutableWays: if time.time()-prevtime >= 6: print "still %s %s, %s, 
%s%%, ETA:%ss (%s/s)"%(len(wayQ),numP, numR-numP, 1.*numP/numR*100., (numR-numP)*(time.time()-prevtime)/(numP-numPrev), (numP-numPrev)/(time.time()-prevtime)) prevtime=time.time() time.sleep(1) numPrev = numP if not wayQ: popped = self.RoutableWays.pop() wayQ.add(popped) wayP.update(wayQ) sys.stderr.write( "NewRoutable %d\n" % numRoute ) numRoute += 1 self.WayGroups[numRoute] = 1 self.WayGroupsId[numRoute] = popped way = self.Ways[wayQ.pop()] #print way, wayQ for node in way: if len(self.NodesToWays[node]) != 1: for tway in self.NodesToWays[node]: if tway not in wayP: if tway in self.RoutableWays: wayQ.add(tway) wayP.add(tway) self.WayGroups[numRoute] += 1 self.RoutableWays.remove(tway) numP += 1 sys.stderr.write("WayGroups = %s\n" % repr(self.WayGroups)) for i in range(1,numRoute): self.routeFile.write(htmltablerow((self.WayGroups[i],linkWayMap(self.WayGroupsId[i])))) self.routeFile.write('</table></body></html>') self.routeFile.close() ## Making pretty image if ifmod('density'): try: from PIL import Image, ImageDraw density = self.NodesCount/self.TilesCreated*1. sys.stderr.write('Generating picture') width = self.MaxLoTile-self.MinLoTile height = self.MaxLaTile-self.MinLaTile img = Image.new("RGB", (self.MaxLoTile-self.MinLoTile+1,self.MaxLaTile-self.MinLaTile+2), (255,255,255)) gamma=0.5 #draw = ImageDraw.Draw(img) #draw.text((0,0), "%s"%(self.CountryName) ,fill= (0,0,0)) png = img.load() # for i in self.Tiles.keys(): # for j in self.Tiles[i].keys(): # if len(self.Tiles[i][j]) > self.FillTile: # self.FillTile = len(self.Tiles[i][j]) for i in self.Tiles.keys(): for j in self.Tiles[i].keys(): c = len(self.Tiles[i][j])*1./self.FillTile c = c**gamma t = int(c*255) png[j-self.MinLoTile,-i+self.MaxLaTile] = (256-t,256-t,t) for i in range(0, width+1): c = i/(width+1*1.) 
c = c**gamma t = int(c*255) png[i,height+1] = (256-t,256-t,t) if height>width: oh = height height = 300 width = int(width*(1.*height/oh)) else: ow = width width = 300 height = int(height*(1.*width/ow)) img.save("density.png") print width, height img = img.resize((width, height),Image.ANTIALIAS) img.save("density.small.jpg") sys.stderr.write('.\n') except ImportError: # PIL import error sys.stderr.write("PIL import error, skip picture generation\n") pass ## end ifmod('density') if ifmod('warnings'): self.warningsFile.write('</table></body></html>') self.warningsFile.close()