Example no. 1
    def weather(self):
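        #For each leg of the current trip, look up the forecast wind at the
        #altitude given in the 'alts' request parameter: builds a list of
        #[direction, knots] pairs and fails with "" if any leg has no forecast.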
        dummy,routes=get_route(tripuser(),request.params['tripname'])

        ret=[]
        alts=request.params.get('alts','')
        if not alts: #default is '', so a None-check would never trigger
            altvec=[]
        else:
            altvec=alts.split(",")
        for route,altitude in zip(routes,altvec):
             #print("Looking for waypoint: %s"%(way.pos,))
             try:
                mapper.parse_elev(altitude)
             except mapper.NotAnAltitude,cause:
                 ret.append(['',''])                 
                 continue #skip this alt
             #N+1 selects....
             merc1=mapper.latlon2merc(mapper.from_str(route.a.pos),14)
             merc2=mapper.latlon2merc(mapper.from_str(route.b.pos),14) #end of leg, so 'center' below is the leg midpoint
             center=(0.5*(merc1[0]+merc2[0]),0.5*(merc1[1]+merc2[1]))
             lat,lon=mapper.merc2latlon(center,14)
             #print "Fetching weather for %s,%s, %s"%(lat,lon,route.altitude)
             when=route.depart_dt+(route.arrive_dt-route.depart_dt)/2
             dummy1,dummy2,we=gfs_weather.get_prognosis(when)
             if we==None:
                 return "" #Fail completely if we don't have the weather here. We only succeed if we have weather for all parts of the journey.
             else:
                 try:
                     wi=we.get_wind(lat,lon,mapper.parse_elev(altitude))
                 except:
                     print traceback.format_exc()
                     return ""
                 #print "Got winds:",wi
                 ret.append([wi['direction'],wi['knots']])
Example no. 2
    def weather(self):
        dummy, routes = get_route(tripuser(), request.params['tripname'])

        ret = []
        alts = request.params.get('alts', '')
        if not alts:  #default is '', so a None-check would never trigger
            altvec = []
        else:
            altvec = alts.split(",")
        for route, altitude in zip(routes, altvec):
            #print("Looking for waypoint: %s"%(way.pos,))
            try:
                mapper.parse_elev(altitude)
            except mapper.NotAnAltitude, cause:
                ret.append(['', ''])
                continue  #skip this alt
            #N+1 selects....
            merc1 = mapper.latlon2merc(mapper.from_str(route.a.pos), 14)
            merc2 = mapper.latlon2merc(mapper.from_str(route.b.pos), 14)  #end of leg, so 'center' below is the leg midpoint
            center = (0.5 * (merc1[0] + merc2[0]), 0.5 * (merc1[1] + merc2[1]))
            lat, lon = mapper.merc2latlon(center, 14)
            #print "Fetching weather for %s,%s, %s"%(lat,lon,route.altitude)
            when = route.depart_dt + (route.arrive_dt - route.depart_dt) / 2
            dummy1, dummy2, we = gfs_weather.get_prognosis(when)
            if we == None:
                return ""
                #Fail completely if we don't have the weather here. We only succeed if we have weather for all parts of the journey.
            else:
                try:
                    wi = we.get_wind(lat, lon, mapper.parse_elev(altitude))
                except:
                    print traceback.format_exc()
                    return ""
                #print "Got winds:",wi
                ret.append([wi['direction'], wi['knots']])
Example no. 3
def ev_parse_obst():
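    #Fetch the EV (Latvia) eAIP ENR 5.4 obstacle table for the current AIRAC
    #cycle and return a list of obstacle dicts (name, pos, elev, height,
    #lighting, kind), with elevations run through mapper.parse_elev().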
    cur_airac = get_cur_airac()
    url = "/eAIPfiles/%s-AIRAC/html/eAIP/EV-ENR-5.4-en-GB.html" % (cur_airac,)
    # url="/EV-ENR-5.4-en-GB.html"
    parser = lxml.html.HTMLParser()
    data, date = fetchdata.getdata(url, country="ev")
    parser.feed(data)
    tree = parser.close()
    got_fir = False
    res = []
    for table in tree.xpath("//table"):
        for row in table.xpath(".//tr"):
            tds = row.xpath(".//td")
            if len(tds) != 5:
                continue
            name, type, coord, elev, light = [alltext(x) for x in tds]
            elev, height = elev.split("/")
            res.append(
                dict(
                    name=name,
                    pos=mapper.parsecoord(coord),
                    height=mapper.parse_elev(height.strip()),
                    elev=mapper.parse_elev(elev),
                    lighting=light,
                    kind=type,
                )
            )
    return res
 def parse_alt(line):
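     #Validate the altitude part of an "AL"/"AH" (floor/ceiling) line:
     #an empty value means unlimited, otherwise parse_elev() is called only
     #to raise on malformed input before the raw string is returned.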
     A,alt=line.split(" ",1)
     assert A in ("AL","AH")
     alt=alt.strip()
     if alt=="":
         return "UNL"
     mapper.parse_elev(alt)
     return alt
Example no. 5
 def parse_alt(line):
     A, alt = line.split(" ", 1)
     assert A in ("AL", "AH")
     alt = alt.strip()
     if alt == "":
         return "UNL"
     mapper.parse_elev(alt)
     return alt
Example no. 6
def get_stuff_near_route(rts,items,dist,vertdist):
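    #Generator: for each item with a usable position, find the closest point on
    #every route leg and yield an annotated copy of the item when it lies within
    #'dist' NM of the leg and the vertical clearance over it (interpolated route
    #altitude minus item elevation) is below 'vertdist'.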
    for item in items:
        try:
            itemmerc=mapper.latlon2merc(mapper.from_str(item['pos']),13)
        except Exception:
            print "Bad coord:",item['pos']
            continue
        itemv=Vertex(int(itemmerc[0]),int(itemmerc[1]))
        onenm=mapper.approx_scale(itemmerc,13,1.0)
        for rt in rts:
            if rt.dt==None: continue
            #print "========================================="
            av=Vertex(int(rt.subposa[0]),int(rt.subposa[1]))
            bv=Vertex(int(rt.subposb[0]),int(rt.subposb[1]))
            l=Line(av,bv)
            linelen=(bv-av).approxlength()
            actualclosest=l.approx_closest(itemv)
            #print item['name'],"A: ",av,"B: ",bv,"clo:",actualclosest
            actualdist=(actualclosest-itemv).approxlength()/onenm
            #print "Actualdist: ",actualdist
            ls=(actualclosest-av).approxlength()
            #print "Length from start:",ls
            #print "Linelen:",linelen
            if linelen>1e-3:
                along=ls/linelen
            else:
                along=0
            #print "Along:",along
            #print "Startalt:",rt.startalt," endalt: ",rt.endalt
            alongnm=rt.d*along
            alongnm_a=rt.relstartd+alongnm
            #print "NM from ",rt.a.waypoint," is ",alongnm_a
            closealt=rt.startalt+(rt.endalt-rt.startalt)*along
            #print "Altitude at point: ",closealt, " before: ",rt.a.waypoint,rt.b.waypoint
            altmargin=0
            if 'elev' in item:
                itemalt=mapper.parse_elev(item['elev'])
                altmargin=closealt-itemalt
            else:
                itemalt=None
                altmargin=0
            if actualdist<dist and altmargin<vertdist:
                bear=mapper.approx_bearing_vec(actualclosest,itemv)            
                d=dict(item)
                #print "Yielding."
                d['name']=d['kind']+': ' +d['name']
                d['dist_from_a']=alongnm_a
                d['dist_from_b']=rt.outer_d-alongnm_a
                d['dir_from_a']=describe_dir(rt.tt)
                d['dir_from_b']=describe_dir((rt.tt+180.0)%360.0)
                d['dist']=actualdist
                d['bearing']=bear
                d['elevf']=itemalt
                if itemalt!=None:
                    d['vertmargin']=altmargin
                d['closestalt']=closealt
                d['a']=rt.a
                d['b']=rt.b
                d['id']=rt.a.id
                yield d
Example no. 7
 def get_wind(self,elev):
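     #Return dict(direction=...,knots=...) for the given elevation by vector
     #interpolation between the stored 2000 ft, FL50 and FL100 winds; below
     #2000 ft or at/above FL100 the nearest level is returned unchanged.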
     if type(elev)!=int:
         elev=mapper.parse_elev(elev)
     ielev=int(elev)
     #print "Ielev: ",ielev
     twothousand,fl50,fl100=self.winds['2000'],self.winds['FL50'],self.winds['FL100']
     def dcos(x):
         return math.cos(x/(180.0/math.pi))
     def dsin(x):
         return math.sin(x/(180.0/math.pi))
     def ipol(a,b,f):
         ax=dcos(a['direction'])*a['knots']
         ay=dsin(a['direction'])*a['knots']
         bx=dcos(b['direction'])*b['knots']
         by=dsin(b['direction'])*b['knots']
         x=(ax*(1.0-f)+bx*f)
         y=(ay*(1.0-f)+by*f)
         direction=(180.0/math.pi)*math.atan2(y,x)
         if direction<0: direction+=360.0
         knots=math.sqrt(x**2+y**2)
         res=dict(direction=direction,knots=knots)
         #print "\nInterpolated %s and %s with f=%s into %s\n"%(a,b,f,res)
         return res
     
     if ielev<2000:
         return dict(knots=twothousand['knots'],direction=twothousand['direction'])
     elif ielev<5000:
         return ipol(twothousand,fl50,(ielev-2000.0)/(5000.0-2000.0))
     elif ielev<10000:
         return ipol(fl50,fl100,(ielev-5000.0)/(10000.0-5000.0))
     elif ielev>=10000:
         return dict(knots=fl100['knots'],direction=fl100['direction'])
     return dict(knots=0,direction=0)
Example no. 8
        def get(what, a, b):
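            #Return the formatted value of one navigation-log column ('TT',
            #'D', 'Var', 'W', 'V', 'Alt', 'TAS' or 'Dev') for the leg between
            #waypoints a and b, or "" when no stored value is available.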
            #print "A:<%s>"%(what,),a.pos,b.pos
            route = wp2route.get((a.id, b.id), None)

            if route:
                if what in ['TT', 'D', 'Var']:
                    bear, dist = route.tt, route.d  #mapper.bearing_and_distance(a.pos,b.pos)
                    #print "Bear,dist:",bear,dist
                    if what == 'TT':
                        return "%03.0f" % (bear, )
                    elif what == 'D':
                        return "%.1f" % (dist, )
                    elif what == 'Var':
                        var = route.variation
                        return "%+.0f" % (round(var), )
                if what in ['W', 'V', 'Alt', 'TAS', 'Dev']:
                    #routes=list(meta.Session.query(Route).filter(sa.and_(
                    #    Route.user==tripuser(),Route.trip==session['current_trip'],
                    #    Route.waypoint1==a.id,Route.waypoint2==b.id)).all())
                    if what == 'W':
                        return "%03.0f" % (route.winddir)
                    elif what == 'V':
                        return "%.0f" % (route.windvel)
                    elif what == 'Alt':
                        try:
                            #print "Parsing elev:",route.altitude
                            mapper.parse_elev(route.altitude)
                        except Exception, cause:
                            #print "couldn't parse elev:",route.altitude
                            return "1500"
                        return route.altitude
                    elif what == 'Dev':
                        #print "Dev is:",repr(route.deviation)
                        return "%.0f" % (
                            route.deviation) if route.deviation != None else ''
                    elif what == 'TAS':
                        #print "A:<%s>"%(what,),a.id,b.id,route.tas,id(route)
                        if not route.tas:
                            return 75
                        return "%.0f" % (route.tas)
                return ""
Example no. 9
 def get(what,a,b):
     #print "A:<%s>"%(what,),a.pos,b.pos
     route=wp2route.get((a.id,b.id),None)
     
     if route:                
         if what in ['TT','D','Var']:
             bear,dist=route.tt,route.d #mapper.bearing_and_distance(a.pos,b.pos)
             #print "Bear,dist:",bear,dist
             if what=='TT':
                 return "%03.0f"%(bear,)
             elif what=='D':
                 return "%.1f"%(dist,)
             elif what=='Var':
                 var=route.variation
                 return "%+.0f"%(round(var),)
         if what in ['W','V','Alt','TAS','Dev']:
             #routes=list(meta.Session.query(Route).filter(sa.and_(
             #    Route.user==tripuser(),Route.trip==session['current_trip'],
             #    Route.waypoint1==a.id,Route.waypoint2==b.id)).all())
             if what=='W':
                 return "%03.0f"%(route.winddir)
             elif what=='V':
                 return "%.0f"%(route.windvel)
             elif what=='Alt':
                 try:
                     #print "Parsing elev:",route.altitude
                     mapper.parse_elev(route.altitude)
                 except Exception,cause:
                     #print "couldn't parse elev:",route.altitude
                     return "1500"
                 return route.altitude                    
             elif what=='Dev':
                 #print "Dev is:",repr(route.deviation)
                 return "%.0f"%(route.deviation) if route.deviation!=None else ''   
             elif what=='TAS':
                 #print "A:<%s>"%(what,),a.id,b.id,route.tas,id(route)
                 if not route.tas:
                     return 75                        
                 return "%.0f"%(route.tas)
         return ""            
Example no. 10
def parse_elev_for_sort_purposes(elev):
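    #Best-effort numeric elevation for sorting only: "gnd" maps to 0, "x/y"
    #keeps the part before the slash, a trailing "gnd" is stripped, and any
    #parse failure falls back to 0.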
    try:
        #print "elev",elev
        elev=elev.lower()
        if elev=="gnd":
            return 0
        if elev.count("/"):
            elev,dummy=elev.split("/")
        if elev.endswith("gnd"):
            elev=elev[:-3]
        
        return mapper.parse_elev(elev)
    except:
        return 0
Example no. 11
def parse_elev_for_sort_purposes(elev):
    try:
        #print "elev",elev
        elev = elev.lower()
        if elev == "gnd":
            return 0
        if elev.count("/"):
            elev, dummy = elev.split("/")
        if elev.endswith("gnd"):
            elev = elev[:-3]

        return mapper.parse_elev(elev)
    except:
        return 0
Example no. 12
def ev_parse_obst():
    cur_airac = get_cur_airac()
    url = "/eAIPfiles/%s-AIRAC/html/eAIP/EV-ENR-5.4-en-GB.html" % (cur_airac, )
    #url="/EV-ENR-5.4-en-GB.html"
    parser = lxml.html.HTMLParser()
    data, date = fetchdata.getdata(url, country="ev")
    parser.feed(data)
    tree = parser.close()
    got_fir = False
    res = []
    for table in tree.xpath("//table"):
        for row in table.xpath(".//tr"):
            tds = row.xpath(".//td")
            if len(tds) != 5: continue
            name, type, coord, elev, light = [alltext(x) for x in tds]
            elev, height = elev.split("/")
            res.append(
                dict(name=name,
                     pos=mapper.parsecoord(coord),
                     height=mapper.parse_elev(height.strip()),
                     elev=mapper.parse_elev(elev),
                     lighting=light,
                     kind=type))
    return res
Example no. 13
 def classify(item):
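     #Return a warning colour for an item near the route: pale yellow for low
     #sun, blue when the vertical margin cannot be computed, red when the item
     #reaches above the route altitude, pink when the clearance is under
     #1000 ft (500 ft for terrain), and None when no warning is needed.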
     #print item
     vertlimit = 1000
     if item.get('kind', None) == 'lowsun':
         return "#ffffb0"
     if item.get('kind', None) == 'terrain':
         vertlimit = 500
     try:
         margin = item['closestalt'] - mapper.parse_elev(item['elev'])
     except Exception:
         return "#0000ff"  #Unknown margin, unknown height
     if item['dist'] > 0.6 / 1.852:
         return None  #Not really too close anyway
     if margin < 0:
         return "#ff3030"
     if margin < vertlimit:
         return "#ffb0b0"
     return None
Example no. 14
 def classify(item):
     #print item
     vertlimit=1000
     if item.get('kind',None)=='lowsun':
         return "#ffffb0"            
     if item.get('kind',None)=='terrain':
         vertlimit=500                
     try:
         margin=item['closestalt']-mapper.parse_elev(item['elev'])
     except Exception:
         return "#0000ff" #Unknown margin, unknown height
     if item['dist']>0.6/1.852:
         return None #Not really too close anyway
     if margin<0:
         return "#ff3030"
     if margin<vertlimit:
         return "#ffb0b0"
     return None    
Example no. 15
    def get_wind(self, elev):
        if type(elev) != int:
            elev = mapper.parse_elev(elev)
        ielev = int(elev)
        #print "Ielev: ",ielev
        twothousand, fl50, fl100 = self.winds['2000'], self.winds[
            'FL50'], self.winds['FL100']

        def dcos(x):
            return math.cos(x / (180.0 / math.pi))

        def dsin(x):
            return math.sin(x / (180.0 / math.pi))

        def ipol(a, b, f):
            ax = dcos(a['direction']) * a['knots']
            ay = dsin(a['direction']) * a['knots']
            bx = dcos(b['direction']) * b['knots']
            by = dsin(b['direction']) * b['knots']
            x = (ax * (1.0 - f) + bx * f)
            y = (ay * (1.0 - f) + by * f)
            direction = (180.0 / math.pi) * math.atan2(y, x)
            if direction < 0: direction += 360.0
            knots = math.sqrt(x**2 + y**2)
            res = dict(direction=direction, knots=knots)
            #print "\nInterpolated %s and %s with f=%s into %s\n"%(a,b,f,res)
            return res

        if ielev < 2000:
            return dict(knots=twothousand['knots'],
                        direction=twothousand['direction'])
        elif ielev < 5000:
            return ipol(twothousand, fl50,
                        (ielev - 2000.0) / (5000.0 - 2000.0))
        elif ielev < 10000:
            return ipol(fl50, fl100, (ielev - 5000.0) / (10000.0 - 5000.0))
        elif ielev >= 10000:
            return dict(knots=fl100['knots'], direction=fl100['direction'])
        return dict(knots=0, direction=0)
Example no. 16
def get_obstacle_free_height_on_line(pos1,pos2):
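    #Return the highest obstacle or terrain elevation along the pos1-pos2 line:
    #NOTAM and extracted obstacles within 2 NM of the line are considered, and
    #terrain elevation is sampled in boxes at regular steps along the line.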
    
    minimum_distance=2.0
    
    merc1=mapper.latlon2merc(pos1,13)
    merc2=mapper.latlon2merc(pos2,13)
    
    onenm=mapper.approx_scale(merc1,13,1.0)
    av=Vertex(int(merc1[0]),int(merc1[1]))
    bv=Vertex(int(merc2[0]),int(merc2[1]))
    linelen=(av-bv).approxlength()
    l=Line(av,bv)
    bb=BoundingBox(min(merc1[0],merc2[0]),
                   min(merc1[1],merc2[1]),
                   max(merc1[0],merc2[0]),
                   max(merc1[1],merc2[1])).expanded(onenm*minimum_distance*1.5)
    
    obstacles=[0]
    for item in chain(notam_geo_search.get_notam_objs_cached()['obstacles'],
                      extracted_cache.get_obstacles_in_bb(bb)):
        if not 'pos' in item: continue        
        if not 'elev' in item: continue        
        try:
            itemmerc=mapper.latlon2merc(mapper.from_str(item['pos']),13)            
        except Exception:
            print "Bad coord:",item['pos']
            continue
        itemv=Vertex(int(itemmerc[0]),int(itemmerc[1]))
        onenm=mapper.approx_scale(itemmerc,13,1.0)        
        actualclosest=l.approx_closest(itemv)        
        
        
        actualdist=(actualclosest-itemv).approxlength()/onenm
        if actualdist<minimum_distance:
            itemalt=mapper.parse_elev(item['elev'])
            obstacles.append(itemalt)
            
    minstep=2*onenm
            
    stepcount=linelen/float(minstep)
    if stepcount>100:
        newstep=linelen/100.0
        if newstep>minstep:
            minstep=newstep
        
    if linelen<1e-3:
        linelen=1e-3
    along=0.0
    #isfirstorlast=(idx==0 or idx==l-1)        
    while True:
        alongf=float(along)/float(linelen)
        end=False
        if alongf>1.0:
            alongf=1.0
            end=True
        merc=((1.0-alongf)*merc1[0]+(alongf)*merc2[0],
              (1.0-alongf)*merc1[1]+(alongf)*merc2[1])        
        latlon=mapper.merc2latlon(merc,13)
        elev=get_terrain_elev_in_box_approx(latlon,2.0*minstep/onenm)
        obstacles.append(elev)            
        along+=minstep
        if end: break
            
    return max(obstacles)
Example no. 17
def get_obstacle_free_height_on_line(pos1, pos2):

    minimum_distance = 2.0

    merc1 = mapper.latlon2merc(pos1, 13)
    merc2 = mapper.latlon2merc(pos2, 13)

    onenm = mapper.approx_scale(merc1, 13, 1.0)
    av = Vertex(int(merc1[0]), int(merc1[1]))
    bv = Vertex(int(merc2[0]), int(merc2[1]))
    linelen = (av - bv).approxlength()
    l = Line(av, bv)
    bb = BoundingBox(min(merc1[0], merc2[0]), min(merc1[1], merc2[1]),
                     max(merc1[0], merc2[0]),
                     max(merc1[1],
                         merc2[1])).expanded(onenm * minimum_distance * 1.5)

    obstacles = [0]
    for item in chain(notam_geo_search.get_notam_objs_cached()['obstacles'],
                      extracted_cache.get_obstacles_in_bb(bb)):
        if not 'pos' in item: continue
        if not 'elev' in item: continue
        try:
            itemmerc = mapper.latlon2merc(mapper.from_str(item['pos']), 13)
        except Exception:
            print "Bad coord:", item['pos']
            continue
        itemv = Vertex(int(itemmerc[0]), int(itemmerc[1]))
        onenm = mapper.approx_scale(itemmerc, 13, 1.0)
        actualclosest = l.approx_closest(itemv)

        actualdist = (actualclosest - itemv).approxlength() / onenm
        if actualdist < minimum_distance:
            itemalt = mapper.parse_elev(item['elev'])
            obstacles.append(itemalt)

    minstep = 2 * onenm

    stepcount = linelen / float(minstep)
    if stepcount > 100:
        newstep = linelen / 100.0
        if newstep > minstep:
            minstep = newstep

    if linelen < 1e-3:
        linelen = 1e-3
    along = 0.0
    #isfirstorlast=(idx==0 or idx==l-1)
    while True:
        alongf = float(along) / float(linelen)
        end = False
        if alongf > 1.0:
            alongf = 1.0
            end = True
        merc = ((1.0 - alongf) * merc1[0] + (alongf) * merc2[0],
                (1.0 - alongf) * merc1[1] + (alongf) * merc2[1])
        latlon = mapper.merc2latlon(merc, 13)
        elev = get_terrain_elev_in_box_approx(latlon, 2.0 * minstep / onenm)
        obstacles.append(elev)
        along += minstep
        if end: break

    return max(obstacles)
Example no. 18
def ev_parse_x(url):
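    #Parse one EV (Latvia) eAIP page of restricted/reserved areas: every table
    #row provides a space name with its boundary text and a ceiling/floor
    #limit, which are converted into cleaned-up polygon dicts of type "R".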
    out = []
    parser = lxml.html.HTMLParser()
    data, date = fetchdata.getdata(url, country="ev")
    parser.feed(data)
    tree = parser.close()
    got_fir = False
    for table in tree.xpath("//table"):
        #print "Table with %d children"%(len(table.getchildren()),)
        rows = list(table.xpath(".//tr"))

        #for idx,col in enumerate(cols):
        #    print "Col %d, %s"%(idx,alltext(col)[:10])
        headingcols = rows[0].xpath(".//th")
        if len(headingcols) == 0: continue
        name, alt = headingcols[0:2]
        if alltext(name).count("QNH") and len(headingcols) > 6:
            continue
        print alltext(name)
        assert alltext(name).lower().count("name") or alltext(
            name).lower().count("lateral")
        print alltext(alt)
        assert alltext(alt).lower().count("limit")

        for row in rows[1:]:
            cols = list(row.xpath(".//td"))
            if len(cols) < 2: continue
            name, alt = cols[:2]
            lines = [x.strip() for x in alltext(name).split("\n") if x.strip()]
            if len(lines) == 0: continue
            assert len(lines)

            spacename = lines[0].strip()
            if spacename.strip(
            ) == "A circle radius 0,5 NM centered on 565705N 0240619E EVR2 RIGA":
                spacename = "EVR2 RIGA"
                lines = [spacename, lines[0][:-len(spacename)].strip()
                         ] + lines[1:]
            print spacename
            if spacename.strip() == "SKRIVERI":
                continue
            print "Spacename is:", spacename
            assert spacename[:3] in ["EVR","EVP","TSA","TRA"] or \
                spacename.endswith("ATZ") or \
                spacename.endswith("ATZ (MILITARY)")

            altcand = []
            for altc in alltext(alt).split("\n"):
                if altc.count("Real-time"): continue
                altcand.append(altc.strip())
            print "Altcands:", altcand
            ceiling, floor = [x.strip() for x in " ".join(altcand).split("/")]
            ceiling = strangefix(ceiling)
            floor = strangefix(floor)

            mapper.parse_elev(ceiling)
            ifloor = mapper.parse_elev(floor)
            iceiling = mapper.parse_elev(ceiling)
            if ifloor >= 9500 and iceiling >= 9500:
                continue
            assert ifloor < iceiling

            freqs = []
            raw = " ".join(lines[1:])
            raw = re.sub(
                s(ur"Area bounded by lines successively joining the following points:"
                  ), "", raw)
            print "Raw:", raw

            coords = mapper.parse_coord_str(raw, context='latvia')
            for cleaned in clean_up_polygon(coords):
                out.append(
                    dict(name=spacename,
                         points=cleaned,
                         type="R",
                         freqs=freqs,
                         floor=floor,
                         url=url,
                         date=date,
                         ceiling=ceiling))

    return out
Example no. 19
            spacename = coordstr[0]
            assert spacename == "CTR"
            for sub in coordstr[1:]:
                cstr.append(sub.strip().rstrip("."))

            def fixfunc(m):
                return "".join(m.groups())

            raw = re.sub(ur"(\d{2,3})\s*(\d{2})\s*(\d{2})\s*([NSEW])", fixfunc,
                         "".join(cstr)).replace(",", " - ")
            print "parsing raw:", raw
            points = mapper.parse_coord_str(raw, context='lithuania')

            print "Limitstr", limitstr
            floor, ceiling = re.match(ur"(.*)\s*to\s*(.*)", limitstr).groups()
            mapper.parse_elev(floor)
            mapper.parse_elev(ceiling)

            spacenamestem = spacename.strip()
            if spacenamestem.endswith("CTR"):
                spacenamestem = spacenamestem[:-3].strip()
            if spacenamestem.endswith("FIZ"):
                spacenamestem = spacenamestem[:-3].strip()
            #construct names
            newfreqs = []
            for serv, freq in freqs:
                serv = serv.strip()
                if serv == 'TWR':
                    servicelong = "Tower"
                elif serv.startswith('APP'):
                    servicelong = "Approach"
Example no. 20
def ev_parse_tma():
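    #Parse the EV (Latvia) eAIP ENR 2.1 page: collect TMAs and the RIGA FIR/UIR
    #with their lateral limits, floor/ceiling and frequencies as polygon dicts.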
    out = []
    parser = lxml.html.HTMLParser()
    #url="/Latvia_EV-ENR-2.1-en-GB.html"
    cur_airac = get_cur_airac()
    url = "/eAIPfiles/%s-AIRAC/html/eAIP/EV-ENR-2.1-en-GB.html" % (cur_airac, )

    data, date = fetchdata.getdata(url, country='ev')
    parser.feed(data)
    tree = parser.close()

    got_fir = False
    for table in tree.xpath("//table"):
        #print "Table with %d children"%(len(table.getchildren()),)
        rows = list(table.xpath(".//tr"))
        for idx in xrange(5):
            headingrow = rows[idx]
            cols = list(headingrow.xpath(".//th"))
            #print len(cols)
            if len(cols) == 5:
                break
        else:
            raise Exception("No heading row")
        assert idx == 0
        #for idx,col in enumerate(cols):
        #    print "Col %d, %s"%(idx,alltext(col)[:10])
        nameh, unith, callsignh, freqh, remarkh = cols
        assert alltext(nameh).lower().count("name")
        assert alltext(unith).lower().count("unit")
        assert re.match(ur"call\s*sign", alltext(callsignh).lower())
        lastcols = None
        for row in rows[1:]:
            cols = list(row.xpath(".//td"))
            if len(cols) == 5:
                name, unit, callsign, freq, remark = cols
                lastcols = cols
            else:
                if lastcols:
                    unit, callsign, freq, remark = lastcols[1:]
                    name = cols[0]
                else:
                    continue

            lines = [x.strip() for x in alltext(name).split("\n") if x.strip()]
            if len(lines) == 0: continue
            spacename = lines[0].strip()

            if re.match(ur"RIGA\s*UTA|RIGA\s*CTA|RIGA\s*AOR.*", spacename):
                continue
            freqstr = alltext(freq)
            callsignstr = alltext(callsign)
            if freqstr.strip():
                print freqstr
                freqmhzs = re.findall(ur"\d{3}\.\d{3}", freqstr)
                assert len(freqmhzs) <= 2
                callsigns = [callsignstr.split("\n")[0].strip()]
                freqs = []
                for idx, freqmhz in enumerate(freqmhzs):
                    if freqmhz == '121.500': continue
                    freqs.append((callsigns[idx], float(freqmhz)))
                print "freqs:", freqs
            else:
                freqs = []
            assert len(lines)

            classidx = next(idx for idx, x in reversed(list(enumerate(lines)))
                            if x.lower().count("class of airspace"))

            if re.match(ur"RIGA\s*FIR.*UIR", spacename, re.UNICODE):
                got_fir = True
                lastspaceidx = classidx - 2
                floor = "GND"
                ceiling = "-"
                type_ = "FIR"
            else:
                if lines[classidx - 1].count("/") == 1:
                    floor, ceiling = lines[classidx - 1].split("/")
                    lastspaceidx = classidx - 1
                else:
                    floor = lines[classidx - 1]
                    ceiling = lines[classidx - 2]
                    lastspaceidx = classidx - 2
                ceiling = strangefix(ceiling)
                floor = strangefix(floor)

                mapper.parse_elev(ceiling)
                mapper.parse_elev(floor)
                type_ = "TMA"
            tcoords = lines[1:lastspaceidx]
            #verify that we got actual altitudes:
            coords = []
            for coord in tcoords:
                coord = coord.strip().replace("(counter-)", "").replace(
                    "(RIGA DVOR - RIA)", "")
                if coord.endswith(u"E") or coord.endswith("W"):
                    coord = coord + " -"
                coords.append(coord)

            raw = " ".join(coords)
            raw = re.sub(
                s(ur"Area bounded by lines successively joining the following points:"
                  ), "", raw)
            print "Raw:", raw
            coords = mapper.parse_coord_str(raw, context='latvia')
            for cleaned in clean_up_polygon(coords):
                out.append(
                    dict(name=spacename,
                         points=cleaned,
                         type=type_,
                         freqs=freqs,
                         floor=floor,
                         url=url,
                         date=date,
                         ceiling=ceiling))
                if type_ == 'FIR':
                    out[-1]['icao'] = "EVRR"
Example no. 21
def get_notam_objs(kind=None):
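    #Scan all current NOTAM updates and extract obstacles, point NOTAMs and
    #NOTAM areas (explicit coordinate lists as well as "RADIUS ... NM" circles);
    #'kind' optionally restricts which category is produced.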
    notamupdates = meta.Session.query(NotamUpdate).filter(
        NotamUpdate.disappearnotam == sa.null()).all()
    obstacles = []
    others = []
    spaces = []
    areas = []
    for u in notamupdates:
        text = u.text.strip()

        if text.count("W52355N0234942E"):
            text = text.replace("W52355N0234942E", "652355N0234942E")
        coordgroups = []
        for line in text.split("\n"):
            dig = False
            for char in line:
                if char.isdigit():
                    dig = True
            if dig == False:
                if len(coordgroups) and coordgroups[-1] != "":
                    coordgroups.append("")
            else:
                if len(coordgroups) == 0: coordgroups = [""]
                coordgroups[-1] += line + "\n"

        if (kind == None or kind == "notamarea"):

            for radius, unit, lat, lon in chain(
                    re.findall(
                        r"RADIUS\s*(?:OF)?\s*(\d+)\s*(NM|M)\s*(?:CENT[ERD]+|FR?O?M)?\s*(?:ON)?\s*(?:AT)?\s*(\d+[NS])\s*(\d+[EW])",
                        text),
                    re.findall(
                        r"(\d+)\s*(NM|M)\s*RADIUS\s*(?:CENT[ERD]+)?\s*(?:ON|AT|FROM)?\s*(\d+[NS])\s*(\d+[EW])",
                        text),
                    re.findall(
                        r"(\d+)\s*(NM|M)\s*RADIUS.*?[^0-9](\d+[NS])\s*(\d+[EW])",
                        text, re.DOTALL)):
                try:
                    radius = float(radius)
                    if unit == "M":
                        radius = radius / 1852.0
                    else:
                        assert unit == "NM"
                    centre = mapper.parse_coords(lat, lon)
                    coords = mapper.create_circle(centre, radius)
                    areas.append(
                        dict(points=coords,
                             kind="notamarea",
                             name=text,
                             type="notamarea",
                             notam_ordinal=u.appearnotam,
                             notam_line=u.appearline,
                             notam=text))
                except Exception, cause:
                    print "Invalid notam coords: %s,%s" % (lat, lon)

        for coordgroup in coordgroups:
            try:
                coords = list(mapper.parse_lfv_area(coordgroup, False))
            except Exception, cause:
                print "Parsing,", coordgroup
                print "Exception parsing lfv area from notam:%s" % (cause, )
                coords = []

            if len(coords) == 0: continue
            if text.count("OBST") and (kind == None or kind == "obstacle"):
                elevs = re.findall(r"ELEV\s*(\d+)\s*FT", text)
                elevs = [int(x) for x in elevs if x.isdigit()]
                if len(elevs) != 0:
                    elev = max(elevs)
                    for coord in coords:
                        obstacles.append(
                            dict(pos=coord,
                                 elev=elev,
                                 elevf=mapper.parse_elev(elev),
                                 kind='notam',
                                 notam_ordinal=u.appearnotam,
                                 notam_line=u.appearline,
                                 name=text.split("\n")[0],
                                 notam=text))
                    continue
            couldbearea = True
            if len(coords) <= 2:
                couldbearea = False
            if text.count("PSN") >= len(coords) - 2:
                couldbearea = False
            if couldbearea == False and (kind == None or kind == "notam"):
                for coord in coords:
                    others.append(
                        dict(pos=coord,
                             kind='notam',
                             name=text,
                             notam_ordinal=u.appearnotam,
                             notam_line=u.appearline,
                             notam=text))
            if couldbearea == True and (kind == None or kind == "notamarea"):
                if len(coords) > 2:
                    if text.startswith("AREA: "):
                        continue  #These aren't real notams, they're area-specifications for all other notams... make this better some day.
                    areas.append(
                        dict(points=coords,
                             kind="notamarea",
                             name=text,
                             type="notamarea",
                             notam_ordinal=u.appearnotam,
                             notam_line=u.appearline,
                             notam=text))
Example no. 22
                heights.append(gnd.strip())
            if unl:
                heights.append(unl.strip())
        uprint("heights for ", d['name'], ":", repr(heights))
        if len(heights) == 0 and d['name'] == u'GÖTEBORG TMA':
            heights = ['GND', 'FL95']
        if len(heights) == 1 and d['name'] == u'Göteborg TMA':
            heights = ['4500', 'FL95']
        assert len(heights) == 2
        ceiling = heights[0].strip()
        floor = heights[1].strip()

        pa['name'] = d['name']
        pa['floor'] = floor
        pa['ceiling'] = ceiling
        if mapper.parse_elev(floor) >= 9500:
            continue
        #uprint("Arealines:\n================\n%s\n============\n"%(arealines[:last_coord_idx]))
        #print pa
        areacoords = " ".join(arealines[:last_coord_idx])
        pa['points'] = parse_coord_str(areacoords)

        vs = []
        for p in pa['points']:
            #print "from_str:",repr(p)
            x, y = mapper.latlon2merc(mapper.from_str(p), 13)
            vs.append(Vertex(int(x), int(y)))

        p = Polygon(vvector(vs))
        if p.calc_area() <= 30 * 30:
            pass  #print pa
Example no. 23
import fplan.lib.mapper as mapper
import re
from datetime import datetime #needed for the default date argument below
from fplan.lib.poly_cleaner import clean_up_polygon

def ey_parse_tma():
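    #Build the EY (Lithuania) TMA list: the local emit() helper parses a
    #coordinate string plus a "ceiling/floor" limit string and appends one
    #cleaned polygon dict per resulting polygon.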
    out=[]
    
    def emit(name,coordstr,limits,type="TMA",freqs=[],date=datetime(2011,03,25),icao=None):
        ceiling,floor=limits.split("/")
        def compact(m):
            return "".join(m.groups())
        coordstr=re.sub(ur"(\d{2,3})\s*(\d{2})\s*(\d{2})",compact,coordstr)
        coordstr=re.sub(ur"NM from KNA to ","NM from 545740N 0240519E to",coordstr)
        print coordstr
        tpoints=mapper.parse_coord_str(coordstr,context='lithuania')
        f1=mapper.parse_elev(floor)
        c1=mapper.parse_elev(ceiling)
        if c1!='-':
            assert c1>f1
        for points in clean_up_polygon(tpoints):
            out.append(
                dict(
                     name=name,
                     floor=floor,
                     ceiling=ceiling,
                     freqs=freqs,
                     points=points,
                     type=type
                     )
            )
            if icao:
Example no. 24
def parse_page(parser,pagenr):   
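    #Parse one page of the Finnish (EF) TMA/CTA listing: locate the section
    #headings (major/minor areas, AWY airways, delegation notes) and then walk
    #the coordinate blocks under each heading to collect area outlines,
    #vertical limits and frequencies.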
    page=parser.parse_page_to_items(pagenr)
    items=page.items
    minx=min([item.x1 for item in items])
    headings=[]
    majorre=ur"\s*([A-ZÅÄÖ ][A-ZÅÄÖ]{3,})\s+(?:TMA|MIL CTA)\s*(?:-.*)?$"
    minorre=ur"\s*(?:TMA|MIL CTA [SN]?)\s*[A-ZÅÄÖ ]*\s*"
    airwayre=ur"(AWY\s+EF\s+[-A-Z]+)"
    delegre=ur".*(Delegation\s+of\s+responsibility).*"
    for item in page.get_by_regex(majorre):
        m,=re.match(majorre,item.text).groups()
        assert m!=None
        assert m.strip()!=""
        headings.append(('major',item.text.strip(),m,item))
    for item in page.get_by_regex(airwayre):
        m,=re.match(airwayre,item.text).groups()
        assert m!=None
        assert m.strip()!=""
        headings.append(('airway',item.text.strip(),m,item))
    for item in page.get_by_regex(minorre):
        m=re.match(minorre,item.text).group()
        assert m!=None
        assert m.strip()!=""
        #print "Heading %d: %s"%(item.y1,m)
        headings.append(('minor',item.text.strip(),m,item))
    for item in page.get_by_regex(delegre):
        m,=re.match(delegre,item.text).groups()
        assert m!=None
        assert m.strip()!=""
        headings.append(('deleg',item.text.strip(),m,item))
    #print headings
    headings.sort(key=lambda x:x[3].y1)
    def findheadingfor(y,meta=None):
        minor=None
        major=None
        #print "HEadings:",headings
        for (kind,full,name,item) in reversed(headings):
            #print "Checking %s,%s (state: minor %s / major %s)"%(kind,item.y1,minor,major)
            if kind=='airway' and item.y1<y:
                return name,"airway"
            if kind=='deleg' and item.y1<y:
                return name,"deleg"
            if minor==None and kind=="minor" and item.y1<y:
                minor=name.strip()
                if meta!=None: meta['minor_y']=item.y1
            if major==None and kind=="major" and item.y1<y:
                major=name.strip()
                fullname=full
                if meta!=None: meta['major_y']=item.y1
                break
        assert major!=None and major.strip()!=""
        if minor!=None:
            return major+" "+minor,"area"
        return fullname,"area"
    cury=0
    coordstrs=page.get_by_regex(ur".*\d{6}N \d{7}E.*")

    airway_width=None
    airway_vlim=None
    for item in page.get_partially_in_rect(0,0,100,15):
        if item.text.upper().count("WID NM"):
            airway_width=(item.x1,item.x2)
        if item.text.lower().count("vertical limits"):
            airway_vlim=(item.x1,item.x2) 
    
    out=[]
    atsout=[]
    while True:
        found=False
        #print "Looking for coords, y= %d"%(cury,)
        for titem in coordstrs:
            #print "Considering coordstr: ",titem.y1
            if titem.y1<=cury: continue
            if titem.x1<40: 
                item=titem
                found=True
                break
        if not found: break
        cury=item.y1
        headmeta=dict()
        name,hkind=findheadingfor(item.y1,headmeta)
        
        if hkind=='airway':
            assert airway_width and airway_vlim
            
            lines=page.get_lines(page.get_partially_in_rect(0,cury,minx+35,100),order_fudge=6)
            y1=cury
            y2=100
            coordlines=[]
            for idx,line in enumerate(lines):
                if line.count("AWY") and line.count("EF"): 
                    y2=line.y1
                    break            
                coordlines.append(line.strip())
            coordstr=" ".join(coordlines)
            inpoints=[mapper.parse_coords(lat,lon) for lat,lon in re.findall(r"(\d+N) (\d+E)",coordstr)]
                        
            for wcand in page.get_partially_in_rect(airway_width[0],y1+0.05,airway_width[1],y2-0.05):
                width_nm=float(re.match(r"(\d+\.?\d*)",wcand.text).groups()[0])
                
            elevs=[]
            for vcand in page.get_partially_in_rect(airway_vlim[0],y1+0.05,airway_vlim[1],y2-0.05):                
                elevs.append(re.match(r"(FL\s*\d+)",vcand.text).groups()[0])
            elevs.sort(key=lambda x:mapper.parse_elev(x))
            floor,ceiling=elevs
                
            atsout.append(dict(
                floor=floor,
                ceiling=ceiling,
                freqs=[],
                type="RNAV",
                name=name,
                points=ats_routes.get_latlon_outline(inpoints, width_nm)))
                                    
            cury=y2          
            continue            
        elif hkind=='deleg':
                        
            y2=cury+1
            continue            
        else:
            areaspec=[]
            #print "Rect: ",0,cury,minx+35,100
            y1=cury
            lines=page.get_lines(page.get_partially_in_rect(0,cury,minx+35,100),order_fudge=10)
            for idx,line in enumerate(lines):
                if re.search(ur"FL \d+",line) or line.count("FT MSL"): 
                    vertidx=idx
                    break            
                #print "Line:",line.encode('utf8')
                if line.strip()=="":
                    vertidx=idx
                    break
                cury=max(cury,line.y2+0.5)                
                line=line.replace(u"–","-")
                if not (line.endswith("-") or line.endswith(" ")):
                    line+=" "                
                areaspec.append(line)
            verts=[]
            
            for idx in xrange(vertidx,len(lines)):
                #print "Looking for alt:",lines[idx],"y2:",lines[idx].y2
                m=re.search(ur"(FL\s+\d+)",lines[idx].strip())
                if m:
                    verts.append((m.groups()[0],lines[idx].y1))
                m=re.search(ur"(\d+ FT (?:MSL|GND|SFC))",lines[idx].strip())
                if m:
                    verts.append((m.groups()[0],lines[idx].y1))
                if len(verts)>=2: break
            y2=verts[-1][1]
            
        
        freqs=[]
        for attempt in xrange(2):
            for freqcand in page.get_by_regex(ur".*\d{3}\.\d{3}.*"):
                #print "headmeta:",headmeta
                #print "attempt:",attempt
                #print "freqy1:",freqcand.y1
                if freqcand.x1<30: continue
                if attempt==0:
                    if freqcand.y1<y1: continue
                else:
                    if 'major_y' in headmeta:                    
                        if freqcand.y1<headmeta['major_y']: continue
                    else:
                        if freqcand.y1<y1: continue
                                
                    
                if freqcand.y1>y2: continue
                x,y=freqcand.x1,freqcand.y1
                lines=page.get_lines(page.get_partially_in_rect(x+0.1,y-10,x+5,y-0.1))

                freq,=re.match(ur".*(\d{3}\.\d{3}).*",freqcand.text).groups()
                fname=None
                for line in reversed(lines):
                    if re.match(ur"[A-ZÅÄÖ ]{3,}",line):                        
                        #print "freqname Matched:",line
                        fname=line.strip()
                        break
                if not fname: raise Exception("Found no frequency name for freq: "+freq)
                freqs.append((fname,float(freq)))
            if len(freqs): break
Example no. 25
def ee_parse_restrictions():
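    #Parse the Estonian (EE) restricted/danger area PDF: each "EER"/"EED" row
    #becomes a space whose lateral limits are read from the text column and
    #whose ceiling/floor come from the elevation column; EER1 is extended along
    #a hard-coded sea border, and a fixed EE TSA 1 area is appended at the end.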
    spaces=[]
    p=parse.Parser("/ee_restricted_and_danger.pdf",lambda x: x,country='ee')
    for pagenr in xrange(p.get_num_pages()):        
        page=p.parse_page_to_items(pagenr)
        raws=list(sorted(page.get_by_regex(ur"EE[RD]\d+\s+.*"),key=lambda x:x.y1))+[None]
        if len(raws)>1:
            elevs=page.get_by_regex(ur"\d+\s*FT\s*MSL|FL\s*\d+")
            assert elevs
            elevcol=min(elev.x1 for elev in elevs)
            assert elevcol!=100
            for cur,next in izip(raws[:-1],raws[1:]):
                #if cur.text.count("Tunnus, nimi ja sivurajat"): continue #not a real airspace
                space=dict()
                if next==None:
                    y2=100
                else:
                    y2=next.y1-1.75
                name=cur.text.strip()
                space['name']=name
                

            
                areaspecprim=page.get_lines(page.get_partially_in_rect(cur.x1+0.01,cur.y2+0.05,elevcol-2,y2),
                                            fudge=.25)
                #print "areaspecprim:\n","\n".join(areaspecprim)
                areaspec=[]
                for area in areaspecprim:
                    print "area in ",area
                    area=area.replace(u"–","-")
                    if len(areaspec) and area.strip()=="": break
                    area=re.sub(ur"\w-$","",area)
                    areaspec.append(area)
                #print "Y-interval:",cur.y1,y2,"next:",next
                #print "Name:",space['name']
                #print "areaspec:",areaspec
                inp=" ".join(areaspec)
                #print inp
                #raw_input()
                
                tpoints=mapper.parse_coord_str(inp,context='estonia')
                if name.startswith("EER1"): 
                    tseaborder="592842N 0280054E - 593814N 0273721E - 593953N 0265728E - 594513N 0264327E"
                    seapoints=mapper.parse_coord_str(tseaborder)
                    cont=None      
                    points=[]
                    def close(a,b):
                        bearing,dist=mapper.bearing_and_distance(
                                    mapper.from_str(a),mapper.from_str(b))
                        #print (a,b),dist
                        return dist<1.0
                    for idx,point in enumerate(tpoints):
                        points.append(point)    
                        if close(point,seapoints[0]):
                            print "WAS CLOSE",point,seapoints[0]
                            points.extend(seapoints[1:-1])
                            for idx2,point in enumerate(tpoints[idx+1:]):
                                if close(point,seapoints[-1]):
                                    points.extend(tpoints[idx+1+idx2:])
                                    break
                            else:
                                raise Exception("Couldn't find seaborder end")
                            break                    
                    else:
                        raise Exception("Couldn't find seaborder")
                else:
                    points=tpoints
                space['points']=points
                vertitems=page.get_partially_in_rect(elevcol,cur.y1+0.05,elevcol+8,y2+1.5)
                vertspec=[]
                for v in page.get_lines(vertitems):
                    if v.strip()=="": continue
                    if v.strip().count("Lennuliiklusteeninduse AS"): 
                        continue
                    vertspec.append(v.strip())
                
                print "vertspec:",vertspec
                assert len(vertspec)==2
                ceiling,floor=vertspec
                
                if mapper.parse_elev(floor)>=9500 and mapper.parse_elev(ceiling)>=9500:
                    continue
                
                space['ceiling']=ceiling
                space['floor']=floor
                space['type']='R'
                space['freqs']=[]
                spaces.append(space)
                


    spaces.append(dict(
        name="EE TSA 1",
        ceiling="UNL",
        floor="5000 FT GND",
        points=mapper.parse_coord_str(u""" 
            594500N 0255000E – 594500N 0261800E – 
            592100N 0265800E – 591200N 0261200E – 
            591600N 0255400E – 594500N 0255000E"""),
        type="TSA",
        date=datetime(2011,03,25),
        freqs=[]))
def ee_parse_restrictions():
    spaces = []
    p = parse.Parser("/ee_restricted_and_danger.pdf",
                     lambda x: x,
                     country='ee')
    for pagenr in xrange(p.get_num_pages()):
        page = p.parse_page_to_items(pagenr)
        raws = list(
            sorted(page.get_by_regex(ur"EE[RD]\d+\s+.*"),
                   key=lambda x: x.y1)) + [None]
        if len(raws) > 1:
            elevs = page.get_by_regex(ur"\d+\s*FT\s*MSL|FL\s*\d+")
            assert elevs
            elevcol = min(elev.x1 for elev in elevs)
            assert elevcol != 100
            for cur, next in izip(raws[:-1], raws[1:]):
                #if cur.text.count("Tunnus, nimi ja sivurajat"): continue #not a real airspace
                space = dict()
                if next == None:
                    y2 = 100
                else:
                    y2 = next.y1 - 1.75
                name = cur.text.strip()
                space['name'] = name

                areaspecprim = page.get_lines(page.get_partially_in_rect(
                    cur.x1 + 0.01, cur.y2 + 0.05, elevcol - 2, y2),
                                              fudge=.25)
                #print "areaspecprim:\n","\n".join(areaspecprim)
                areaspec = []
                for area in areaspecprim:
                    print "area in ", area
                    area = area.replace(u"–", "-")
                    if len(areaspec) and area.strip() == "": break
                    area = re.sub(ur"\w-$", "", area)
                    areaspec.append(area)
                #print "Y-interval:",cur.y1,y2,"next:",next
                #print "Name:",space['name']
                #print "areaspec:",areaspec
                inp = " ".join(areaspec)
                #print inp
                #raw_input()

                tpoints = mapper.parse_coord_str(inp, context='estonia')
                if name.startswith("EER1"):
                    tseaborder = "592842N 0280054E - 593814N 0273721E - 593953N 0265728E - 594513N 0264327E"
                    seapoints = mapper.parse_coord_str(tseaborder)
                    cont = None
                    points = []

                    def close(a, b):
                        bearing, dist = mapper.bearing_and_distance(
                            mapper.from_str(a), mapper.from_str(b))
                        #print (a,b),dist
                        return dist < 1.0

                    for idx, point in enumerate(tpoints):
                        points.append(point)
                        if close(point, seapoints[0]):
                            print "WAS CLOSE", point, seapoints[0]
                            points.extend(seapoints[1:-1])
                            for idx2, point in enumerate(tpoints[idx + 1:]):
                                if close(point, seapoints[-1]):
                                    points.extend(tpoints[idx + 1 + idx2:])
                                    break
                            else:
                                raise Exception("Couldn't find seaborder end")
                            break
                    else:
                        raise Exception("Couldn't find seaborder")
                else:
                    points = tpoints
                space['points'] = points
                vertitems = page.get_partially_in_rect(elevcol, cur.y1 + 0.05,
                                                       elevcol + 8, y2 + 1.5)
                vertspec = []
                for v in page.get_lines(vertitems):
                    if v.strip() == "": continue
                    if v.strip().count("Lennuliiklusteeninduse AS"):
                        continue
                    vertspec.append(v.strip())

                print "vertspec:", vertspec
                assert len(vertspec) == 2
                ceiling, floor = vertspec

                if mapper.parse_elev(floor) >= 9500 and mapper.parse_elev(
                        ceiling) >= 9500:
                    continue

                space['ceiling'] = ceiling
                space['floor'] = floor
                space['type'] = 'R'
                space['freqs'] = []
                spaces.append(space)

    spaces.append(
        dict(name="EE TSA 1",
             ceiling="UNL",
             floor="5000 FT GND",
             points=mapper.parse_coord_str(u""" 
            594500N 0255000E – 594500N 0261800E – 
            592100N 0265800E – 591200N 0261200E – 
            591600N 0255400E – 594500N 0255000E"""),
             type="TSA",
             date=datetime(2011, 03, 25),
             freqs=[]))
Example no. 27
def parse_page(parser, pagenr):
    page = parser.parse_page_to_items(pagenr)
    items = page.items
    minx = min([item.x1 for item in items])
    headings = []
    majorre = ur"\s*([A-ZÅÄÖ ][A-ZÅÄÖ]{3,})\s+(?:TMA|MIL CTA)\s*(?:-.*)?$"
    minorre = ur"\s*(?:TMA|MIL CTA [SN]?)\s*[A-ZÅÄÖ ]*\s*"
    airwayre = ur"(AWY\s+EF\s+[-A-Z]+)"
    delegre = ur".*(Delegation\s+of\s+responsibility).*"
    for item in page.get_by_regex(majorre):
        m, = re.match(majorre, item.text).groups()
        assert m != None
        assert m.strip() != ""
        headings.append(('major', item.text.strip(), m, item))
    for item in page.get_by_regex(airwayre):
        m, = re.match(airwayre, item.text).groups()
        assert m != None
        assert m.strip() != ""
        headings.append(('airway', item.text.strip(), m, item))
    for item in page.get_by_regex(minorre):
        m = re.match(minorre, item.text).group()
        assert m != None
        assert m.strip() != ""
        #print "Heading %d: %s"%(item.y1,m)
        headings.append(('minor', item.text.strip(), m, item))
    for item in page.get_by_regex(delegre):
        m, = re.match(delegre, item.text).groups()
        assert m != None
        assert m.strip() != ""
        headings.append(('deleg', item.text.strip(), m, item))
    #print headings
    headings.sort(key=lambda x: x[3].y1)

    def findheadingfor(y, meta=None):
        minor = None
        major = None
        #print "HEadings:",headings
        for (kind, full, name, item) in reversed(headings):
            #print "Checking %s,%s (state: minor %s / major %s)"%(kind,item.y1,minor,major)
            if kind == 'airway' and item.y1 < y:
                return name, "airway"
            if kind == 'deleg' and item.y1 < y:
                return name, "deleg"
            if minor == None and kind == "minor" and item.y1 < y:
                minor = name.strip()
                if meta != None: meta['minor_y'] = item.y1
            if major == None and kind == "major" and item.y1 < y:
                major = name.strip()
                fullname = full
                if meta != None: meta['major_y'] = item.y1
                break
        assert major != None and major.strip() != ""
        if minor != None:
            return major + " " + minor, "area"
        return fullname, "area"

    cury = 0
    coordstrs = page.get_by_regex(ur".*\d{6}N \d{7}E.*")

    airway_width = None
    airway_vlim = None
    for item in page.get_partially_in_rect(0, 0, 100, 15):
        if item.text.upper().count("WID NM"):
            airway_width = (item.x1, item.x2)
        if item.text.lower().count("vertical limits"):
            airway_vlim = (item.x1, item.x2)

    out = []
    atsout = []
    while True:
        found = False
        #print "Looking for coords, y= %d"%(cury,)
        for titem in coordstrs:
            #print "Considering coordstr: ",titem.y1
            if titem.y1 <= cury: continue
            if titem.x1 < 40:
                item = titem
                found = True
                break
        if not found: break
        cury = item.y1
        headmeta = dict()
        name, hkind = findheadingfor(item.y1, headmeta)

        if hkind == 'airway':
            assert airway_width and airway_vlim

            lines = page.get_lines(page.get_partially_in_rect(
                0, cury, minx + 35, 100),
                                   order_fudge=6)
            y1 = cury
            y2 = 100
            coordlines = []
            for idx, line in enumerate(lines):
                if line.count("AWY") and line.count("EF"):
                    y2 = line.y1
                    break
                coordlines.append(line.strip())
            coordstr = " ".join(coordlines)
            inpoints = [
                mapper.parse_coords(lat, lon)
                for lat, lon in re.findall(r"(\d+N) (\d+E)", coordstr)
            ]

            for wcand in page.get_partially_in_rect(airway_width[0], y1 + 0.05,
                                                    airway_width[1],
                                                    y2 - 0.05):
                width_nm = float(
                    re.match(r"(\d+\.?\d*)", wcand.text).groups()[0])

            elevs = []
            for vcand in page.get_partially_in_rect(airway_vlim[0], y1 + 0.05,
                                                    airway_vlim[1], y2 - 0.05):
                elevs.append(re.match(r"(FL\s*\d+)", vcand.text).groups()[0])
            elevs.sort(key=lambda x: mapper.parse_elev(x))
            floor, ceiling = elevs

            atsout.append(
                dict(floor=floor,
                     ceiling=ceiling,
                     freqs=[],
                     type="RNAV",
                     name=name,
                     points=ats_routes.get_latlon_outline(inpoints, width_nm)))

            cury = y2
            continue
        elif hkind == 'deleg':

            y2 = cury + 1
            continue
        else:
            areaspec = []
            #print "Rect: ",0,cury,minx+35,100
            y1 = cury
            lines = page.get_lines(page.get_partially_in_rect(
                0, cury, minx + 35, 100),
                                   order_fudge=10)
            for idx, line in enumerate(lines):
                if re.search(ur"FL \d+", line) or line.count("FT MSL"):
                    vertidx = idx
                    break
                #print "Line:",line.encode('utf8')
                if line.strip() == "":
                    vertidx = idx
                    break
                cury = max(cury, line.y2 + 0.5)
                line = line.replace(u"–", "-")
                if not (line.endswith("-") or line.endswith(" ")):
                    line += " "
                areaspec.append(line)
            verts = []

            for idx in xrange(vertidx, len(lines)):
                #print "Looking for alt:",lines[idx],"y2:",lines[idx].y2
                m = re.search(ur"(FL\s+\d+)", lines[idx].strip())
                if m:
                    verts.append((m.groups()[0], lines[idx].y1))
                m = re.search(ur"(\d+ FT (?:MSL|GND|SFC))", lines[idx].strip())
                if m:
                    verts.append((m.groups()[0], lines[idx].y1))
                if len(verts) >= 2: break
            y2 = verts[-1][1]

        freqs = []
        for attempt in xrange(2):
            for freqcand in page.get_by_regex(ur".*\d{3}\.\d{3}.*"):
                #print "headmeta:",headmeta
                #print "attempt:",attempt
                #print "freqy1:",freqcand.y1
                if freqcand.x1 < 30: continue
                if attempt == 0:
                    if freqcand.y1 < y1: continue
                else:
                    if 'major_y' in headmeta:
                        if freqcand.y1 < headmeta['major_y']: continue
                    else:
                        if freqcand.y1 < y1: continue

                if freqcand.y1 > y2: continue
                x, y = freqcand.x1, freqcand.y1
                lines = page.get_lines(
                    page.get_partially_in_rect(x + 0.1, y - 10, x + 5,
                                               y - 0.1))

                freq, = re.match(ur".*(\d{3}\.\d{3}).*",
                                 freqcand.text).groups()
                fname = None
                for line in reversed(lines):
                    if re.match(ur"[A-ZÅÄÖ ]{3,}", line):
                        #print "freqname Matched:",line
                        fname = line.strip()
                        break
                if not fname:
                    raise Exception("Found no frequency name for freq: " +
                                    freq)
                freqs.append((fname, float(freq)))
Esempio n. 28
0
def get_notam_objs(kind=None):    
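    # Collects the currently active NOTAM updates from the database and turns
    # them into obstacle, point ("notam") and area ("notamarea") objects;
    # `kind`, if given, restricts the output to one of those categories.
    # Radius-style NOTAMs become circles via mapper.create_circle, while
    # coordinate groups are parsed with mapper.parse_lfv_area. NotamUpdate,
    # meta and mapper are helpers from the surrounding project.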
    notamupdates=meta.Session.query(NotamUpdate).filter(
              NotamUpdate.disappearnotam==sa.null()).all()
    obstacles=[]
    others=[]
    spaces=[]
    areas=[]
    for u in notamupdates:
        text=u.text.strip()

        if text.count("W52355N0234942E"):
            text=text.replace("W52355N0234942E","652355N0234942E")
        coordgroups=[]
        for line in text.split("\n"):
            dig=False
            for char in line:
                if char.isdigit():
                    dig=True
            if dig==False:
                if len(coordgroups) and coordgroups[-1]!="":
                    coordgroups.append("")
            else:
                if len(coordgroups)==0: coordgroups=[""]
                coordgroups[-1]+=line+"\n"

        if (kind==None or kind=="notamarea"):
            
            for radius,unit,lat,lon in chain(
                re.findall(r"RADIUS\s*(?:OF)?\s*(\d+)\s*(NM|M)\s*(?:CENT[ERD]+|FR?O?M)?\s*(?:ON)?\s*(?:AT)?\s*(\d+[NS])\s*(\d+[EW])",text),
                re.findall(r"(\d+)\s*(NM|M)\s*RADIUS\s*(?:CENT[ERD]+)?\s*(?:ON|AT|FROM)?\s*(\d+[NS])\s*(\d+[EW])",text),
                re.findall(r"(\d+)\s*(NM|M)\s*RADIUS.*?[^0-9](\d+[NS])\s*(\d+[EW])",text,re.DOTALL)
                ):
                try:
                    radius=float(radius)
                    if unit=="M":
                        radius=radius/1852.0
                    else:
                        assert unit=="NM"
                    centre=mapper.parse_coords(lat,lon)
                    coords=mapper.create_circle(centre,radius)
                    areas.append(dict(
                            points=coords,
                            kind="notamarea",
                            name=text,
                            type="notamarea",
                            notam_ordinal=u.appearnotam,
                            notam_line=u.appearline,
                            notam=text))
                except Exception,cause:
                    print "Invalid notam coords: %s,%s"%(lat,lon)
                    
                    
                    
                    
        for coordgroup in coordgroups:        
            try:
                coords=list(mapper.parse_lfv_area(coordgroup,False))
            except Exception,cause:
                print "Parsing,",coordgroup
                print "Exception parsing lfv area from notam:%s"%(cause,)
                coords=[]
            
            if len(coords)==0: continue
            if text.count("OBST") and (kind==None or kind=="obstacle"):
                elevs=re.findall(r"ELEV\s*(\d+)\s*FT",text)
                elevs=[int(x) for x in elevs if x.isdigit()]
                if len(elevs)!=0:                
                    elev=max(elevs)
                    for coord in coords:
                        obstacles.append(dict(
                            pos=coord,
                            elev=elev,
                            elevf=mapper.parse_elev(elev),
                            kind='notam',
                            notam_ordinal=u.appearnotam,
                            notam_line=u.appearline,
                            name=text.split("\n")[0],
                            notam=text))
                    continue
            couldbearea=True
            if len(coords)<=2:
                couldbearea=False
            if text.count("PSN")>=len(coords)-2:
                couldbearea=False
            if couldbearea==False and (kind==None or kind=="notam"):
                for coord in coords:
                    others.append(dict(
                        pos=coord,
                        kind='notam',
                        name=text,
                        notam_ordinal=u.appearnotam,
                        notam_line=u.appearline,
                        notam=text))
            if couldbearea==True and (kind==None or kind=="notamarea"):
                if len(coords)>2:
                    if text.startswith("AREA: "):
                        continue #These aren't real notams, they're area-specifications for all other notams... make this better some day.                        
                    areas.append(dict(
                        points=coords,
                        kind="notamarea",
                        name=text,
                        type="notamarea",
                        notam_ordinal=u.appearnotam,
                        notam_line=u.appearline,
                        notam=text))
Esempio n. 29
0
def ee_parse_gen_r2(url):
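    # Parses an Estonian eAIP HTML table into restricted-area ('R') airspace
    # dicts with name, points, floor, ceiling, date and source URL. Rows whose
    # parsed floor is 9500 or higher are skipped. fetchdata, mapper, alltext
    # and ee_parse_tma2 are assumed to come from the surrounding project.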
    spaces = []
    parser = lxml.html.HTMLParser()
    data, date = fetchdata.getdata(url, country='ee')
    parser.feed(data)
    tree = parser.close()
    print "Parsed tree"
    for tab in tree.xpath(".//table"):
        print "Found table"
        for idx, cand in enumerate(tab.xpath(".//tr")):
            if len(cand.getchildren()) < 3:
                continue
            space = dict()
            #print list(cand.getchildren())
            what, vert, remark = list(cand.getchildren())[0:3]
            whattxt = alltext(what).replace(u"–", "-").replace(u"\xa0", " ")

            verttxt = alltext(vert)

            while True:
                w = re.sub(ur"\(.*?\)", "", whattxt)
                if w != whattxt:
                    whattxt = w
                    continue
                break

            #print idx,whattxt
            if idx < 3:
                if idx == 1:
                    assert (whattxt.count("Identification")
                            or whattxt.count("ateral limits"))
                if idx == 2: assert whattxt.strip() == "1"
                continue
            verttxt = verttxt.replace(u"\xa0", u" ")
            vertlines = [x for x in verttxt.split("\n") if x.strip()]
            if len(vertlines) == 1:
                vertlines = [x for x in verttxt.split("  ") if x.strip()]
            print "Verlintes:", repr(vertlines)
            #print "wha------------------------ t",whattxt
            space['ceiling'], space['floor'] = vertlines[:2]
            mapper.parse_elev(space['ceiling'])
            ifloor = mapper.parse_elev(space['floor'])
            if ifloor >= 9500: continue
            lines = whattxt.split("\n")
            out = []
            merged = ""
            for line in lines[1:]:
                line = line.strip().replace(u"–", "-")
                if line == "": continue
                if line.endswith("point"):
                    out.append(line + " ")
                    continue
                if line.endswith("ircle with radius of") or line.endswith(
                        ",") or line.endswith("on") or line.endswith("radius"):
                    merged = " ".join([merged, line])
                    print "<---Merged:", merged
                    continue
                if merged:
                    line = " ".join([merged, line])
                merged = ""
                if not line.endswith("-"):
                    line = line + " -"
                out.append(line + "\n")

            space['name'] = lines[0].strip()
            w = "".join(out)
            print "Parsing:", w
            if space['name'].startswith('EER1 '):
                w = ee_parse_tma2.eer1txt
                fir = mapper.parse_coord_str(ee_parse_tma2.firtxt,
                                             context='estonia')
                fir_context = [fir]
                space['points'] = mapper.parse_coord_str(
                    w, fir_context=fir_context)
            else:
                space['points'] = mapper.parse_coord_str(w, context='estonia')
            space['type'] = 'R'
            space['date'] = date
            space['freqs'] = []
            space['url'] = fetchdata.getrawurl(url, 'ee')
            spaces.append(space)
    return spaces
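# Minimal usage sketch (the URL below is illustrative only; the real eAIP path
# is supplied by the caller):
#   spaces = ee_parse_gen_r2("/eAIP/EE-ENR-5.1-en-GB.htm")
#   for space in spaces:
#       print space['name'], space['floor'], space['ceiling'], len(space['points'])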
Esempio n. 30
0
 altcand=[]
 for alt in page.get_lines(page.get_partially_in_rect(
                                 alt_x1,y1,alt_x2,y2)):
     if alt=="": break
     altcand.append(alt)
 print altcand
 h1,h2=altcand
 def fixupalt(x):
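     # Normalizes a vertical-limit string: flight levels ("FL95") are kept
     # as-is, "NNN m (NNN ft)" becomes the foot value suffixed with "FT MSL",
     # and GND/UNL pass through unchanged.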
     print "Fixing",x
     fl,alt,gnd,unl=re.match(ur"(?:(FL\d+)|\d+\s*m\s*\((\d+)\s*ft\)|(GND)|(UNL))",x).groups()
     if fl: return fl
     if alt: return alt+"FT MSL"
     if gnd: return "GND"
     if unl: return "UNL"
 ceiling,floor=[fixupalt(h) for h in [h1,h2]]
 if mapper.parse_elev(floor)>=9500:
     continue
 kind,name=re.match("EP (TSA|TRA|TFR) ([\d\w]+)",tra.text).groups()            
 def fix_coords(s):
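     # Rewrites degree/minute/second coordinates such as 52°05'30''N 016°12'45''E
     # into the compact "DDMMSSN DDDMMSSE - " form that mapper.parse_coord_str
     # expects.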
     
     def fixer(m):
         a,b,c,d, e,f,g,h=m.groups()
         return "%02d%02d%02d%s %03d%02d%02d%s - "%(int(a),int(b),int(c),d,
                                                    int(e),int(f),int(g),h)
     return re.sub(ur"(\d{2,3})°(\d{2})'(\d{2})''([NS])\s*(\d{2,3})°(\d{2})'(\d{2})''([EW])",fixer,s)
 coordstr2=fix_coords("".join(o)).rstrip().rstrip("-")
 print "COordstr:",coordstr2
 spaces.append(dict(
     name="EP %s %s"%(kind,name),
     points=mapper.parse_coord_str(coordstr2,context="poland"),
     ceiling=ceiling,
Esempio n. 31
0
 cstr=[]
 spacename=coordstr[0]
 assert spacename=="CTR"
 for sub in coordstr[1:]:
     cstr.append(sub.strip().rstrip("."))
 def fixfunc(m):
     return "".join(m.groups())
 raw=re.sub(ur"(\d{2,3})\s*(\d{2})\s*(\d{2})\s*([NSEW])",
                                      fixfunc,
                                      "".join(cstr)).replace(","," - ")
 print "parsing raw:",raw
 points=mapper.parse_coord_str(raw,context='lithuania')
                                      
 print "Limitstr",limitstr
 floor,ceiling=re.match(ur"(.*)\s*to\s*(.*)",limitstr).groups()
 mapper.parse_elev(floor)
 mapper.parse_elev(ceiling)
 
 spacenamestem=spacename.strip()
 if spacenamestem.endswith("CTR"):
     spacenamestem=spacenamestem[:-3].strip()
 if spacenamestem.endswith("FIZ"):
     spacenamestem=spacenamestem[:-3].strip()
 #construct names
 newfreqs=[]
 for serv,freq in freqs:
     serv=serv.strip()
     if serv=='TWR':
         servicelong="Tower"
     elif serv.startswith('APP'):
         servicelong="Approach"
Esempio n. 32
0
def parse_space(lines):
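    # Parses a line-oriented "KEY=value" airspace description (TYPE, SUBTYPE,
    # REF, CLASS, RADIO, NOTES, TITLE, BASE/TOPS and POINT/CLOCKWISE/CIRCLE
    # records, as inferred from the parsing below) into airspace dicts.
    # Spaces whose floor and ceiling are both 9500 ft or higher are dropped.
    # mapper.parse_elev is a project helper.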
    idx = [0]
    out = []
    last = [None]
    translate = dict(ATS='TMA',
                     TIA='TMA',
                     TIZ='CTR',
                     TMA='TMA',
                     CTR='CTR',
                     DANGER="R",
                     RESTRICTED="R")
    try:

        def getline():
            if idx[0] == len(lines):
                raise StopIteration()
            r = lines[idx[0]]
            idx[0] += 1
            last[0] = r
            return r

        def peek():
            if idx[0] == len(lines):
                raise StopIteration()
            r = lines[idx[0]]
            return r

        def get(what):
            splat = getline().split("=")
            if len(splat) != 2:
                raise Exception("Expected <%s=?>, got <%s>" % (what, splat))
            key, val = splat
            if key != what:
                raise Exception("Expected <%s>, got <%s>" % (what, splat))
            assert key == what
            return val.strip()

        def isnext(what):
            line = peek()
            if line.startswith(what):
                return True
            return False

        while True:
            TYPE = get("TYPE")
            freqs = []
            if isnext("SUBTYPE"):
                SUBTYPE = get("SUBTYPE")
            else:
                SUBTYPE = None
            if isnext("REF"):
                REF = get("REF")
                if isnext("ACTIVE"):
                    getline()
                if isnext("TITLE"):
                    TITLE = get("TITLE")
                else:
                    TITLE = None
                CLASS = SUBTYPE = RADIO = None
                if TYPE == "DANGER":
                    name = "D-" + REF
                else:
                    name = REF
                type_ = translate[TYPE]
            else:
                if not SUBTYPE:
                    SUBTYPE = get("SUBTYPE")
                type_ = translate[SUBTYPE]
                CLASS = get("CLASS")
                RADIO = get("RADIO")
                REF = None
                notes = []
                while isnext("NOTES"):
                    notes.append(get("NOTES"))
                TITLE = get("TITLE")
                name = " ".join([TITLE.strip(), SUBTYPE])
                for radio in [RADIO] + notes:
                    radioname, freq = re.match(
                        ur"(.*?)\s*(\d{3}\.\d{3}\s*(?:and)?)+",
                        radio).groups()
                    fr = re.findall(ur"\d{3}\.\d{3}", freq)
                    for f in fr:
                        if float(f) < 200.0:
                            freqs.append((radioname, float(f)))
            if isnext("BASE"):
                BASE = get("BASE")
                TOPS = get("TOPS")
            print freqs
            points = []
            area = []
            while True:
                if isnext("POINT"):
                    p = get("POINT")
                    area.append(p)
                    continue
                if isnext('CLOCKWISE'):
                    radius, center, dest = re.match(
                        ur"CLOCKWISE RADIUS=(\d+\.?\d*) CENTRE=(\d+\.?\d*N \d+\.?\d*E) TO=(\d+\.?\d*N \d+\.?\d*E)",
                        getline()).groups()
                    area.append(
                        ur"clockwise along an arc with radius %s NM centred on %s to the point %s"
                        % (radius, center, dest))
                    continue
                if isnext('CIRCLE'):
                    l = getline()
                    radius, center = re.match(
                        ur"CIRCLE RADIUS=(\d+\.?\d*) CENTRE=(\d+\.?\d*N \d+\.?\d*E)",
                        l).groups()
                    area.append("A circle with radius %s NM centred on %s" %
                                (radius, center))
                break
            points = " - ".join(area)
            if isnext("BASE"):
                BASE = get("BASE")
                TOPS = get("TOPS")

            def elev(x):
                print x
                if x == "SFC": return "GND"
                if x == "UNL": return "UNL"
                if x.lower().startswith("fl"):
                    assert x[2:].isdigit()
                    return "FL%03d" % (int(x[2:]))
                assert x.isdigit()
                return "%d ft MSL" % (int(x), )

            floor = elev(BASE)
            ceiling = elev(TOPS)
            floorint = mapper.parse_elev(floor)
            ceilint = mapper.parse_elev(ceiling)
            if floorint >= 9500 and ceilint >= 9500:
                continue
            out.append(
                dict(name=name,
                     floor=floor,
                     ceiling=ceiling,
                     freqs=freqs,
                     points=points,
                     type=type_,
                     date="2010-01-01T00:00:00Z"))
    except StopIteration:
        pass
    except Exception:
        print "Last parsed:", last
        raise
    else:
        raise Exception("Unexpected erorr")
    return out
Esempio n. 33
0
def ev_parse_x(url):
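    # Parses a Latvian eAIP table (EVR/EVP/TSA/TRA areas and ATZs, judging by
    # the name check below) into airspace dicts of type "R". Areas whose
    # parsed floor and ceiling are both 9500 or higher are skipped. fetchdata,
    # mapper, alltext, strangefix, s and clean_up_polygon are project helpers.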
    out = []
    parser = lxml.html.HTMLParser()
    data, date = fetchdata.getdata(url, country="ev")
    parser.feed(data)
    tree = parser.close()
    got_fir = False
    for table in tree.xpath("//table"):
        # print "Table with %d children"%(len(table.getchildren()),)
        rows = list(table.xpath(".//tr"))

        # for idx,col in enumerate(cols):
        #    print "Col %d, %s"%(idx,alltext(col)[:10])
        headingcols = rows[0].xpath(".//th")
        if len(headingcols) == 0:
            continue
        name, alt = headingcols[0:2]
        if alltext(name).count("QNH") and len(headingcols) > 6:
            continue
        print alltext(name)
        assert alltext(name).lower().count("name") or alltext(name).lower().count("lateral")
        print alltext(alt)
        assert alltext(alt).lower().count("limit")

        for row in rows[1:]:
            cols = list(row.xpath(".//td"))
            if len(cols) < 2:
                continue
            name, alt = cols[:2]
            lines = [x.strip() for x in alltext(name).split("\n") if x.strip()]
            if len(lines) == 0:
                continue
            assert len(lines)

            spacename = lines[0].strip()
            if spacename.strip() == "A circle radius 0,5 NM centered on 565705N 0240619E EVR2 RIGA":
                spacename = "EVR2 RIGA"
                lines = [spacename, lines[0][: -len(spacename)].strip()] + lines[1:]
            print spacename
            if spacename.strip() == "SKRIVERI":
                continue
            print "Spacename is:", spacename
            assert (
                spacename[:3] in ["EVR", "EVP", "TSA", "TRA"]
                or spacename.endswith("ATZ")
                or spacename.endswith("ATZ (MILITARY)")
            )

            altcand = []
            for altc in alltext(alt).split("\n"):
                if altc.count("Real-time"):
                    continue
                altcand.append(altc.strip())
            print "Altcands:", altcand
            ceiling, floor = [x.strip() for x in " ".join(altcand).split("/")]
            ceiling = strangefix(ceiling)
            floor = strangefix(floor)

            mapper.parse_elev(ceiling)
            ifloor = mapper.parse_elev(floor)
            iceiling = mapper.parse_elev(ceiling)
            if ifloor >= 9500 and iceiling >= 9500:
                continue
            assert ifloor < iceiling

            freqs = []
            raw = " ".join(lines[1:])
            raw = re.sub(s(ur"Area bounded by lines successively joining the following points:"), "", raw)
            print "Raw:", raw

            coords = mapper.parse_coord_str(raw, context="latvia")
            for cleaned in clean_up_polygon(coords):
                out.append(
                    dict(
                        name=spacename,
                        points=cleaned,
                        type="R",
                        freqs=freqs,
                        floor=floor,
                        url=url,
                        date=date,
                        ceiling=ceiling,
                    )
                )

    return out
Esempio n. 34
0
def ev_parse_tma():
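    # Fetches the current-AIRAC Latvian eAIP ENR 2.1 page and parses its
    # TMA/FIR table: names, lateral limits, vertical limits and frequencies.
    # The RIGA FIR/UIR row becomes a FIR tagged with icao "EVRR"; RIGA
    # UTA/CTA/AOR rows and the 121.500 emergency frequency are skipped.
    # get_cur_airac, fetchdata, mapper, alltext, strangefix, s and
    # clean_up_polygon are project helpers.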
    out = []
    parser = lxml.html.HTMLParser()
    # url="/Latvia_EV-ENR-2.1-en-GB.html"
    cur_airac = get_cur_airac()
    url = "/eAIPfiles/%s-AIRAC/html/eAIP/EV-ENR-2.1-en-GB.html" % (cur_airac,)

    data, date = fetchdata.getdata(url, country="ev")
    parser.feed(data)
    tree = parser.close()

    got_fir = False
    for table in tree.xpath("//table"):
        # print "Table with %d children"%(len(table.getchildren()),)
        rows = list(table.xpath(".//tr"))
        for idx in xrange(5):
            headingrow = rows[idx]
            cols = list(headingrow.xpath(".//th"))
            # print len(cols)
            if len(cols) == 5:
                break
        else:
            raise Exception("No heading row")
        assert idx == 0
        # for idx,col in enumerate(cols):
        #    print "Col %d, %s"%(idx,alltext(col)[:10])
        nameh, unith, callsignh, freqh, remarkh = cols
        assert alltext(nameh).lower().count("name")
        assert alltext(unith).lower().count("unit")
        assert re.match(ur"call\s*sign", alltext(callsignh).lower())
        lastcols = None
        for row in rows[1:]:
            cols = list(row.xpath(".//td"))
            if len(cols) == 5:
                name, unit, callsign, freq, remark = cols
                lastcols = cols
            else:
                if lastcols:
                    unit, callsign, freq, remark = lastcols[1:]
                    name = cols[0]
                else:
                    continue

            lines = [x.strip() for x in alltext(name).split("\n") if x.strip()]
            if len(lines) == 0:
                continue
            spacename = lines[0].strip()

            if re.match(ur"RIGA\s*UTA|RIGA\s*CTA|RIGA\s*AOR.*", spacename):
                continue
            freqstr = alltext(freq)
            callsignstr = alltext(callsign)
            if freqstr.strip():
                print freqstr
                freqmhzs = re.findall(ur"\d{3}\.\d{3}", freqstr)
                assert len(freqmhzs) <= 2
                callsigns = [callsignstr.split("\n")[0].strip()]
                freqs = []
                for idx, freqmhz in enumerate(freqmhzs):
                    if freqmhz == "121.500":
                        continue
                    freqs.append((callsigns[idx], float(freqmhz)))
                print "freqs:", freqs
            else:
                freqs = []
            assert len(lines)

            classidx = next(idx for idx, x in reversed(list(enumerate(lines))) if x.lower().count("class of airspace"))

            if re.match(ur"RIGA\s*FIR.*UIR", spacename, re.UNICODE):
                got_fir = True
                lastspaceidx = classidx - 2
                floor = "GND"
                ceiling = "-"
                type_ = "FIR"
            else:
                if lines[classidx - 1].count("/") == 1:
                    floor, ceiling = lines[classidx - 1].split("/")
                    lastspaceidx = classidx - 1
                else:
                    floor = lines[classidx - 1]
                    ceiling = lines[classidx - 2]
                    lastspaceidx = classidx - 2
                ceiling = strangefix(ceiling)
                floor = strangefix(floor)

                mapper.parse_elev(ceiling)
                mapper.parse_elev(floor)
                type_ = "TMA"
            tcoords = lines[1:lastspaceidx]
            # verify that we got actual altitudes:
            coords = []
            for coord in tcoords:
                coord = coord.strip().replace("(counter-)", "").replace("(RIGA DVOR - RIA)", "")
                if coord.endswith(u"E") or coord.endswith("W"):
                    coord = coord + " -"
                coords.append(coord)

            raw = " ".join(coords)
            raw = re.sub(s(ur"Area bounded by lines successively joining the following points:"), "", raw)
            print "Raw:", raw
            coords = mapper.parse_coord_str(raw, context="latvia")
            for cleaned in clean_up_polygon(coords):
                out.append(
                    dict(
                        name=spacename,
                        points=cleaned,
                        type=type_,
                        freqs=freqs,
                        floor=floor,
                        url=url,
                        date=date,
                        ceiling=ceiling,
                    )
                )
                if type_ == "FIR":
                    out[-1]["icao"] = "EVRR"
Esempio n. 35
0
def parse_page(parser,pagenr,kind="TMA",last_sector=dict()):   
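    #Parses one page of the Swedish (LFV) AIP into TMA, ACC-sector or R/danger
    #area records depending on `kind`. The "Lateral limits"/"Vertical limits"/
    #ATC-or-FREQ headings are located first, candidate zone names are collected
    #below them, and each zone's coordinates, vertical limits and frequencies
    #are then extracted. `last_sector` carries sector names and frequencies
    #over between consecutive calls/pages.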
    if kind=="TMA":
        thirdcols=["ATC unit","AFIS unit"]
    elif kind=="sector":
        thirdcols=["FREQ"]
    elif kind=="R":
        thirdcols=["Remarks (nature of hazard,"]
    else:
        raise Exception("Bad kind")
    page=parser.parse_page_to_items(pagenr)
    items=page.items
    #print "Items:",pitems    

    #print "Possible Areas:"
    headings=[]
    for item in items:        
        if item.text==None: continue
        item.text=item.text.strip()
        if item.text=="": continue
        if item.text=="Name": continue
        if item.y1<25 and item.text in (["Lateral limits","Vertical limits"]+thirdcols):
            headings.append(item)  
    
    headings.sort(key=lambda x:x.x1)    
    #print "found candidates:",zone_candidates    
    if len(headings)==0:
        return []
    avg_heading_y=sum(h.y1 for h in headings)/float(len(headings))
    uprint("Found headings:",headings)
    zone_candidates=[]
    for item in items:        
        if item.text==None or item.text.strip()=="": continue
        if item.text.strip().startswith("AMDT"): continue
        if item.text.strip().startswith("The LFV Group"): continue
        if re.match(ur"\s*LFV\s*AIRAC\s*AMDT\s*\d+/\d+\s*",item.text): continue
        if item.text.strip()=="LFV": continue
        if item.text.count('Terminal Information Areas'): continue
        if item.text.strip().startswith("AIRAC"): continue        
        if kind=="R" and not is_r_or_danger_area_name(item.text.strip()):
            continue
        if item.y1>avg_heading_y+1 and item.x1<12 and not item.text in ["Name",'None',"LFV"]:
            if item.text.count("Established") or item.text.count(u'TROLLHÄTTAN TWR') or item.text.count(u'and/or SÅTENÄS') or item.text.count(u'TWR/TMC') or item.text.strip().endswith("TWR") or item.text.strip().endswith("TWR."):
                continue
            if item.text.count("operational hours") or item.text.count("See AIP DENMARK"):
                continue
            if item.text.count("hours of"):
                continue
            if item.text.count("Upper limit"):
                continue
            if item.text.count("that part") or item.text.count("coincides"):
                continue
            if item.text.count(u'Danger area EK D395 and') or item.text.count(u'D396 are situated within') or item.text.strip()=="TMA":
                continue
            if item.text.count(u'ÖSTGÖTA TMC is closed') or item.text.count(u'and SKAVSTA TWR is') or item.text.strip()=='open.':
                continue
            if item.text.count("SAT 0530"): 
                continue
            if item.text.strip()=='OPS': 
                continue
            if item.text.strip()==u'ÖSTGÖTA TMC:': 
                continue
            if item.text.count(u'is open') or item.text.count('is closed'):
                continue
            if item.text.count('MON-FRI') or item.text.count('2150'): 
                continue
            lines2=page.get_lines(page.get_partially_in_rect(12,item.y1+0.2,40,item.y2-0.2))
            if len(lines2):
                zone_candidates.append(item)
    
    uprint("Found cands:",zone_candidates)
    zone_candidates.sort(key=lambda x:x.y1)
    
    for zone in zone_candidates:
        #uprint("Zone:",zone)
        #assert not zone.text.count("AOR")
        assert not zone.text.count("FIR")
    
    uprint("Headings:",headings)        
    print "Pagenr:",pagenr
    assert len(headings)==3

    ret=[]
    for i in xrange(len(zone_candidates)):
        d=dict()
        cand=zone_candidates[i]
        if i<len(zone_candidates)-1:
            nextcand=zone_candidates[i+1]
        else:
            nextcand=None
        y1=cand.y1-0.25
        y2=100
        if nextcand: y2=nextcand.y1-0.75
        for j in xrange(len(headings)):
            head=headings[j]
            if j<len(headings)-1:
                nexthead=headings[j+1]
            else:
                nexthead=None
            x1=head.x1
            x2=head.x2
            if j==len(headings)-1:                
                x1=headings[j-1].x2+3
                x2=100
            lines=page.get_lines(page.get_partially_in_rect(x1,y1,x2,y2,xsort=True,ysort=True))
            #print ("Parsed %s y,%d-%d, %s: <%s>\n\n"%(cand.text,y1,y2,head.text,lines)).encode('utf8')
            d[head.text]=lines        
        
        if kind=="R":
            if y2==100: y2=y1+3
            d['name']=" ".join(x.strip() for x in filter_head_foot(page.get_lines(page.get_partially_in_rect(0,y1,10,y2,xsort=True,ysort=True))))
        else:
            d['name']=cand.text.strip()
        ret.append(d)  


    allow_head=2
    print "Doing fixups--------------------------------------------------"
    tret=[]
    for x in ret:
        #print "Fixing up",x,"allow:",allow_head
        area="".join(x['Lateral limits']).strip()
        if allow_head==2 and area!="" and x['name'].strip()!="":
            allow_head=1
            
        if allow_head!=1:
            if len(tret):
                tret[-1]['Lateral limits']+=x['Lateral limits']
                tret[-1]['Vertical limits']+=x['Vertical limits']
        else:
            tret.append(x)
        
        if allow_head==1:
            allow_head=0
                
        if not area.endswith('-') and area!="":
            allow_head=2
            
        #print "   Fixed up up",x
    ret=tret
    for line in ret:
        print "Fixed:",line['name']," = ",line['Lateral limits'],line['Vertical limits']
    out=[]
    for d in ret:
        pa=dict()
        curname=d['name']
        if curname.count(u'Förteckning över'): continue
        print "D:",d
        arealines=[l for l in d['Lateral limits'] if l.strip()!=""]
        last_coord_idx=None
        #uprint("D:<%s> (area:%s)"%(d,arealines))
        if 'FREQ' in d:
            freqs=[("SWEDEN CONTROL",float(x)) for x in re.findall(r"\d{3}\.\d{3}","\n".join(d['FREQ']))]
            #print "Parsed freqs:",freqs
            if freqs:
                last_sector['freqs']=freqs
            
        if kind=='sector':            
            m=re.match(r"ES[A-Z]{2}\s*ACC\s*sector\s*([0-9a-zA-Z]*)",d['name'])
            if m:
                last_sector['major']=d['name']
                last_sector['majorsector'],=m.groups()
            if len(arealines)==0:
                last_sector['name']=d['name']
                continue
            
            if d['name'].count("Control Area and Upper Control Area"): continue        
            if d['name'].count("SUECIA CTA"): continue        
            if d['name'].count("SUECIA UTA"): continue
            
            m=re.match(r"([0-9a-zA-Z]*)(:.*)",d['name'])
            if m and 'majorsector' in last_sector:
                sectorname,sub=m.groups()
                if sectorname==last_sector['majorsector']:
                    d['name']=last_sector['major']+sub
                    #uprint("Fixed up name: ",d['name'])
        #print "Arealines:",arealines
        assert len(arealines)
        if arealines[0].strip()=="Danger area EK D395 and D396 are":
            arealines=arealines[1:]
        if arealines[0].strip()=="situated within TMA":
            arealines=arealines[1:]
            
        if " ".join(arealines).strip() in (u'Förteckning över CTA / Lists of CTA','Lateral limits'):
            continue

        for idx in xrange(len(arealines)):
            if arealines[idx].lower().startswith("established"):
                last_coord_idx=idx
                pa['established']=" ".join(l for l in arealines[idx:])   
                break
            if arealines[idx].lower().startswith("danger area"):
                last_coord_idx=idx
                break
            if arealines[idx].strip()=="LFV":
                last_coord_idx=idx
                break
        if last_coord_idx==None:
            last_coord_idx=len(arealines)
        #uprint("ARealines:",arealines)
        #uprint("Last coord:",arealines[last_coord_idx-1])
        if len(arealines)>last_coord_idx:
            if arealines[last_coord_idx-1:last_coord_idx+1]==[u'571324N 0161129E -', u'Established during operational hours of']:
                arealines[last_coord_idx-1]=arealines[last_coord_idx-1].strip("-")
        #uprint("Last fixed:",arealines[last_coord_idx-1])
        assert not arealines[last_coord_idx-1].strip().endswith("-")
        #for idx in xrange(last_coord_idx-1):
        #    print "arealine: <%s>"%(arealines[idx].strip(),)
        #    assert arealines[idx].strip().endswith("-") or arealines[idx].strip().endswith("to")
        
        vertlim=u" ".join(d['Vertical limits'])
        if vertlim.strip()=="":
            #print "Object with no vertical limits: %s"%(repr(d['name']),)
            continue
        
        if d['name']=='Control Area':
            continue

        uprint("Vertlim: ",vertlim)
        heightst=re.findall(r"(FL\s*\d{3})|(\d+\s*ft\s*(?:\s*/\s*\d+\s*.\s*GND)?(?:\s*GND)?)|(GND)|(UNL)",vertlim)
        uprint("Height candidates:",heightst)
        heights=[]
        for fl,ht,gnd,unl in heightst:
            if fl:
                heights.append(fl)
            if ht:
                heights.append(ht.strip())
            if gnd:
                heights.append(gnd.strip())
            if unl:
                heights.append(unl.strip())
        uprint("heights for ",d['name'],":",repr(heights))
        if len(heights)==0 and d['name']==u'GÖTEBORG TMA':
            heights=['GND','FL95']
        if len(heights)==1 and d['name']==u'Göteborg TMA':
            heights=['4500','FL95']
        assert len(heights)==2
        ceiling=heights[0].strip()
        floor=heights[1].strip()
                
        pa['name']=d['name']
        pa['floor']=floor
        pa['ceiling']=ceiling
        if mapper.parse_elev(floor)>=9500:
            continue
        #uprint("Arealines:\n================\n%s\n============\n"%(arealines[:last_coord_idx]))
        #print pa
        areacoords=" ".join(arealines[:last_coord_idx])
        pa['points']=parse_coord_str(areacoords)
        
        
        vs=[]
        for p in pa['points']:
            #print "from_str:",repr(p)
            x,y=mapper.latlon2merc(mapper.from_str(p),13)
            vs.append(Vertex(int(x),int(y)))

        p=Polygon(vvector(vs))
        if p.calc_area()<=30*30:
            pass#print pa
            #print "Area:",p.calc_area()
        assert p.calc_area()>30*30
        #print "Area: %f"%(p.calc_area(),)
        #print "Point-counts:",len(pa['points'])
        for p in pa['points']:
            assert p.count(",")==1 
        pa['type']=kind
        for thirdcol in thirdcols:
            if thirdcol in d:
                atc=d[thirdcol]
                break
        else:
            raise Exception("missing thirdcol")
        #print "ATc: <%s>"%(repr(atc),)
        freqs=[(y,float(x)) for x,y in re.findall(r"(\d{3}\.\d{3})\s*MHz\n(.*)","\n".join(atc))]
        if not freqs:
            freqs=last_sector.get('freqs',[])
        #print repr(freqs)
        pa['freqs']=freqs
        #uprint("Cleaning up ",pa['name'])
        for cleaned in clean_up_polygon(list(pa['points'])):
            d=dict(pa)
            #print "cleaned",cleaned
            for i,tup in enumerate(cleaned):
                assert type(tup)==str
                latlon=mapper.from_str(tup)
                lat,lon=latlon
                assert lat>=-85 and lat<=85
            d['points']=cleaned
            #uprint("cleaned:",pa['name'],len(cleaned),cleaned)
            #print "name:",d['name']
            #print "cleaned points:",d['points']
            #print "from:",areacoords
            #raise Exception()
            out.append(d)
        #if pa['name'].lower().count("esrange"):
        #    print "Exit esrange"
        #    sys.exit(1)
                    
    return out
Esempio n. 36
0
def parse_space(lines):    
    idx=[0]
    out=[]
    last=[None]
    translate=dict(
        ATS='TMA',
        TIA='TMA',
        TIZ='CTR',
        TMA='TMA',
        CTR='CTR',
        DANGER="R",
        RESTRICTED="R"
        )
    try:
        def getline():
            if idx[0]==len(lines):
                raise StopIteration()
            r=lines[idx[0]]
            idx[0]+=1
            last[0]=r
            return r
        def peek():
            if idx[0]==len(lines):
                raise StopIteration()
            r=lines[idx[0]]
            return r
        def get(what):
            splat=getline().split("=")
            if len(splat)!=2:
                raise Exception("Expected <%s=?>, got <%s>"%(what,splat))
            key,val=splat
            if key!=what:
                raise Exception("Expected <%s>, got <%s>"%(what,splat))
            assert key==what
            return val.strip()
        def isnext(what):
            line=peek()
            if line.startswith(what):
                return True
            return False
        while True:
            TYPE=get("TYPE")
            freqs=[]
            if isnext("SUBTYPE"):
                SUBTYPE=get("SUBTYPE")
            else:
                SUBTYPE=None
            if isnext("REF"):
                REF=get("REF")
                if isnext("ACTIVE"):
                    getline()     
                if isnext("TITLE"):                       
                    TITLE=get("TITLE")
                else:
                    TITLE=None
                CLASS=SUBTYPE=RADIO=None
                if TYPE=="DANGER":
                    name="D-"+REF
                else:
                    name=REF
                type_=translate[TYPE]
            else:
                if not SUBTYPE:
                    SUBTYPE=get("SUBTYPE")                
                type_=translate[SUBTYPE]
                CLASS=get("CLASS")
                RADIO=get("RADIO")
                REF=None
                notes=[]
                while isnext("NOTES"):
                    notes.append(get("NOTES"))
                TITLE=get("TITLE")
                name=" ".join([TITLE.strip(),SUBTYPE])
                for radio in [RADIO]+notes:
                    radioname,freq=re.match(ur"(.*?)\s*(\d{3}\.\d{3}\s*(?:and)?)+",radio).groups()
                    fr=re.findall(ur"\d{3}\.\d{3}",freq)
                    for f in fr:
                        if float(f)<200.0:
                            freqs.append((radioname,float(f)))
            if isnext("BASE"):
                BASE=get("BASE")
                TOPS=get("TOPS")            
            print freqs
            points=[]
            area=[]
            while True:
                if isnext("POINT"):
                    p=get("POINT")
                    area.append(p)
                    continue
                if isnext('CLOCKWISE'):
                    radius,center,dest=re.match(ur"CLOCKWISE RADIUS=(\d+\.?\d*) CENTRE=(\d+\.?\d*N \d+\.?\d*E) TO=(\d+\.?\d*N \d+\.?\d*E)",getline()).groups()
                    area.append(ur"clockwise along an arc with radius %s NM centred on %s to the point %s"%(radius,center,dest))
                    continue
                if isnext('CIRCLE'):
                    l=getline()
                    radius,center=re.match(ur"CIRCLE RADIUS=(\d+\.?\d*) CENTRE=(\d+\.?\d*N \d+\.?\d*E)",l).groups()
                    area.append("A circle with radius %s NM centred on %s"%(radius,center))
                break
            points=" - ".join(area)
            if isnext("BASE"):
                BASE=get("BASE")
                TOPS=get("TOPS")            
            
            def elev(x):
                print x
                if x=="SFC": return "GND"
                if x=="UNL": return "UNL"
                if x.lower().startswith("fl"):
                    assert x[2:].isdigit()
                    return "FL%03d"%(int(x[2:]))
                assert x.isdigit()
                return "%d ft MSL"%(int(x),)
            floor=elev(BASE)
            ceiling=elev(TOPS)
            floorint=mapper.parse_elev(floor)
            ceilint=mapper.parse_elev(ceiling)
            if floorint>=9500 and ceilint>=9500:
                continue
            out.append(
                dict(
                     name=name,
                     floor=floor,
                     ceiling=ceiling,
                     freqs=freqs,
                     points=points,
                     type=type_,
                     date="2010-01-01T00:00:00Z")
                     )
    except StopIteration:
        pass
    except Exception:
        print "Last parsed:",last
        raise
    else:
        raise Exception("Unexpected erorr")
    return out
Esempio n. 37
0
def ee_parse_gen_r2(url):
    spaces=[]
    parser=lxml.html.HTMLParser()
    data,date=fetchdata.getdata(url,country='ee')
    parser.feed(data)
    tree=parser.close()
    print "Parsed tree"
    for tab in tree.xpath(".//table"):
        print "Found table"
        for idx,cand in enumerate(tab.xpath(".//tr")):
            if len(cand.getchildren())<3:
                continue
            space=dict()
            #print list(cand.getchildren())
            what,vert,remark=list(cand.getchildren())[0:3]         
            whattxt=alltext(what).replace(u"–","-").replace(u"\xa0"," ")
            
            verttxt=alltext(vert)
            
            while True:
                w=re.sub(ur"\(.*?\)","",whattxt)
                if w!=whattxt:
                    whattxt=w 
                    continue
                break
            
            #print idx,whattxt
            if idx<3:
                if idx==1: assert (whattxt.count("Identification") or whattxt.count("ateral limits"))
                if idx==2: assert whattxt.strip()=="1"
                continue 
            verttxt=verttxt.replace(u"\xa0",u" ")
            vertlines=[x for x in verttxt.split("\n") if x.strip()]
            if len(vertlines)==1:
                vertlines=[x for x in verttxt.split("  ") if x.strip()]
            print "Verlintes:",repr(vertlines)
            #print "wha------------------------ t",whattxt
            space['ceiling'],space['floor']=vertlines[:2]
            mapper.parse_elev(space['ceiling'])
            ifloor=mapper.parse_elev(space['floor'])
            if ifloor>=9500: continue
            lines=whattxt.split("\n")
            out=[]
            merged=""
            for line in lines[1:]:
                line=line.strip().replace(u"–","-")
                if line=="":continue
                if line.endswith("point"):
                    out.append(line+" ")
                    continue
                if line.endswith("ircle with radius of") or line.endswith(",") or line.endswith("on") or line.endswith("radius"):
                    merged=" ".join([merged,line])
                    print "<---Merged:",merged
                    continue
                if merged:
                    line=" ".join([merged,line])
                merged=""
                if not line.endswith("-"):
                    line=line+" -"
                out.append(line+"\n")
            
            space['name']=lines[0].strip()
            w="".join(out)
            print "Parsing:",w
            if space['name'].startswith('EER1 '):                
                w=ee_parse_tma2.eer1txt
                fir=mapper.parse_coord_str(ee_parse_tma2.firtxt,context='estonia')
                fir_context=[fir]
                space['points']=mapper.parse_coord_str(w,fir_context=fir_context)
            else:
                space['points']=mapper.parse_coord_str(w,context='estonia')
            space['type']='R'
            space['date']=date
            space['freqs']=[]
            space['url']=fetchdata.getrawurl(url,'ee')            
            spaces.append(space)
    return spaces