Code example #1
Rating: 0
File: ev_parse_tma.py — Project: avl/SwFlightPlanner
def ev_parse_obst():
    """Parse the obstacle table from the Latvian eAIP, section EV-ENR-5.4.

    Fetches the HTML page for the current AIRAC cycle and returns a list of
    dicts with keys: name, pos, height, elev, lighting, kind.
    """
    cur_airac = get_cur_airac()
    url = "/eAIPfiles/%s-AIRAC/html/eAIP/EV-ENR-5.4-en-GB.html" % (cur_airac,)
    # url="/EV-ENR-5.4-en-GB.html"
    parser = lxml.html.HTMLParser()
    data, date = fetchdata.getdata(url, country="ev")
    parser.feed(data)
    tree = parser.close()
    # (removed unused local `got_fir` — it was never read)
    res = []
    for table in tree.xpath("//table"):
        for row in table.xpath(".//tr"):
            tds = row.xpath(".//td")
            # Obstacle rows have exactly five cells; skip anything else.
            if len(tds) != 5:
                continue
            # `kind_` renamed from `type` to avoid shadowing the builtin.
            name, kind_, coord, elev, light = [alltext(x) for x in tds]
            # The elevation cell holds "elevation/height"; split raises if
            # the cell does not contain exactly one "/".
            elev, height = elev.split("/")
            res.append(
                dict(
                    name=name,
                    pos=mapper.parsecoord(coord),
                    height=mapper.parse_elev(height.strip()),
                    elev=mapper.parse_elev(elev),
                    lighting=light,
                    kind=kind_,
                )
            )
    return res
Code example #2
Rating: 0
File: ev_parse_tma.py — Project: avl/SwFlightPlanner
def ev_parse_r():
    """Collect airspace data from eAIP sections EV-ENR 5.1, 5.2, 5.3 and 5.5."""
    cur_airac = get_cur_airac()
    # url="/eAIPfiles/%s-AIRAC/html/eAIP/EV-ENR-2.1-en-GB.html"%(cur_airac,)

    out = []
    # One fetch per ENR section; URLs are identical apart from the section number.
    for section in ("5.1", "5.2", "5.3", "5.5"):
        url = "/eAIPfiles/%s-AIRAC/html/eAIP/EV-ENR-%s-en-GB.html" % (cur_airac, section)
        out.extend(ev_parse_x(url=url))

    return out
Code example #3
Rating: 0
File: ev_parse_tma.py — Project: dimme/SwFlightPlanner
def ev_parse_r():
    """Gather airspace entries from the four EV-ENR 5.x eAIP pages."""
    cur_airac = get_cur_airac()
    #url="/eAIPfiles/%s-AIRAC/html/eAIP/EV-ENR-2.1-en-GB.html"%(cur_airac,)

    template = "/eAIPfiles/%s-AIRAC/html/eAIP/EV-ENR-%s-en-GB.html"
    result = []
    # Sections 5.1-5.3 and 5.5 all share the same URL layout.
    for part in ("5.1", "5.2", "5.3", "5.5"):
        result.extend(ev_parse_x(url=template % (cur_airac, part)))

    return result
Code example #4
Rating: 0
def ev_parse_sigpoints():
    """Parse significant points from the Latvian eAIP, section EV-ENR-4.4.

    Returns a list of dicts with keys: name, kind, pos. A hard-coded set of
    manually maintained entry/exit and holding points is appended at the end.
    (Python 2 code: print statement and ur"" literals.)
    """
    out=[]
    parser=lxml.html.HTMLParser()
    airac=get_cur_airac()
    url="/eAIPfiles/%s-AIRAC/html/eAIP/EV-ENR-4.4-en-GB.html"%(airac)
    data,date=fetchdata.getdata(url,country='ev')
    parser.feed(data)
    tree=parser.close()
    for table in tree.xpath("//table"):
        #print "Table with %d children"%(len(table.getchildren()),)
        rows=list(table.xpath(".//tr"))
        for row in rows:
            # Rows containing <th> cells are headers; skip them.
            hdr=list(row.xpath(".//th"))
            if hdr: continue
            cols=list(row.xpath(".//td"))
            # NOTE(review): assumes every data row has at least two <td>
            # cells; a shorter row would raise IndexError here.
            pos=mapper.parsecoord(alltext(cols[1]))
            nameraw=alltext(cols[0])
            print "raw:",repr(nameraw)
            # Point names are expected to be exactly five word characters;
            # re.match returns None (-> AttributeError) on anything else.
            name,=re.match(ur"\s*(\w{5})\s*",nameraw).groups()

            out.append(dict(name=name,
                kind='sig. point',
                pos=pos))
              
    # Manually maintained points (format "NAME:coords:kind") that are not
    # present in the parsed eAIP table.
    for manual in """PARKS:570014N 0241039E:entry/exit point
VISTA:565002N 0241034E:entry/exit point
ARNIS:565427N 0234611E:entry/exit point
KISHI:565609N 0234608E:entry/exit point
HOLDING WEST:565530N 0235327E:holding point
HOLDING EAST:565351N 0240313E:holding point""".split("\n"):
        name,poss,kind=manual.split(":")
        out.append(dict(
            name=name.strip(),
            pos=mapper.parsecoord(poss),            
            kind=kind))
        

    return out
Code example #5
Rating: 0
File: ev_parse_tma.py — Project: dimme/SwFlightPlanner
def ev_parse_obst():
    """Parse the obstacle table from the Latvian eAIP, section EV-ENR-5.4.

    Fetches the page for the current AIRAC cycle and returns a list of
    dicts with keys: name, pos, height, elev, lighting, kind.
    """
    cur_airac = get_cur_airac()
    url = "/eAIPfiles/%s-AIRAC/html/eAIP/EV-ENR-5.4-en-GB.html" % (cur_airac, )
    #url="/EV-ENR-5.4-en-GB.html"
    parser = lxml.html.HTMLParser()
    data, date = fetchdata.getdata(url, country="ev")
    parser.feed(data)
    tree = parser.close()
    # (removed unused local `got_fir` — it was never read)
    res = []
    for table in tree.xpath("//table"):
        for row in table.xpath(".//tr"):
            tds = row.xpath(".//td")
            # Only rows with exactly five cells are obstacle entries.
            if len(tds) != 5: continue
            # `kind_` renamed from `type` to avoid shadowing the builtin.
            name, kind_, coord, elev, light = [alltext(x) for x in tds]
            # Elevation cell format is "elevation/height".
            elev, height = elev.split("/")
            res.append(
                dict(name=name,
                     pos=mapper.parsecoord(coord),
                     height=mapper.parse_elev(height.strip()),
                     elev=mapper.parse_elev(elev),
                     lighting=light,
                     kind=kind_))
    return res
Code example #6
Rating: 0
def ev_parse_airfields():
    """Parse per-airfield (AD 2) eAIP pages for the listed Latvian ICAO codes.

    NOTE(review): this excerpt appears truncated — the loop body ends right
    after parsing the CTR vertical limits, without ever populating or
    returning `ads`/`spaces`. Confirm against the full source.
    (Python 2 code: print statement and ur"" literals.)
    """
    ads=[]
    spaces=[]
    seen=set()
    cur_airac=get_cur_airac()
    assert cur_airac
    for icao in ["EVRA",
                "EVLA",
                "EVTJ",
                "EVVA"]:
        thrs=[]
        url="/eAIPfiles/%s-AIRAC/html/eAIP/EV-AD-2.%s-en-GB.html"%(cur_airac,icao)
        data,date=fetchdata.getdata(url,country='ev')
        parser=lxml.html.HTMLParser()
        parser.feed(data)
        tree=parser.close()
        # Per-airfield state filled in while scanning the AD tables below.
        elev=None
        pos=None
        ctrarea=None
        ctr=None
        ctralt=None
        ctrname=None
        adcharturl=None
        adchart=None
        # Exactly one ADName paragraph is expected per page.
        adnametag,=tree.xpath("//p[@class='ADName']")
        adnamestr=alltext(adnametag)
        print adnamestr
        # Page title looks like "EVRA - Riga"; the dash may be "-" or "—".
        name,=re.match(ur"%s\s*[-—]\s*([\w\s]+)"%(icao,),adnamestr,re.UNICODE).groups()
        freqs=[]
        for table in tree.xpath("//table"):
            rows=list(table.xpath(".//tr"))
            
            headings=list(table.xpath(".//th"))
            
            if len(headings)==5:
                # NOTE(review): headings[2] is an lxml element, so comparing
                # it to the string "Frequency" looks always-False; probably
                # alltext(headings[2]) was intended — confirm.
                if headings[2]=="Frequency":
                    for row in rows:
                        # NOTE(review): this reads cells from `table`, not
                        # `row` — likely a bug; verify against upstream.
                        cols=alltexts(table.xpath(".//td"))
                        desig,name=cols[0:2]
                        freq,=re.match(ur"\d{3}\.\d{3}\s*MHz",cols[2]).groups()
                        # 121.500 is the emergency frequency; not listed.
                        if freq!="121.500":
                            freqs.append((desig+" "+name,float(freq)))                        
                        
                    continue
                
            
            for row in rows:
                cols=alltexts(row.xpath(".//td"))
                print "cols:",repr(cols)
                if len(cols)<2: continue
                # Aerodrome reference point coordinates.
                if not pos and re.match(ur".*ARP\s*coordinates.*",cols[1]):
                    pos,=mapper.parsecoords(cols[2])
                # Field elevation in feet.
                if not elev and re.match(ur"Elevation.*",cols[1]):
                    elev,=re.match(ur"(\d+) FT.*",cols[2]).groups()
                
                # CTR/TIZ name and lateral boundary description.
                if not ctr and re.match(ur"Designation\s*and\s*lateral\s*limits",cols[1]):
                    lines=cols[2].split("\n")
                    ctr=True
                    print "Got lateral limits",lines[0]
                    # The name+type may be split across the first two lines;
                    # retry with both joined if the first line alone fails.
                    try:
                        ctrname,type_=re.match(ur"^([\w\s]+)(CTR|TIZ)",lines[0]).groups()
                        ctrarea=" ".join(lines[1:])
                    except:
                        ctrname,type_=re.match(ur"^([\w\s]+)(CTR|TIZ)",lines[0]+lines[1]).groups()
                        ctrarea=" ".join(lines[2:])
                    assert ctrname.strip()
                    ctrname=ctrname.strip()+" "+type_
                    
                #print ".",cols[1],"."
                # CTR vertical limits: either "ceiling/floor" or a bare
                # ceiling with an implicit GND floor.
                if not ctralt and re.match(ur".*Vertical\s*limits.*",cols[1],re.UNICODE):
                    ctralt=True
                    #print "<",cols[2],">"
                    alts=cols[2].split("/")
                    if len(alts)==1:                    
                        ceiling=alts[0]
                        floor="GND"
                    else:
                        ceiling,floor=alts
                    print "Parsed",ceiling,floor
Code example #7
Rating: 0
File: ev_parse_tma.py — Project: dimme/SwFlightPlanner
def ev_parse_tma():
    """Parse TMA and FIR airspace from the Latvian eAIP, section EV-ENR-2.1.

    Builds a list of airspace dicts (name, points, type, freqs, floor,
    ceiling, url, date; plus icao for the FIR entry).
    NOTE(review): this excerpt appears truncated — `got_fir` is set but
    never checked and no `return out` is visible; confirm against the
    full source. (Python 2 code: print statement and ur"" literals.)
    """
    out = []
    parser = lxml.html.HTMLParser()
    #url="/Latvia_EV-ENR-2.1-en-GB.html"
    cur_airac = get_cur_airac()
    url = "/eAIPfiles/%s-AIRAC/html/eAIP/EV-ENR-2.1-en-GB.html" % (cur_airac, )

    data, date = fetchdata.getdata(url, country='ev')
    parser.feed(data)
    tree = parser.close()

    got_fir = False
    for table in tree.xpath("//table"):
        #print "Table with %d children"%(len(table.getchildren()),)
        rows = list(table.xpath(".//tr"))
        # Find the 5-column heading row among the first five rows.
        for idx in xrange(5):
            headingrow = rows[idx]
            cols = list(headingrow.xpath(".//th"))
            #print len(cols)
            if len(cols) == 5:
                break
        else:
            raise Exception("No heading row")
        # In practice the heading is always the very first row.
        assert idx == 0
        #for idx,col in enumerate(cols):
        #    print "Col %d, %s"%(idx,alltext(col)[:10])
        nameh, unith, callsignh, freqh, remarkh = cols
        # Sanity-check the expected column order by heading text.
        assert alltext(nameh).lower().count("name")
        assert alltext(unith).lower().count("unit")
        assert re.match(ur"call\s*sign", alltext(callsignh).lower())
        lastcols = None
        for row in rows[1:]:
            cols = list(row.xpath(".//td"))
            if len(cols) == 5:
                name, unit, callsign, freq, remark = cols
                lastcols = cols
            else:
                # Continuation row: reuse unit/callsign/freq/remark from the
                # previous full row; only the name cell is new.
                if lastcols:
                    unit, callsign, freq, remark = lastcols[1:]
                    name = cols[0]
                else:
                    continue

            lines = [x.strip() for x in alltext(name).split("\n") if x.strip()]
            if len(lines) == 0: continue
            spacename = lines[0].strip()

            # UTA/CTA/AOR spaces are intentionally excluded.
            if re.match(ur"RIGA\s*UTA|RIGA\s*CTA|RIGA\s*AOR.*", spacename):
                continue
            freqstr = alltext(freq)
            callsignstr = alltext(callsign)
            if freqstr.strip():
                print freqstr
                freqmhzs = re.findall(ur"\d{3}\.\d{3}", freqstr)
                assert len(freqmhzs) <= 2
                # NOTE(review): only the first callsign line is kept, but up
                # to two frequencies are indexed below — a second frequency
                # would raise IndexError; confirm intended.
                callsigns = [callsignstr.split("\n")[0].strip()]
                freqs = []
                for idx, freqmhz in enumerate(freqmhzs):
                    # Skip the emergency frequency.
                    if freqmhz == '121.500': continue
                    freqs.append((callsigns[idx], float(freqmhz)))
                print "freqs:", freqs
            else:
                freqs = []
            assert len(lines)

            # Last line mentioning "class of airspace" anchors the layout:
            # the altitude lines sit immediately above it.
            classidx = next(idx for idx, x in reversed(list(enumerate(lines)))
                            if x.lower().count("class of airspace"))

            if re.match(ur"RIGA\s*FIR.*UIR", spacename, re.UNICODE):
                got_fir = True
                lastspaceidx = classidx - 2
                floor = "GND"
                ceiling = "-"
                type_ = "FIR"
            else:
                # Altitudes are either one "floor/ceiling" line or two
                # separate lines (ceiling above floor).
                if lines[classidx - 1].count("/") == 1:
                    floor, ceiling = lines[classidx - 1].split("/")
                    lastspaceidx = classidx - 1
                else:
                    floor = lines[classidx - 1]
                    ceiling = lines[classidx - 2]
                    lastspaceidx = classidx - 2
                ceiling = strangefix(ceiling)
                floor = strangefix(floor)

                # Parse only to validate; results are discarded here.
                mapper.parse_elev(ceiling)
                mapper.parse_elev(floor)
                type_ = "TMA"
            tcoords = lines[1:lastspaceidx]
            #verify that we got actual altitudes:
            coords = []
            for coord in tcoords:
                # Strip parenthesised annotations that are not coordinates.
                coord = coord.strip().replace("(counter-)", "").replace(
                    "(RIGA DVOR - RIA)", "")
                # A trailing E/W with no separator gets an explicit " -".
                if coord.endswith(u"E") or coord.endswith("W"):
                    coord = coord + " -"
                coords.append(coord)

            raw = " ".join(coords)
            raw = re.sub(
                s(ur"Area bounded by lines successively joining the following points:"
                  ), "", raw)
            print "Raw:", raw
            coords = mapper.parse_coord_str(raw, context='latvia')
            for cleaned in clean_up_polygon(coords):
                out.append(
                    dict(name=spacename,
                         points=cleaned,
                         type=type_,
                         freqs=freqs,
                         floor=floor,
                         url=url,
                         date=date,
                         ceiling=ceiling))
                if type_ == 'FIR':
                    out[-1]['icao'] = "EVRR"
Code example #8
Rating: 0
File: ev_parse_tma.py — Project: avl/SwFlightPlanner
def ev_parse_tma():
    """Parse TMA and FIR airspace from the Latvian eAIP, section EV-ENR-2.1.

    Builds a list of airspace dicts (name, points, type, freqs, floor,
    ceiling, url, date; plus icao for the FIR entry).
    NOTE(review): this excerpt appears truncated — `got_fir` is set but
    never checked and no `return out` is visible; confirm against the
    full source. (Python 2 code: print statement and ur"" literals.)
    """
    out = []
    parser = lxml.html.HTMLParser()
    # url="/Latvia_EV-ENR-2.1-en-GB.html"
    cur_airac = get_cur_airac()
    url = "/eAIPfiles/%s-AIRAC/html/eAIP/EV-ENR-2.1-en-GB.html" % (cur_airac,)

    data, date = fetchdata.getdata(url, country="ev")
    parser.feed(data)
    tree = parser.close()

    got_fir = False
    for table in tree.xpath("//table"):
        # print "Table with %d children"%(len(table.getchildren()),)
        rows = list(table.xpath(".//tr"))
        # Locate the 5-column heading row among the first five rows.
        for idx in xrange(5):
            headingrow = rows[idx]
            cols = list(headingrow.xpath(".//th"))
            # print len(cols)
            if len(cols) == 5:
                break
        else:
            raise Exception("No heading row")
        # In practice the heading is always the first row.
        assert idx == 0
        # for idx,col in enumerate(cols):
        #    print "Col %d, %s"%(idx,alltext(col)[:10])
        nameh, unith, callsignh, freqh, remarkh = cols
        # Sanity-check the expected column order by heading text.
        assert alltext(nameh).lower().count("name")
        assert alltext(unith).lower().count("unit")
        assert re.match(ur"call\s*sign", alltext(callsignh).lower())
        lastcols = None
        for row in rows[1:]:
            cols = list(row.xpath(".//td"))
            if len(cols) == 5:
                name, unit, callsign, freq, remark = cols
                lastcols = cols
            else:
                # Continuation row: only the name cell is new; the rest is
                # carried over from the previous full row.
                if lastcols:
                    unit, callsign, freq, remark = lastcols[1:]
                    name = cols[0]
                else:
                    continue

            lines = [x.strip() for x in alltext(name).split("\n") if x.strip()]
            if len(lines) == 0:
                continue
            spacename = lines[0].strip()

            # UTA/CTA/AOR spaces are intentionally excluded.
            if re.match(ur"RIGA\s*UTA|RIGA\s*CTA|RIGA\s*AOR.*", spacename):
                continue
            freqstr = alltext(freq)
            callsignstr = alltext(callsign)
            if freqstr.strip():
                print freqstr
                freqmhzs = re.findall(ur"\d{3}\.\d{3}", freqstr)
                assert len(freqmhzs) <= 2
                # NOTE(review): only one callsign is kept but up to two
                # frequencies are indexed below — a second frequency would
                # raise IndexError; confirm intended.
                callsigns = [callsignstr.split("\n")[0].strip()]
                freqs = []
                for idx, freqmhz in enumerate(freqmhzs):
                    # Skip the emergency frequency.
                    if freqmhz == "121.500":
                        continue
                    freqs.append((callsigns[idx], float(freqmhz)))
                print "freqs:", freqs
            else:
                freqs = []
            assert len(lines)

            # Last line mentioning "class of airspace" anchors the layout.
            classidx = next(idx for idx, x in reversed(list(enumerate(lines))) if x.lower().count("class of airspace"))

            if re.match(ur"RIGA\s*FIR.*UIR", spacename, re.UNICODE):
                got_fir = True
                lastspaceidx = classidx - 2
                floor = "GND"
                ceiling = "-"
                type_ = "FIR"
            else:
                # Altitudes: one "floor/ceiling" line, or two separate
                # lines with the ceiling above the floor.
                if lines[classidx - 1].count("/") == 1:
                    floor, ceiling = lines[classidx - 1].split("/")
                    lastspaceidx = classidx - 1
                else:
                    floor = lines[classidx - 1]
                    ceiling = lines[classidx - 2]
                    lastspaceidx = classidx - 2
                ceiling = strangefix(ceiling)
                floor = strangefix(floor)

                # Parse only to validate the altitude strings.
                mapper.parse_elev(ceiling)
                mapper.parse_elev(floor)
                type_ = "TMA"
            tcoords = lines[1:lastspaceidx]
            # verify that we got actual altitudes:
            coords = []
            for coord in tcoords:
                # Strip parenthesised annotations that are not coordinates.
                coord = coord.strip().replace("(counter-)", "").replace("(RIGA DVOR - RIA)", "")
                # A trailing E/W with no separator gets an explicit " -".
                if coord.endswith(u"E") or coord.endswith("W"):
                    coord = coord + " -"
                coords.append(coord)

            raw = " ".join(coords)
            raw = re.sub(s(ur"Area bounded by lines successively joining the following points:"), "", raw)
            print "Raw:", raw
            coords = mapper.parse_coord_str(raw, context="latvia")
            for cleaned in clean_up_polygon(coords):
                out.append(
                    dict(
                        name=spacename,
                        points=cleaned,
                        type=type_,
                        freqs=freqs,
                        floor=floor,
                        url=url,
                        date=date,
                        ceiling=ceiling,
                    )
                )
                if type_ == "FIR":
                    out[-1]["icao"] = "EVRR"