Example #1
def parse_doc(path, icao, country, title, category):
    """Fetch an AIP document (PDF or HTML), convert PDFs to HTML with pdftohtml,
    normalize the markup, cache the result under $SWFP_DATADIR/aiptext/<icao>/
    and return a metadata dict (name, url, checksum, date, blobname, ...)."""
    print "Parsing AIP doc"
    icao = icao.upper()
    assert len(icao) == 4
    url = fetchdata.getrawurl(path, country=country)
    ret = dict()
    ret['icao'] = icao
    ret['url'] = url
    ret['title'] = title
    ret['name'] = icao + " - " + title
    ret['category'] = category
    #data,nowdate=fetchdata.getdata(path,country=country,maxcacheage=7200)
    blobname = icao + "_" + category
    tmppath = os.path.join(os.getenv("SWFP_DATADIR"), "aiptext", icao)
    if not os.path.exists(tmppath):
        os.makedirs(tmppath)

    if path.lower().endswith("pdf"):
        outpath_inter = os.path.join(tmppath, blobname + ".tmp.html")

        def render(inputfile, outputfile):
            r = "pdftohtml -c -s -i -zoom 2 -noframes -nodrm %s %s" % (
                inputfile, outputfile
            )  #-s is not supported on older pdftohtml, and doesn't appear necessary either.
            print "running", r
            assert 0 == os.system(r)

        fetchdata.getcreate_derived_data_raw(path,
                                             outpath_inter,
                                             render,
                                             "html",
                                             country=country)

        whole = open(outpath_inter).read()

        fixed = (whole.replace("<BODY bgcolor=\"#A0A0A0\"",
                               "<BODY bgcolor=\"#FFFFFF\"").replace(
                                   "<TITLE>Microsoft Word - ", "<TITLE>"))

    else:
        assert path.endswith("html")
        fixed, date = fetchdata.getdata(path, country=country)

    cksum = md5.md5(fixed).hexdigest()
    outpath = os.path.join(tmppath, blobname + "." + cksum + ".html")
    f = open(outpath, "w")
    f.write(fixed)
    f.close()
    #print "Wrote raw:",out,outpath

    ret['checksum'] = cksum
    ret['date'] = fetchdata.get_filedate(outpath)
    ret['blobname'] = blobname

    return ret
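A minimal usage sketch for parse_doc, assuming the project's fetchdata module is importable and SWFP_DATADIR points at a writable data directory; the document path, ICAO code, title and category below are made-up placeholders:

# Hypothetical call; all arguments are placeholders, not real AIP data.
doc = parse_doc("aip/ES_AD_2_ESSA_en.pdf", "essa", "se",
                "Stockholm/Arlanda", "ad2")
print doc['name']                  # "ESSA - Stockholm/Arlanda"
print doc['checksum'], doc['date'], doc['blobname']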
Example #2
def parse_doc(path,icao,country,title,category):
    print "Parsing AIP doc"
    icao=icao.upper()
    assert len(icao)==4
    url=fetchdata.getrawurl(path,country=country)
    ret=dict()
    ret['icao']=icao
    ret['url']=url
    ret['title']=title
    ret['name']=icao+" - "+title
    ret['category']=category
    #data,nowdate=fetchdata.getdata(path,country=country,maxcacheage=7200)
    blobname=icao+"_"+category
    tmppath=os.path.join(os.getenv("SWFP_DATADIR"),"aiptext",icao)
    if not os.path.exists(tmppath):
        os.makedirs(tmppath)
    
    if path.lower().endswith("pdf"):
        outpath_inter=os.path.join(tmppath,blobname+".tmp.html")
        def render(inputfile,outputfile):
            r="pdftohtml -c -s -i -zoom 2 -noframes -nodrm %s %s"%(inputfile,outputfile)  #-s is not supported on older pdftohtml, and doesn't appear necessary either.
            print "running",r
            assert 0==os.system(r)
                
        fetchdata.getcreate_derived_data_raw(
                    path,outpath_inter,render,"html",country=country)
        
        whole=open(outpath_inter).read()
        
        fixed=(whole.replace("<BODY bgcolor=\"#A0A0A0\"","<BODY bgcolor=\"#FFFFFF\"")
                .replace("<TITLE>Microsoft Word - ","<TITLE>"))
        
    else:
        assert path.endswith("html")
        fixed,date=fetchdata.getdata(path,country=country)
        
    cksum=md5.md5(fixed).hexdigest()
    outpath=os.path.join(tmppath,blobname+"."+cksum+".html")
    f=open(outpath,"w")
    f.write(fixed)        
    f.close()
    #print "Wrote raw:",out,outpath
        
    ret['checksum']=cksum
    ret['date']=fetchdata.get_filedate(outpath)
    ret['blobname']=blobname
    
    return ret
Example #3
def load_xml(self,path,loadhook=None,country="se"):
    """Fetch the XML for 'path', optionally pass it through loadhook, collect
    the per-page fontspec id/size/color entries into self.fonts and return
    (url, parsed XML root)."""
    raw=fetchdata.getxml(path,country=country)

    if loadhook:
        print "Running loadhook"
        bef=raw
        raw=loadhook(raw)
        print "Bef==raw:",bef==raw
    url=fetchdata.getrawurl(path,country)
    xml=ElementTree.fromstring(raw)

    self.fonts=dict()
    for page in xml.getchildren():
        for fontspec in page.findall(".//fontspec"):
            fontid=int(fontspec.attrib['id'])
            fontsize=int(fontspec.attrib['size'])
            fontcolor=fontspec.attrib.get('color','#000000')
            if fontid in self.fonts:
                assert self.fonts[fontid]['size']==fontsize
            self.fonts[fontid]=dict(size=fontsize,color=fontcolor)

    return url,xml
Example #4
    def load_xml(self, path, loadhook=None, country="se"):
        raw = fetchdata.getxml(path, country=country)

        if loadhook:
            print "Running loadhook"
            bef = raw
            raw = loadhook(raw)
            print "Bef==raw:", bef == raw
        url = fetchdata.getrawurl(path, country)
        xml = ElementTree.fromstring(raw)

        self.fonts = dict()
        for page in xml.getchildren():
            for fontspec in page.findall(".//fontspec"):
                fontid = int(fontspec.attrib['id'])
                fontsize = int(fontspec.attrib['size'])
                fontcolor = fontspec.attrib.get('color', '#000000')
                if fontid in self.fonts:
                    assert self.fonts[fontid]['size'] == fontsize
                self.fonts[fontid] = dict(size=fontsize, color=fontcolor)

        return url, xml
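A minimal usage sketch for load_xml; it is written as a method, so this assumes a hypothetical parser class (AipParser is a made-up name) that defines it, and that fetchdata.getxml returns XML with per-page fontspec elements; the path is a placeholder:

# Hypothetical usage; AipParser and the path are placeholders.
parser = AipParser()
url, xml = parser.load_xml("aip/ES_AD_2_ESSA_en.xml", country="se")
print "Source URL:", url
print "Pages:", len(xml.getchildren())
print "Fonts:", parser.fonts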
Example #5
def parse_landing_chart(path,arppos,icao,country='se',variant=''):
    """Fetch an aerodrome chart (PDF or image), render it to a ~2500 px PNG,
    read the page size in mm via pdfinfo, convert to an 8-bit palette PNG,
    run chop_up() for levels 0-4 and return the chart metadata dict."""
    icao=icao.upper()
    if variant and not variant.startswith("."):
        variant="."+variant
    print "Running parse_landing_chart"
    print "country:",country
    #p=parse.Parser(path,country=country)
    arppos=mapper.from_str(arppos)
    res=[]    
    #assert p.get_num_pages()<=2
    url=fetchdata.getrawurl(path,country=country)
    ret=dict()
    ret['url']=url
    data,nowdate=fetchdata.getdata(path,country=country,maxcacheage=7200)
    cksum=md5.md5(data).hexdigest()
    ret['checksum']=cksum
    #page=p.parse_page_to_items(0, donormalize=False)
    #ret['width']=page.width
    #ret['height']=page.height
    #width=page.width
    #height=page.height
    #scale=2048.0/min(width,height)
    #width*=scale
    #height*=scale
    #width=int(width+0.5)
    #height=int(height+0.5)
    
    blobname=icao+variant
    
    tmppath=os.path.join(os.getenv("SWFP_DATADIR"),"adcharts",icao)
    if not os.path.exists(tmppath):
        os.makedirs(tmppath)
    assert len(icao)==4
    outpath=os.path.join(tmppath,blobname+"."+cksum+".png")
    def render(inputfile,outputfile):
        ext=inputfile.split(".")[-1].lower()
        if ext=='jpg' or ext=='png':
            assert 0==os.system("convert -adaptive-resize 2500x2500 %s %s"%(inputfile,outputfile))            
        else:
            ext='pdf'
            r="pdftoppm -f 0 -l 0 -scale-to 2500 -png -freetype yes -aa yes -aaVector yes %s >%s"%(
                      inputfile,outputfile)
            print "rendering",r
            assert 0==os.system(r)
            
    ret['image']=blobname+"."+cksum+".png"
    fetchdata.getcreate_derived_data_raw(
                path,outpath,render,"png",country=country)

    
    fspath=fetchdata.getdatafilename(path,country=country)
    sizepts=None
    for line in os.popen("pdfinfo "+fspath):        
        m=re.match(r"\s*.age\s+size:\s*(\d+\.?\d*)\s*x\s*(\d+\.?\d*)\s*pts.*",line)
        if m:
            sizepts=(float(m.groups()[0]),float(m.groups()[1]))
    if sizepts:
        sizemm=(0.3527*sizepts[0],0.3527*sizepts[1])
        ret['mapsize']=sizemm
        print "Mapsize:",sizemm
    else:
        raise Exception("No size of this PDF!") 
    
    
    outpath2=os.path.join(tmppath,blobname+"."+cksum+".2.png")
    def greyscale(input,output):
        assert 0==os.system("convert -define png:color-type=3 -depth 8 -type Palette -define \"png:compression-level=9\" %s %s"%(input,output))
    
    fetchdata.getcreate_local_data_raw(
                outpath,outpath2,greyscale)
    i=Image.open(outpath2)
    width,height=i.size
    #ret['width']=page.width
    #ret['height']=page.height    
    ret['render_width']=width
    ret['render_height']=height
 
    if country!='raw':
        icao_prefix=get_icao_prefix(country)
        assert icao.startswith(icao_prefix)
    
    for level in xrange(5):
        hashpath=os.path.join(tmppath,"%s.%s-%d.bin"%(blobname,cksum,level))
        fetchdata.getcreate_local_data_raw(
                    outpath2,hashpath,lambda input,output:chop_up(input,output,level))    

    
    ret['blobname']=blobname
    ret['variant']=variant
    
    return ret
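A minimal usage sketch for parse_landing_chart, assuming fetchdata, mapper, PIL (Image) and the pdftoppm/pdfinfo/convert command-line tools are available; the chart path, ARP position string and variant are placeholders (the exact format mapper.from_str accepts is not shown in this snippet):

# Hypothetical call; path, ARP position and variant are placeholders.
chart = parse_landing_chart("aip/ES_AD_2_ESSA_2_1.pdf",
                            "N59 39.1 E017 55.1",   # placeholder ARP string
                            "ESSA", country="se", variant="landing")
print chart['image'], chart['mapsize']
print chart['render_width'], chart['render_height']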