def get_translation_dictionary(request):
    """Return a JSON file including all translations stored in the database
    """    
    
    # Get the session
    session = request.session
    
    # Define language to get multilingual labels for the selected language
    # defaults to 'fr': french - this may be changed in the appconfig
    if 'lang' not in session:
        lang = request.registry.settings['default_language'].lower()
    else:
        lang = session['lang'].lower()

    # get the translation dictionary
    translationDico = get_translations(lang)

    return translationDico
Example No. 2
def get_translation_dictionary(request):
    """Return a JSON file including all translations stored in the database
    """

    # Get the session
    session = request.session

    # Define language to get multilingual labels for the selected language
    # defaults to 'fr': french - this may be changed in the appconfig
    if 'lang' not in session:
        lang = request.registry.settings['default_language'].lower()
    else:
        lang = session['lang'].lower()

    # get the translation dictionary
    translationDico = get_translations(lang)

    return translationDico
Example No. 3
def get_content(id, request):
    """ TODO....
        Explain how the whole thing works...
    """
    # Start a session
    session = request.session
    configs = DBSession.query(AppConfig).all()

    # initialize extract object
    extract = Extract(request)
    type = request.matchdict.get("type_")
    directprint = False
    if type == 'file':
        type = 'reduced'
        directprint = True
    for config in configs:
        if config.parameter not in ['crdppflogopath', 'cantonlogopath']:
            extract.baseconfig[config.parameter] = config.paramvalue
    extract.srid = db_config['srid']

    extract.topiclegenddir = request.static_url('crdppfportal:static/public/legend/')

    # Define language to get multilingual labels for the selected language
    # defaults to 'fr': french - this may be changed in the appconfig
    if 'lang' not in session:
        extract.baseconfig['lang'] = request.registry.settings['default_language'].lower()
    else:
        extract.baseconfig['lang'] = session['lang'].lower()
    extract.baseconfig['translations'] = get_translations(extract.baseconfig['lang'])
    # for simplification
    translations = extract.baseconfig['translations']

    # 1) If the ID of the parcel is set, get the basic attributes of the property,
    # else get the ID (id) of the selected parcel first using the X/Y coordinates of the center
    # ---------------------------------------------------------------------------------------------------
    featureinfo = get_feature_info(id, extract.srid, translations)  # '1_14127' # test parcel or '1_11340'
    extract.filename = extract.id + featureinfo['featureid']

    # 3) Get the list of all the restrictions by topicorder set in a column
    # ------------------------------------------
    extract.topics = DBSession.query(Topics).order_by(Topics.topicorder).all()

    # Configure the WMTS background layer

    defaultTiles = request.registry.settings['defaultTiles']
    wmts = {
        'url': request.registry.settings['wmts_getcapabilities_url'],
        'defaultTiles': defaultTiles,
        'layer': defaultTiles['wmtsname']
    }

    wmts_layer_ = wmts_layer(wmts['url'], wmts['layer'])
    extract.baseconfig['wmts'] = wmts

    base_wms_layers = request.registry.settings['app_config']['crdppf_wms_layers']
    map_buffer = request.registry.settings['app_config']['map_buffer']
    basemaplayers = {
        "baseURL": request.registry.settings['crdppf_wms'],
        "opacity": 1,
        "type": "WMS",
        "layers": base_wms_layers,
        "imageFormat": "image/png",
        "styles": "default",
        "customParams": {
            "TRANSPARENT": "true"
        }
    }

    municipality = featureinfo['nomcom'].strip()
    cadastre = featureinfo['nomcad'].strip()
    propertynumber = featureinfo['nummai'].strip()
    propertytype = featureinfo['type'].strip()
    propertyarea = featureinfo['area']
    report_title = translations[str(type+'extracttitlelabel')]

    # Like German, French contains a few accented characters that we have to replace to fetch the banner, whose path name contains no accents...
    conversion = [
        [u'â', 'a'],
        [u'ä', 'a'],
        [u'à', 'a'],
        [u'ô', 'o'],
        [u'ö', 'o'],
        [u'ò', 'o'],
        [u'û', 'u'],
        [u'ü', 'u'],
        [u'ù', 'u'],
        [u'î', 'i'],
        [u'ï', 'i'],
        [u'ì', 'i'],
        [u'ê', 'e'],
        [u'ë', 'e'],
        [u'è', 'e'],
        [u'é', 'e'],
        [u' ', ''],
        [u'-', '_'],
        [u'(NE)', ''],
        [u' (NE)', '']
    ]

    municipality_escaped = municipality.strip()

    for char in conversion:
        municipality_escaped = municipality_escaped.replace(char[0], char[1])
    extract.header['municipality_escaped'] = municipality_escaped

    municipalitylogodir = '/'.join([
        request.registry.settings['localhost_url'],
        'proj/images/ecussons/'])
    municipalitylogopath = municipalitylogodir + municipality_escaped + '.png'
    extract.header['municipalitylogopath'] = municipalitylogopath
    legenddir = '/'.join([
        request.registry.settings['localhost_url'],
        'proj/images/icons/'])

    # Get the raw feature BBOX
    extract.basemap['bbox'] = get_feature_bbox(id)
    bbox = extract.basemap['bbox']

    if bbox is False:
        log.warning('Found more than one bbox for id: %s' % id)
        return False

    # Get the feature center
    extract.basemap['feature_center'] = get_feature_center(id)
    feature_center = extract.basemap['feature_center']

    if feature_center is False:
        log.warning('Found more than one geometry for id: %s' % id)
        return False

    # Get the print BOX
    print_box = get_print_format(bbox, request.registry.settings['pdf_config']['fitratio'])
    map_bbox = get_mapbox(feature_center, print_box['scale'], map_buffer, print_box['height'], print_box['width'],
                          request.registry.settings['pdf_config']['fitratio'])

    log.warning('Calling feature: %s' % (request.route_url('get_property') + '?id=' + id))

    wkt_polygon = ''.join([
        'POLYGON((',
        str(bbox['minX'])+' ',
        str(bbox['minY'])+',',
        str(bbox['minX'])+' ',
        str(bbox['maxY'])+',',
        str(bbox['maxX'])+' ',
        str(bbox['maxY'])+',',
        str(bbox['maxX'])+' ',
        str(bbox['minY'])+',',
        str(bbox['minX'])+' ',
        str(bbox['minY']),
        '))'
    ])

    basemap = {
        "projection": "EPSG:"+str(extract.srid),
        "dpi": 150,
        "rotation": 0,
        "center": feature_center,
        "scale": print_box['scale']*map_buffer,
        "longitudeFirst": "true",
        "layers": [{
            "type": "geojson",
            "geoJson": request.route_url('get_property')+'?id='+id,
            "style": {
                "version": "2",
                "strokeColor": "gray",
                "strokeLinecap": "round",
                "strokeOpacity": 0.6,
                "[INTERSECTS(geometry, "+wkt_polygon+")]": {
                    "symbolizers": [{
                        "strokeColor": "red",
                        "strokeWidth": 2,
                        "type": "line"
                    }]
                }
            }
        }, wmts_layer_
        ]
    }

    data = []
    topicdata = {}
    topicdata["doclist"] = []
    appconfig = extract.baseconfig
    concernedtopics = []
    notconcernedtopics = []
    emptytopics = []

    for topic in extract.topics:

        # for the federal data layers we get the restrictions calling the feature service and store the result in the DB
        if topic.topicid in extract.baseconfig['ch_topics']:
            xml_layers = []
            for xml_layer in topic.layers:
                xml_layers.append(xml_layer.layername)
            #  get_XML(feature['geom'], topic.topicid, extractcreationdate, lang, translations)

        topicdata[str(topic.topicid)] = {
            "categorie": 0,
            "docids": set([]),
            "topicname": topic.topicname,
            "bboxlegend": [],
            "layers": {},
            "legalbase": [],
            "legalprovision": [],
            "reference": [],
            "authority": {
                "authorityuuid": topic.authority.authorityid,
                "authorityname": topic.authority.authorityname,
                "authorityurl": topic.authority.authoritywww
                },
            "topicorder": topic.topicorder,
            "authorityfk": topic.authorityfk,
            "publicationdate": topic.publicationdate
            }

        # if geographic layers are defined for the topic, get the list of all layers and then
        # check for each layer the information regarding the features touching the property
        if topic.layers:
            topicdata[str(topic.topicid)]['wmslayerlist'] = []

            for layer in topic.layers:
                topicdata[str(topic.topicid)]["layers"][layer.layerid] = {
                    "layername": layer.layername,
                    "features": None
                    }
                topicdata[str(topic.topicid)]['wmslayerlist'].append(layer.layername)

                # intersects a given layer with the feature and adds the results to the topicdata- see method add_layer
                layerdata = add_layer(layer, propertynumber, featureinfo, translations, appconfig, topicdata)
                if layerdata is not None:
                    topicdata[str(topic.topicid)]['layers'][layer.layerid]['features'] = layerdata[str(layer.layerid)]['features']
                    topicdata[str(topic.topicid)]['categorie'] = 3
                    for restrictionid in layerdata[str(layer.layerid)]['ids']:
                        docfilters = [restrictionid]
                        for doctype in appconfig['doctypes'].split(','):
                            docfilters.append(doctype)
                            docidlist = get_document_ref(docfilters)
                            docs = set_documents(str(layer.topicfk), doctype, docidlist, featureinfo, False, topicdata[str(topic.topicid)]['docids'])
                            for doc in docs:
                                topicdata[str(topic.topicid)]['docids'].add(doc['documentid'])
                                del doc['documentid']
                                topicdata[str(topic.topicid)][doctype].append(doc)
                else:
                    topicdata[str(topic.topicid)]['layers'][layer.layerid] = {'layername': layer.layername, 'features': None}
                    if topicdata[str(topic.topicid)]['categorie'] != 3:
                        topicdata[str(topic.topicid)]['categorie'] = 1

                # get the legend entries in the map bbox not touching the features
                featurelegend = get_legend_classes(to_shape(featureinfo['geom']), layer.layername, translations, extract.srid)
                bboxlegend = get_legend_classes(to_shape(WKTElement(map_bbox)), layer.layername, translations, extract.srid)
                bboxitems = set()
                for legenditem in bboxlegend:
                    if legenditem not in featurelegend:
                        bboxitems.add(tuple(legenditem.items()))
                if len(bboxitems) > 0:
                    for el in bboxitems:
                        legendclass = dict((x, y) for x, y in el)
                        legendclass['codegenre'] = legenddir+legendclass['codegenre']+".png"
                        topicdata[str(topic.topicid)]["bboxlegend"].append(legendclass)
            # Get the list of documents related to a topic with layers and results
            if topicdata[str(layer.topicfk)]["categorie"] == 3:
                docfilters = [str(topic.topicid)]
                for doctype in appconfig["doctypes"].split(','):
                    docidlist = get_document_ref(docfilters)
                    docs = set_documents(str(topic.topicid), doctype, docidlist, featureinfo, True, topicdata[str(topic.topicid)]['docids'])
                    for doc in docs:
                        topicdata[str(topic.topicid)]['docids'].add(doc['documentid'])
                        del doc['documentid']
                        topicdata[str(topic.topicid)][doctype].append(doc)
        else:
            if str(topic.topicid) in appconfig['emptytopics']:
                emptytopics.append(topic.topicname)
                topicdata[str(topic.topicid)]['layers'] = None
                topicdata[str(topic.topicid)]['categorie'] = 0
            else:
                topicdata[str(topic.topicid)]['layers'] = None
                topicdata[str(topic.topicid)]['categorie'] = 1

        if topicdata[str(topic.topicid)]['categorie'] == 1:
            notconcernedtopics.append(topic.topicname)

        if topicdata[str(topic.topicid)]["categorie"] == 3:
            appendiceslist = []

            for i, legalprovision in enumerate(topicdata[str(topic.topicid)]["legalprovision"]):
                if legalprovision["officialtitle"] != "":
                    appendiceslist.append(['A'+str(i+1), legalprovision["officialtitle"]])
            for doctype in appconfig["doctypes"].split(','):
                if topicdata[str(topic.topicid)][doctype] == []:
                    topicdata[str(topic.topicid)][doctype] = [{
                        "officialtitle": "",
                        "title": '',
                        "remoteurl": ""
                    }]

            concernedtopics.append({
                "topicname": topic.topicname,
                "documentlist": {
                    "columns": ["appendixno", "appendixtitle"],
                    "data": appendiceslist
                }
            })

            if topicdata[str(topic.topicid)]['layers']:
                topicdata[str(topic.topicid)]["restrictions"] = []

                for layer in topicdata[str(topic.topicid)]['layers']:
                    if topicdata[str(topic.topicid)]['layers'][layer]['features']:
                        for feature in topicdata[str(topic.topicid)]['layers'][layer]['features']:
                            if 'teneur' in feature.keys() and feature['teneur'] is not None and feature['statutjuridique'] is not None:
                                if feature['codegenre'] is None:
                                    feature['codegenre'] = '9999'
                                if isinstance(feature['codegenre'], int):
                                    feature['codegenre'] = str(feature['codegenre'])
                                if feature['geomType'] == 'area':
                                    topicdata[str(topic.topicid)]["restrictions"].append({
                                        "codegenre": legenddir+feature['codegenre']+".png",
                                        "teneur": feature['teneur'],
                                        "area": feature['intersectionMeasure'].replace(' : ', '').replace(' - ', '').replace('[m2]', 'm<sup>2</sup>'),
                                        "area_pct": round((float(
                                            feature['intersectionMeasure'].replace(' : ', '').replace(' - ', '').replace(' [m2]', ''))*100)/propertyarea, 1)
                                    })
                                else:
                                    topicdata[str(topic.topicid)]["restrictions"].append({
                                        "codegenre": legenddir+feature['codegenre']+".png",
                                        "teneur": feature['teneur'],
                                        "area": feature['intersectionMeasure'].replace(' - ', '').replace(' : ', '').replace('[m]', 'm'),
                                        "area_pct": -1
                                    })
                            else:
                                for property, value in feature.iteritems():
                                    if value is not None and property != 'featureClass':
                                        if isinstance(value, float) or isinstance(value, int):
                                            value = str(value)
            topiclayers = {
                "baseURL": request.registry.settings['crdppf_wms'],
                "opacity": 1,
                "type": "WMS",
                "layers": topicdata[str(topic.topicid)]['wmslayerlist'],
                "imageFormat": "image/png",
                "styles": "default",
                "customParams": {
                    "TRANSPARENT": "true"
                }
            }

            # This defines the "general" map that we copy several times:
            # once as the base map and once for each topic map.
            map = {
                "projection": "EPSG:"+str(extract.srid),
                "dpi": 150,
                "rotation": 0,
                "center": feature_center,
                "scale": print_box['scale']*map_buffer,
                "longitudeFirst": "true",
                "layers": [
                    {
                        "type": "geojson",
                        "geoJson": request.route_url('get_property')+'?id='+id,
                        "style": {
                            "version": "2",
                            "strokeColor": "gray",
                            "strokeLinecap": "round",
                            "strokeOpacity": 0.6,
                            "[INTERSECTS(geometry, "+wkt_polygon+")]": {
                                "symbolizers": [
                                    {
                                        "strokeColor": "red",
                                        "strokeWidth": 2,
                                        "type": "line"
                                    }
                                ]
                            }
                        }
                    },
                    topiclayers,
                    basemaplayers
                ]
            }

            if topicdata[str(topic.topicid)]["bboxlegend"] == []:
                topicdata[str(topic.topicid)]["bboxlegend"] = [{
                    "codegenre": "",
                    "teneur": ""
                    }]

            data.append({
                "topicname": topic.topicname,
                "map": map,
                "restrictions": topicdata[str(topic.topicid)]["restrictions"],
                "bboxlegend": topicdata[str(topic.topicid)]["bboxlegend"],
                "completelegend": extract.topiclegenddir+str(topic.topicid)+'_topiclegend.pdf',
                "legalbases": topicdata[str(topic.topicid)]["legalbase"],
                "legalprovisions": topicdata[str(topic.topicid)]["legalprovision"],
                "references": topicdata[str(topic.topicid)]["reference"],
                "authority": [
                    topicdata[str(topic.topicid)]["authority"]
                ]
            })

    d = {
        "attributes": {
            "reporttype": type,
            "directprint": directprint,
            "extractcreationdate": extract.creationdate,
            "filename": extract.filename,
            "extractid": extract.id,
            "map": basemap,
            "municipality": municipality,
            "cadastre": cadastre,
            "cadastrelabel": "Cadastre",
            "propertytype": propertytype,
            "propertynumber": propertynumber,
            "EGRIDnumber": featureinfo['egrid'],
            "municipalitylogopath": municipalitylogopath,
            "federalmunicipalitynumber": featureinfo['nufeco'],
            "competentauthority": extract.baseconfig['competentauthority'],
            "titlepage": [{
                "title": report_title,
                "certificationinstance": "",
                "certificationtext": "",
            }],
            "concernedtopics":  concernedtopics,
            "notconcernedtopics": ";".join(notconcernedtopics),
            "emptytopics": ";".join(emptytopics),
            "propertyarea": propertyarea,
            "datasource": data
        },
        "layout": "report",
        "outputFilename": extract.filename,
        "outputFormat": "pdf"
    }

    # import json
    # pretty printed json data for the extract
    # jsonfile = open('C:/Temp/'+extract.filename+'.json', 'w')
    # jsondata = json.dumps(d, indent=4)
    # jsonfile.write(jsondata)
    # jsonfile.close()

    return d
Example No. 4
def create_extract(request):
    """The function collects alle the necessary data from the subfunctions and classes
       and then writes the pdf file of the extract."""

    # Start a session
    session = request.session

    logon = request.registry.settings['logon']
    if logon == 'False':
        logon = False
    else:
        logon = True

    log2 = None

    if logon is True:
        log.warning("Entering PDF extract")
        log2 = log

    # Create an instance of an extract
    extract = Extract(request, log2)

    if logon is True:
        log.warning("Created Extract class")
    # Define the extract type if not set in the request parameters
    # defaults to 'standard': no certification, with pdf attachments
    # other values:
    # certified: with certification and with all pdf attachments
    # reduced: no certification, no pdf attachments
    # reducedcertified: with certification, without pdf attachments

    extract.reportInfo = {}
    defaulttype = 'standard'

    if request.params.get('type'):
        extract.reportInfo['type'] = str(request.params.get('type').lower())
    else:
        extract.reportInfo['type'] = defaulttype

    # Define language to get multilingual labels for the selected language
    # defaults to 'fr': french - this may be changed in the appconfig
    if 'lang' not in session:
        lang = request.registry.settings['default_language'].lower()
    else:
        lang = session['lang'].lower()
    extract.translations = get_translations(lang)
    extract.lang = lang

    if logon is True:
        log.warning("Created language init")
    # GET the application configuration parameters such as base paths,
    # working directory and other default parameters
    extract.load_app_config(request.registry.settings['app_config'])

    if logon is True:
        log.warning("load_app_config()")
    # GET the PDF Configuration parameters such as the page layout, margins
    # and text styles
    extract.set_pdf_config(request.registry.settings['pdf_config'])

    if logon is True:
        log.warning("set_pdf_config()")
    # promote often used variables to facilitate coding
    pdfconfig = extract.pdfconfig

    if logon is True:
        log.warning("pdfconfig")

    translations = extract.translations

    if logon is True:
        log.warning("translations")
    # to get vars defined in the buildout  use : request.registry.settings['key']
    pdfconfig.sld_url = extract.sld_url

    if logon is True:
        log.warning("extract.sld_url")

# *************************
# MAIN PROGRAM PART
#=========================

    # 1) If the ID of the parcel is set, get the basic attributes of the property,
    # else get the ID (idemai) of the selected parcel first using the X/Y coordinates of the center
    # ----------------------------------------------------------------------------------------------------
    extract.featureInfo = get_feature_info(request, translations)  # '1_14127' # test parcel or '1_11340'

    featureInfo = extract.featureInfo

    # complete the dictionary for the parcel - to be put in the appconfig
    extract.featureInfo['operator'] = translations['defaultoperatortext']
    
    extract.featureid = featureInfo['featureid']
    extract.set_filename()

    # The get_print_format function determines the ideal paper format and orientation for the
    # extract. It is no longer strictly needed since the paper size has been fixed to A4 portrait
    # by the cantons, but we keep this code for now in case that decision is revisited.
    extract.printformat = get_print_format(featureInfo['BBOX'], pdfconfig.fitratio)

    # 2) Get the parameters for the paper format and the map based on the feature's geometry
    #---------------------------------------------------------------------------------------------------
    extract.get_map_format()

    # again we promote the variables one level
    printformat = extract.printformat

    # 3) Get the list of all the restrictions by topicorder set in a column
    #-------------------------------------------
    extract.topics = DBSession.query(Topics).order_by(Topics.topicorder).all()

    # Get the community name and escape special chars to place the logo in the header of the title page
    municipality = featureInfo['nomcom'].strip()

    if logon is True:
        log.warning('Town: %s', municipality)

    # Like German, French contains a few accented characters that we have to replace to fetch the banner, whose path name contains no accents...
    conversion = [
        [u'â', 'a'],
        [u'ä', 'a'],
        [u'à', 'a'],
        [u'ô', 'o'],
        [u'ö', 'o'],
        [u'ò', 'o'],
        [u'û', 'u'],
        [u'ü', 'u'],
        [u'ù', 'u'],
        [u'î', 'i'],
        [u'ï', 'i'],
        [u'ì', 'i'],
        [u'ê', 'e'],
        [u'ë', 'e'],
        [u'è', 'e'],
        [u'é', 'e'],
        [u' ', ''],
        [u'-', '_'],
        [u'(NE)', ''],
        [u' (NE)', '']
    ]

    municipality_escaped = municipality.strip()

    for char in conversion:
        municipality_escaped = municipality_escaped.replace(char[0], char[1])

    extract.municipalitylogopath = extract.appconfig.municipalitylogodir + municipality_escaped + '.png'

    extract.municipality = municipality # to clean up once code modified

    # Get the data for the federal data layers using the map extent
    if logon is True:
        log.warning('get XML from CH feature service')
        
    for topic in extract.topics:
        # for the federal data layers we get the restrictions calling the feature service and store the result in the DB
        if topic.topicid in extract.appconfig.ch_topics:
            xml_layers = []
            for xml_layer in topic.layers:
                xml_layers.append(xml_layer.layername)
            get_XML(extract.featureInfo['geom'], topic.topicid, pdfconfig.timestamp, lang, translations)

    if logon is True:
        log.warning('get XML from CH feature service DONE')

    # Create basemap
    extract.get_basemap()

    # 4) Create the title page for the pdf extract
    #--------------------------------------------------
    if logon is True:
        log.warning('get_site_map')

    extract.get_site_map()

    if logon is True:
        log.warning('get_site_map DONE')

    # 5) Create the pages of the extract for each topic in the list
    #---------------------------------------------------
    # Thematic pages
    count = 1
    for topic in extract.topics:
        if logon is True:
            log.warning("Begin of topic no %s, topic_id: %s", count, topic.topicid)
        add = extract.add_topic(topic)

        if logon is True:
            log.warning("End of topic no %s", count)
            count += 1
        # to print the topics in the right order - this could probably be done in a more elegant way
        extract.topicorder[topic.topicorder] = topic.topicid

    # Write pdf file to disc
    extract.get_title_page()

    if logon is True:
        log.warning("get_title_page")

    # Create the table of content
    #--------------------------------------------------
    extract.get_toc()

    if logon is True:
        log.warning("get_toc")
    # Create the list of appendices
    #--------------------------------------------------
    extract.Appendices()

    if logon is True:
        log.warning("Appendices")

    count = 1
    for topic in extract.topicorder.values():
        extract.write_thematic_page(topic)
        if logon is True:
            log.warning("write_thematic_page, page n° %s", count)
        count += 1
    # Set the page number once all the pages are printed
    for key in extract.pages.keys():
        extract.pages[key] = extract.pages[key].replace('{no_pg}', str(' ')+str(key))

    extract.output(pdfconfig.pdfpath+pdfconfig.pdfname+'.pdf','F')

    if logon is True:
        log.warning("File created")

    path = extract.appconfig.legaldocsdir + str('pas_disponible.pdf')
    exception = extract.appconfig.legaldocsdir + str('exception.pdf')
    
    j = 1
    appendicesfiles = []
    # If report type is not 'reduced': Add a title page in front of every attached pdf
    if extract.reportInfo['type'] != 'reduced' and extract.reportInfo['type'] != 'reducedcertified':
        appendicesfiles = [pdfconfig.pdfpath+pdfconfig.pdfname+'.pdf']
        for appendix in extract.appendix_entries:
            appendixfile = AppendixFile()
            appendixfile.creationdate = str(extract.creationdate)
            appendixfile.timestamp = str(extract.timestamp)
            appendixfile.reporttype = str(extract.reportInfo['type'])
            appendixfile.translations = get_translations(lang)
            appendixfile.current_page = ' A' + str(j)
            appendixfile.load_app_config(request.registry.settings['app_config'])
            appendixfile.set_pdf_config(request.registry.settings['pdf_config']) #extract.pdfconfig
            appendixfile.municipalitylogopath = appendixfile.appconfig.municipalitylogodir + municipality_escaped + '.png'
            appendixfile.municipality = municipality # to clean up once code modified
            appendixfile.add_page()
            appendixfile.set_margins(*pdfconfig.pdfmargins)
            appendixfile.set_y(55)
            appendixfile.set_link(str(j))
            appendixfile.set_font(*pdfconfig.textstyles['title3'])
            appendixfile.cell(15, 10, str('Annexe '+str(j)), 0, 1, 'L')
            appendixfile.multi_cell(0, 10, str(appendix['title']), 0, 'L')
            appendixfile.output(pdfconfig.pdfpath+pdfconfig.pdfname+'_a'+str(j)+'.pdf','F')
            appendicesfiles.append(pdfconfig.pdfpath+pdfconfig.pdfname+'_a'+str(j)+'.pdf')
            extract.cleanupfiles.append(pdfconfig.pdfpath+pdfconfig.pdfname+'_a'+str(j)+'.pdf')
            if appendix['path'] is not None:
                appendicesfiles.append(extract.appconfig.legaldocsdir+appendix['path'])
            else:
                appendicesfiles.append(exception)
            j += 1
        merger = PdfFileMerger()
        for appendixfile in appendicesfiles:
            try:
                merger.append(PdfFileReader(file(appendixfile, 'rb')))
            except:
                merger.append(PdfFileReader(file(exception, 'rb')))

        merger.write(pdfconfig.pdfpath+pdfconfig.pdfname+'.pdf')
        if logon is True:
            log.warning("Merge appendices")

    extract.clean_up_temp_files()

    pdffile = {'pdfurl':request.static_url('crdppfportal:static/public/pdf/'+pdfconfig.pdfname+'.pdf')}

    return pdffile
Example No. 5
def get_content(id, request):
    """ TODO....
        Explain how the whole thing works...
    """
    # Start a session
    session = request.session
    configs = DBSession.query(AppConfig).all()

    # initialize extract object
    extract = Extract(request)
    type = request.matchdict.get("type_")
    directprint = False
    if type == 'file':
        type = 'reduced'
        directprint = True
    for config in configs:
        if config.parameter not in ['crdppflogopath', 'cantonlogopath']:
            extract.baseconfig[config.parameter] = config.paramvalue
    extract.srid = db_config['srid']

    extract.topiclegenddir = request.static_url(
        'crdppfportal:static/public/legend/')

    # Define language to get multilingual labels for the selected language
    # defaults to 'fr': french - this may be changed in the appconfig
    if 'lang' not in session:
        extract.baseconfig['lang'] = request.registry.settings[
            'default_language'].lower()
    else:
        extract.baseconfig['lang'] = session['lang'].lower()
    extract.baseconfig['translations'] = get_translations(
        extract.baseconfig['lang'])
    # for simplification
    translations = extract.baseconfig['translations']

    # 1) If the ID of the parcel is set, get the basic attributes of the property,
    # else get the ID (id) of the selected parcel first using the X/Y coordinates of the center
    # ---------------------------------------------------------------------------------------------------
    featureinfo = get_feature_info(
        id, extract.srid, translations)  # '1_14127' # test parcel or '1_11340'
    extract.filename = extract.id + featureinfo['featureid']

    # 3) Get the list of all the restrictions by topicorder set in a column
    # ------------------------------------------
    extract.topics = DBSession.query(Topics).order_by(Topics.topicorder).all()

    # Configure the WMTS background layer

    defaultTiles = request.registry.settings['defaultTiles']
    wmts = {
        'url': request.registry.settings['wmts_getcapabilities_url'],
        'defaultTiles': defaultTiles,
        'layer': defaultTiles['wmtsname']
    }

    wmts_layer_ = wmts_layer(wmts['url'], wmts['layer'])
    extract.baseconfig['wmts'] = wmts

    base_wms_layers = request.registry.settings['app_config'][
        'crdppf_wms_layers']
    map_buffer = request.registry.settings['app_config']['map_buffer']
    basemaplayers = {
        "baseURL": request.registry.settings['crdppf_wms'],
        "opacity": 1,
        "type": "WMS",
        "layers": base_wms_layers,
        "imageFormat": "image/png",
        "styles": "default",
        "customParams": {
            "TRANSPARENT": "true"
        }
    }

    municipality = featureinfo['nomcom'].strip()
    cadastre = featureinfo['nomcad'].strip()
    propertynumber = featureinfo['nummai'].strip()
    propertytype = featureinfo['type'].strip()
    propertyarea = featureinfo['area']
    report_title = translations[str(type + 'extracttitlelabel')]

    # Like German, French contains a few accented characters that we have to replace to fetch the banner, whose path name contains no accents...
    conversion = [[u'â', 'a'], [u'ä', 'a'], [u'à', 'a'], [u'ô', 'o'],
                  [u'ö', 'o'], [u'ò', 'o'], [u'û', 'u'], [u'ü', 'u'],
                  [u'ù', 'u'], [u'î', 'i'], [u'ï', 'i'], [u'ì', 'i'],
                  [u'ê', 'e'], [u'ë', 'e'], [u'è', 'e'], [u'é', 'e'],
                  [u' ', ''], [u'-', '_'], [u'(NE)', ''], [u' (NE)', '']]

    municipality_escaped = municipality.strip()

    for char in conversion:
        municipality_escaped = municipality_escaped.replace(char[0], char[1])
    extract.header['municipality_escaped'] = municipality_escaped

    municipalitylogodir = '/'.join(
        [request.registry.settings['localhost_url'], 'proj/images/ecussons/'])
    municipalitylogopath = municipalitylogodir + municipality_escaped + '.png'
    extract.header['municipalitylogopath'] = municipalitylogopath
    legenddir = '/'.join(
        [request.registry.settings['localhost_url'], 'proj/images/icons/'])

    # Get the raw feature BBOX
    extract.basemap['bbox'] = get_feature_bbox(id)
    bbox = extract.basemap['bbox']

    if bbox is False:
        log.warning('Found more than one bbox for id: %s' % id)
        return False

    # Get the feature center
    extract.basemap['feature_center'] = get_feature_center(id)
    feature_center = extract.basemap['feature_center']

    if feature_center is False:
        log.warning('Found more than one geometry for id: %s' % id)
        return False

    # Get the print BOX
    print_box = get_print_format(
        bbox, request.registry.settings['pdf_config']['fitratio'])
    map_bbox = get_mapbox(feature_center, print_box['scale'], map_buffer,
                          print_box['height'], print_box['width'],
                          request.registry.settings['pdf_config']['fitratio'])

    log.warning('Calling feature: %s' % (request.route_url('get_property') + '?id=' + id))

    wkt_polygon = ''.join([
        'POLYGON((',
        str(bbox['minX']) + ' ',
        str(bbox['minY']) + ',',
        str(bbox['minX']) + ' ',
        str(bbox['maxY']) + ',',
        str(bbox['maxX']) + ' ',
        str(bbox['maxY']) + ',',
        str(bbox['maxX']) + ' ',
        str(bbox['minY']) + ',',
        str(bbox['minX']) + ' ',
        str(bbox['minY']), '))'
    ])

    basemap = {
        "projection": "EPSG:" + str(extract.srid),
        "dpi": 150,
        "rotation": 0,
        "center": feature_center,
        "scale": print_box['scale'] * map_buffer,
        "longitudeFirst": "true",
        "layers": [{
            "type": "geojson",
            "geoJson": request.route_url('get_property') + '?id=' + id,
            "style": {
                "version": "2",
                "strokeColor": "gray",
                "strokeLinecap": "round",
                "strokeOpacity": 0.6,
                "[INTERSECTS(geometry, " + wkt_polygon + ")]": {
                    "symbolizers": [{
                        "strokeColor": "red",
                        "strokeWidth": 2,
                        "type": "line"
                    }]
                }
            }
        }, wmts_layer_]
    }

    data = []
    topicdata = {}
    topicdata["doclist"] = []
    appconfig = extract.baseconfig
    concernedtopics = []
    notconcernedtopics = []
    emptytopics = []

    for topic in extract.topics:

        # for the federal data layers we get the restrictions calling the feature service and store the result in the DB
        if topic.topicid in extract.baseconfig['ch_topics']:
            xml_layers = []
            for xml_layer in topic.layers:
                xml_layers.append(xml_layer.layername)
            #  get_XML(feature['geom'], topic.topicid, extractcreationdate, lang, translations)

        topicdata[str(topic.topicid)] = {
            "categorie": 0,
            "docids": set([]),
            "topicname": topic.topicname,
            "bboxlegend": [],
            "layers": {},
            "legalbase": [],
            "legalprovision": [],
            "reference": [],
            "authority": {
                "authorityuuid": topic.authority.authorityid,
                "authorityname": topic.authority.authorityname,
                "authorityurl": topic.authority.authoritywww
            },
            "topicorder": topic.topicorder,
            "authorityfk": topic.authorityfk,
            "publicationdate": topic.publicationdate
        }

        # if geographic layers are defined for the topic, get the list of all layers and then
        # check for each layer the information regarding the features touching the property
        if topic.layers:
            topicdata[str(topic.topicid)]['wmslayerlist'] = []

            for layer in topic.layers:
                topicdata[str(topic.topicid)]["layers"][layer.layerid] = {
                    "layername": layer.layername,
                    "features": None
                }
                topicdata[str(topic.topicid)]['wmslayerlist'].append(
                    layer.layername)

                # intersects a given layer with the feature and adds the results to the topicdata- see method add_layer
                layerdata = add_layer(layer, propertynumber, featureinfo,
                                      translations, appconfig, topicdata)
                if layerdata is not None:
                    topicdata[str(topic.topicid)]['layers'][
                        layer.layerid]['features'] = layerdata[str(
                            layer.layerid)]['features']
                    topicdata[str(topic.topicid)]['categorie'] = 3
                    for restrictionid in layerdata[str(layer.layerid)]['ids']:
                        docfilters = [restrictionid]
                        for doctype in appconfig['doctypes'].split(','):
                            docfilters.append(doctype)
                            docidlist = get_document_ref(docfilters)
                            docs = set_documents(
                                str(layer.topicfk), doctype, docidlist,
                                featureinfo, False,
                                topicdata[str(topic.topicid)]['docids'])
                            for doc in docs:
                                topicdata[str(topic.topicid)]['docids'].add(
                                    doc['documentid'])
                                del doc['documentid']
                                topicdata[str(
                                    topic.topicid)][doctype].append(doc)
                else:
                    topicdata[str(topic.topicid)]['layers'][layer.layerid] = {
                        'layername': layer.layername,
                        'features': None
                    }
                    if topicdata[str(topic.topicid)]['categorie'] != 3:
                        topicdata[str(topic.topicid)]['categorie'] = 1

                # get the legend entries in the map bbox not touching the features
                featurelegend = get_legend_classes(
                    to_shape(featureinfo['geom']), layer.layername,
                    translations, extract.srid)
                bboxlegend = get_legend_classes(to_shape(WKTElement(map_bbox)),
                                                layer.layername, translations,
                                                extract.srid)
                bboxitems = set()
                for legenditem in bboxlegend:
                    if legenditem not in featurelegend:
                        bboxitems.add(tuple(legenditem.items()))
                if len(bboxitems) > 0:
                    for el in bboxitems:
                        legendclass = dict((x, y) for x, y in el)
                        legendclass['codegenre'] = legenddir + legendclass[
                            'codegenre'] + ".png"
                        topicdata[str(
                            topic.topicid)]["bboxlegend"].append(legendclass)
            # Get the list of documents related to a topic with layers and results
            if topicdata[str(layer.topicfk)]["categorie"] == 3:
                docfilters = [str(topic.topicid)]
                for doctype in appconfig["doctypes"].split(','):
                    docidlist = get_document_ref(docfilters)
                    docs = set_documents(
                        str(topic.topicid), doctype, docidlist, featureinfo,
                        True, topicdata[str(topic.topicid)]['docids'])
                    for doc in docs:
                        topicdata[str(topic.topicid)]['docids'].add(
                            doc['documentid'])
                        del doc['documentid']
                        topicdata[str(topic.topicid)][doctype].append(doc)
        else:
            if str(topic.topicid) in appconfig['emptytopics']:
                emptytopics.append(topic.topicname)
                topicdata[str(topic.topicid)]['layers'] = None
                topicdata[str(topic.topicid)]['categorie'] = 0
            else:
                topicdata[str(topic.topicid)]['layers'] = None
                topicdata[str(topic.topicid)]['categorie'] = 1

        if topicdata[str(topic.topicid)]['categorie'] == 1:
            notconcernedtopics.append(topic.topicname)

        if topicdata[str(topic.topicid)]["categorie"] == 3:
            appendiceslist = []

            for i, legalprovision in enumerate(topicdata[str(
                    topic.topicid)]["legalprovision"]):
                if legalprovision["officialtitle"] != "":
                    appendiceslist.append(
                        ['A' + str(i + 1), legalprovision["officialtitle"]])
            for doctype in appconfig["doctypes"].split(','):
                if topicdata[str(topic.topicid)][doctype] == []:
                    topicdata[str(topic.topicid)][doctype] = [{
                        "officialtitle": "",
                        "title": "",
                        "remoteurl": ""
                    }]

            concernedtopics.append({
                "topicname": topic.topicname,
                "documentlist": {
                    "columns": ["appendixno", "appendixtitle"],
                    "data": appendiceslist
                }
            })

            if topicdata[str(topic.topicid)]['layers']:
                topicdata[str(topic.topicid)]["restrictions"] = []

                for layer in topicdata[str(topic.topicid)]['layers']:
                    if topicdata[str(topic.topicid)]['layers'][layer]['features']:
                        for feature in topicdata[str(topic.topicid)]['layers'][layer]['features']:
                            if 'teneur' in feature.keys() and feature['teneur'] is not None and feature['statutjuridique'] is not None:
                                if feature['codegenre'] is None:
                                    feature['codegenre'] = '9999'
                                if isinstance(feature['codegenre'], int):
                                    feature['codegenre'] = str(
                                        feature['codegenre'])
                                if feature['geomType'] == 'area':
                                    topicdata[str(topic.topicid)]["restrictions"].append({
                                        "codegenre": legenddir + feature['codegenre'] + ".png",
                                        "teneur": feature['teneur'],
                                        "area": feature['intersectionMeasure'].replace(' : ', '').replace(' - ', '').replace('[m2]', 'm<sup>2</sup>'),
                                        "area_pct": round((float(
                                            feature['intersectionMeasure'].replace(' : ', '').replace(' - ', '').replace(' [m2]', '')) * 100) / propertyarea, 1)
                                    })
                                else:
                                    topicdata[str(topic.topicid)]["restrictions"].append({
                                        "codegenre": legenddir + feature['codegenre'] + ".png",
                                        "teneur": feature['teneur'],
                                        "area": feature['intersectionMeasure'].replace(' - ', '').replace(' : ', '').replace('[m]', 'm'),
                                        "area_pct": -1
                                    })
                            else:
                                for property, value in feature.iteritems():
                                    if value is not None and property != 'featureClass':
                                        if isinstance(value, float) or isinstance(value, int):
                                            value = str(value)
            topiclayers = {
                "baseURL": request.registry.settings['crdppf_wms'],
                "opacity": 1,
                "type": "WMS",
                "layers": topicdata[str(topic.topicid)]['wmslayerlist'],
                "imageFormat": "image/png",
                "styles": "default",
                "customParams": {
                    "TRANSPARENT": "true"
                }
            }

            # This defines the "general" map that we copy several times:
            # once as the base map and once for each topic map.
            map = {
                "projection": "EPSG:" + str(extract.srid),
                "dpi": 150,
                "rotation": 0,
                "center": feature_center,
                "scale": print_box['scale'] * map_buffer,
                "longitudeFirst": "true",
                "layers": [{
                    "type":
                    "geojson",
                    "geoJson":
                    request.route_url('get_property') + '?id=' + id,
                    "style": {
                        "version": "2",
                        "strokeColor": "gray",
                        "strokeLinecap": "round",
                        "strokeOpacity": 0.6,
                        "[INTERSECTS(geometry, " + wkt_polygon + ")]": {
                            "symbolizers": [{
                                "strokeColor": "red",
                                "strokeWidth": 2,
                                "type": "line"
                            }]
                        }
                    }
                }, topiclayers, basemaplayers]
            }

            if topicdata[str(topic.topicid)]["bboxlegend"] == []:
                topicdata[str(topic.topicid)]["bboxlegend"] = [{
                    "codegenre": "",
                    "teneur": ""
                }]

            data.append({
                "topicname": topic.topicname,
                "map": map,
                "restrictions": topicdata[str(topic.topicid)]["restrictions"],
                "bboxlegend": topicdata[str(topic.topicid)]["bboxlegend"],
                "completelegend": extract.topiclegenddir + str(topic.topicid) + '_topiclegend.pdf',
                "legalbases": topicdata[str(topic.topicid)]["legalbase"],
                "legalprovisions": topicdata[str(topic.topicid)]["legalprovision"],
                "references": topicdata[str(topic.topicid)]["reference"],
                "authority": [topicdata[str(topic.topicid)]["authority"]]
            })

    d = {
        "attributes": {
            "reporttype": type,
            "directprint": directprint,
            "extractcreationdate": extract.creationdate,
            "filename": extract.filename,
            "extractid": extract.id,
            "map": basemap,
            "municipality": municipality,
            "cadastre": cadastre,
            "cadastrelabel": "Cadastre",
            "propertytype": propertytype,
            "propertynumber": propertynumber,
            "EGRIDnumber": featureinfo['no_egrid'],
            "municipalitylogopath": municipalitylogopath,
            "federalmunicipalitynumber": featureinfo['nufeco'],
            "competentauthority": extract.baseconfig['competentauthority'],
            "titlepage": [{
                "title": report_title,
                "certificationinstance": "",
                "certificationtext": "",
            }],
            "concernedtopics": concernedtopics,
            "notconcernedtopics": ";".join(notconcernedtopics),
            "emptytopics": ";".join(emptytopics),
            "propertyarea": propertyarea,
            "datasource": data
        },
        "layout": "report",
        "outputFormat": "pdf"
    }

    # import json
    # pretty printed json data for the extract
    # jsonfile = open('C:/Temp/extractdata.json', 'w')
    # jsondata = json.dumps(d, indent=4)
    # jsonfile.write(jsondata)
    # jsonfile.close()
    return d