def getCustomLayerDefaultDefinition(layerName):
    layerName = UTILS.normalizeName(layerName)
    return xmltodict.parse(
        """<?xml version="1.0" encoding="UTF-8"?>
        <CONTENT>
            <Name>GH:""" + layerName + """</Name>
            <Title>""" + layerName + """</Title>
        </CONTENT>""")["CONTENT"]
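# Illustrative usage sketch for the helper above, not from the original
# project: UTILS is stubbed with a hypothetical normalizer so the call runs
# standalone; the real UTILS.normalizeName is defined elsewhere in the repo.
import re
import xmltodict

class UTILS(object):
    @staticmethod
    def normalizeName(layerName):
        return re.sub(r'[^A-Za-z0-9_]', '_', layerName)  # assumed behavior

layer = getCustomLayerDefaultDefinition('my layer')
print(layer['Name'])   # -> GH:my_layer
print(xmltodict.unparse({'Layer': layer}, pretty=True))  # back to XML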
def yatr_hnd(t, s, p):
    if not p:
        reply(t, s, u'What should I translate?')
        return
    err = {'401': u'Invalid API key',
           '402': u'API key is blocked',
           '403': u'Daily request limit exceeded',
           '404': u'Daily limit on translated text volume exceeded',
           '422': u'The text cannot be translated'}
    i = 'ru'
    key = 'trnsl.1.1.20130421T140201Z.323e508a33e9d84b.f1e0d9ca9bcd0a00b0ef71d82e6cf4158183d09e'
    # default direction is ->ru; switch to ->en if the text contains Cyrillic
    # (note: range(1040, 1103) covers U+0410..U+044E and misses U+044F)
    if [x for x in p if ord(x) in range(1040, 1103)]:
        i = 'en'
    # an explicit "xx-yy" language pair in the text overrides autodetection
    lc = re.findall('[a-z]{2,}-[a-z]{2,}', p)
    if lc:
        li = lc[0].split('-')
        if li[0] in Langs and li[1] in Langs:
            i = lc[0]
            p = p.replace(lc[0], '')
    p = p.encode('utf8', 'replace')
    p = urllib2.quote(p)
    page = urllib.urlopen('https://translate.yandex.net/api/v1.5/tr/translate?key=' + key + '&text=' + p + '&lang=' + i).read()
    try:
        page = xmltodict.parse(page)
    except NameError:
        reply(t, s, u'xmltodict module not found!')
        return
    if page.get(u'Translation', None):
        body = page[u'Translation']['text']
    else:
        reply(t, s, err[page[u'Error']['@code']])
        return
    reply(t, s, page[u'Translation']['@lang'] + ':\n' + body)
def getCurrentCapabilitiesDoc(directory):
    capabilitiesPath = os.path.join(directory, "getCapabilities.xml")
    if os.path.isfile(capabilitiesPath):
        with io.open(capabilitiesPath, mode="r", encoding="utf-8") as capabilitiesFile:
            capabilitiesContent = capabilitiesFile.read()
    else:
        capabilitiesContent = WMSCapabilities.getDefaultCapabilities()
    doc = xmltodict.parse(capabilitiesContent)
    return doc
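# The WMS helpers in this file share a parse -> mutate -> unparse round trip.
# A minimal standalone sketch of that pattern (document heavily simplified;
# a real getCapabilities.xml carries far more structure):
import xmltodict

xml = ('<WMT_MS_Capabilities version="1.1.1">'
       '<Capability><Layer><Title>root</Title></Layer></Capability>'
       '</WMT_MS_Capabilities>')
doc = xmltodict.parse(xml)
doc['WMT_MS_Capabilities']['Capability']['Layer']['Layer'] = {'Name': 'GH:example'}
print(xmltodict.unparse(doc, pretty=True))  # serialized back, new child included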
def getAllLayers(content, layerAttr='1'):
    # FIXME should use the style name to identify the layer attributes
    doc = xmltodict.parse(content)
    allLayers = []
    try:
        layerDefinitions = doc['WMT_MS_Capabilities']['Capability']['Layer']['Layer']
        # xmltodict yields a list for repeated elements, an OrderedDict for a single one
        if layerDefinitions is not None and isinstance(layerDefinitions, list) and len(layerDefinitions):
            allLayers = [curLayer["Name"] for curLayer in layerDefinitions]
        elif layerDefinitions is not None and isinstance(layerDefinitions, collections.OrderedDict) and len(layerDefinitions):
            allLayers = [layerDefinitions["Name"]]
    except KeyError:
        pass
    return allLayers
def getAllCustomLayers(content, layerAttr='1'):
    doc = xmltodict.parse(content)
    allCustomLayers = []
    if 'CustomLayer' in doc['WMT_MS_Capabilities']['Capability']['VendorSpecificCapabilities']:
        layerDefinitions = doc['WMT_MS_Capabilities']['Capability']['VendorSpecificCapabilities']['CustomLayer']
        # a single element parses as an OrderedDict; normalize it to a one-element list
        if layerDefinitions is not None and isinstance(layerDefinitions, collections.OrderedDict):
            layerDefinitions = [layerDefinitions]
        if layerDefinitions is not None and isinstance(layerDefinitions, list) and len(layerDefinitions) > 0:
            for curLayer in layerDefinitions:
                for operation in WMSCapabilities.getOperationsListForCustomLayer(curLayer):
                    allCustomLayers.append(curLayer["Name"] + ";" + operation['attribute'])
                    # allCustomLayers.append(curLayer["Name"] + ":" + operation['attribute'] + ":" + operation['type'])
    return allCustomLayers
def cat_newznab(url):
    categories = [{'id': '0', 'name': 'Everything'}]
    try:
        result = xmltodict.parse(urllib.urlopen(url + '/api?t=caps&o=xml').read())
    except Exception:
        return []
    for cat in result['caps']['categories']['category']:
        category = {'label': cat['@name'], 'id': cat['@id']}
        category['value'] = [{'name': x['@name'], 'id': x['@id']} for x in cat['subcat']]
        for subcat in category['value']:
            subcat['name'] = '%s: %s' % (category['label'], subcat['name'])
        categories.append(category)
    return categories
def cat_newznab(url): categories = [{"id": "0", "name": "Everything"}] try: result = xmltodict.parse(urllib.urlopen(url + "/api?t=caps&o=xml").read()) except: return [] for cat in result["caps"]["categories"]["category"]: category = {"label": cat["@name"], "id": cat["@id"]} category["value"] = [{"name": x["@name"], "id": x["@id"]} for x in cat["subcat"]] for subcat in category["value"]: subcat["name"] = "%s: %s" % (category["label"], subcat["name"]) categories.append(category) return categories
def addOperation(customDoc, layerAttr, operation):
    layerAttr = UTILS.normalizeName(layerAttr)
    if "Operation" not in customDoc:
        customDoc["Operation"] = list()
    if isinstance(customDoc["Operation"], collections.OrderedDict):
        customDoc["Operation"] = [customDoc["Operation"]]
    found = False
    for curOperationDict in customDoc["Operation"]:
        if not found and '@type' in curOperationDict and curOperationDict['@type'] == operation \
                and curOperationDict['#text'] == layerAttr:
            found = True
    if not found:
        customDoc['Operation'].append(xmltodict.parse(
            """<?xml version="1.0" encoding="UTF-8"?>
            <CONTENT>
                <Operation type=\"""" + operation + """\">""" + layerAttr + """</Operation>
            </CONTENT>""")["CONTENT"]["Operation"])
    return customDoc
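# Hedged sketch of addOperation in isolation. xmltodict represents a repeated
# element as a list but a single one as an OrderedDict, which is why the
# function normalizes customDoc["Operation"] first. UTILS is a no-op stub here.
import collections
import xmltodict

class UTILS(object):
    @staticmethod
    def normalizeName(name):
        return name  # assumption: stand-in for the project's real normalizer

customLayer = collections.OrderedDict([('Name', 'GH:roads')])
addOperation(customLayer, 'population', 'getInfo')
addOperation(customLayer, 'population', 'getInfo')  # duplicate: not appended
print(xmltodict.unparse({'CustomLayer': customLayer}, pretty=True))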
def cat_newznab(url):
    categories = [{'id': '0', 'name': 'Everything'}]
    try:
        result = xmltodict.parse(urllib.urlopen(url + '/api?t=caps&o=xml').read())
    except Exception:
        return []
    for cat in result['caps']['categories']['category']:
        category = {'id': cat['@id'], 'label': cat['@name'], 'value': {}}
        i = 0
        for x in cat['subcat']:
            try:
                category['value'][i] = {'name': x['@name'], 'id': x['@id']}
                i += 1
            except Exception:
                pass
        categories.append(category)
    return categories
def block(apple_id):
    """
    Blocking Apple ID

    :param apple_id: e-mail address
    """
    url = "https://setup.icloud.com/setup/iosbuddy/loginDelegates"
    xml_send = open("{}/data.xml".format(scriptdir)).read().format(apple_id, conf["pass"])
    headers = {'user-agent': 'Accounts/113 CFNetwork/672.0.8 Darwin/14.0.0',
               'Proxy-Connection': 'keep-alive',
               'Accept': '*/*',
               'Accept-Encoding': 'utf-8',
               'Accept-Language': 'en-us',
               'X-MMe-Country': 'US',
               'Connection': 'keep-alive',
               'X-MMe-Client-Info': '<iPhone4,1> <iPhone OS;7.0.4;11B554a> <com.apple.AppleAccount/1.0 (com.apple.Accounts/113)>',
               'Content-Type': 'text/plist',
               'Content-length': str(len(xml_send))}
    req = request.Request(url, data=bytes(xml_send, encoding="utf-8"), headers=headers, method="POST")
    while True:
        xml_resp = request.urlopen(req, timeout=5.0).read().decode("utf-8")
        xml_data = parse(xml_resp)  # xmltodict.parse, imported as parse
        status = xml_data["plist"]["dict"]["string"]
        if status == 'This Apple ID has been disabled for security reasons.':
            print(_("Apple ID <{}> has been blocked!").format(apple_id))
            break
def newznab(site, category, maxage, term, mobile=False):
    site = int(site)
    newznab = NewznabSite.query.filter(NewznabSite.id == site).first()
    url = newznab.url
    apikey = newznab.apikey
    categories = cat_newznab(url)
    try:
        url += '/api?t=search&o=xml&apikey=%s&maxage=%s' % (apikey, maxage)
        if category != '0' and category != 'undefined':
            url += '&cat=%s' % category
        if term:
            url += '&q=%s' % urllib.quote(term)
        logger.log('SEARCH :: %s :: Searching for "%s" in category: %s, max age: %s'
                   % (newznab.name, term, category, maxage), 'INFO')
        result = xmltodict.parse(urllib.urlopen(url).read())['rss']['channel']
        if 'item' in result:
            result = result['item']
        else:
            result = []
            logger.log('SEARCH :: No results found', 'INFO')
    except Exception as e:
        logger.log(e, 'DEBUG')
        result = []

    def parse_item(item):
        if isinstance(item, dict):
            size = None  # default when the feed item carries no size attribute
            for attr in item['newznab:attr']:
                if attr['@name'] == 'size':
                    size = convert_bytes(attr['@value'])
            a = {'nzblink': item['link'],
                 'details': item['guid']['#text'],
                 'title': item['title'].decode('utf-8'),
                 'category': item['category'],
                 'size': size}
            return a

    if isinstance(result, dict):
        results = [parse_item(result)]
    else:
        results = [parse_item(x) for x in result]
    if mobile:
        return results
    return render_template('search-results.html', site=site, results=results, term=term,
                           categories=categories, category=category, maxage=int(maxage),
                           newznab_sites=get_newznab_sites())
def monitor(monitor_type, para={}):
    date = datetime.datetime.now()
    priv_error_log_date = date - datetime.timedelta(hours=1)
    is_loop = 0
    delay_dict_info = {}
    restart_times = 0
    while True:
        try:
            if monitor_type == 'mq':
                sleep_time = 300
                time.sleep(10)
                mq_monitor_date = datetime.datetime.now()
                ip = para['ip']
                admin_user = para['admin_user']
                admin_passwd = para['admin_passwd']
                admin_web_port = para['admin_web_port']
                active_mq_http_baseurl = 'http://%s:%s/api/jolokia' % (ip, admin_web_port)
                check_health_url = '%s/read/org.apache.activemq:type=Broker,brokerName=localhost,service=Health/CurrentStatus' % active_mq_http_baseurl
                queue_view_url = 'http://%s:%s/admin/xml/queues.jsp' % (ip, admin_web_port)
                passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
                passman.add_password(None, queue_view_url, admin_user, admin_passwd)
                auth_handler = urllib2.HTTPBasicAuthHandler(passman)
                opener = urllib2.build_opener(auth_handler)
                urllib2.install_opener(opener)
                response = urllib2.urlopen(queue_view_url)
                xml_resContent = response.read()
                queues_json = xmltodict.parse(xml_resContent)
                mq_logger.info(mq_monitor_date.strftime('%Y-%m-%d %H:%M:%S') +
                               ' MQ queue status (table, size (approx. sync time (s)), consumers, enqueued, dequeued):')
                for queue in queues_json['queues']['queue']:
                    queue_name = queue['@name']
                    if is_loop == 0:
                        delay_dict_info[queue_name] = {}
                        delay_dict_info[queue_name]['priv_size'] = queue['stats']['@size']
                        delay_dict_info[queue_name]['priv_consumerCount'] = queue['stats']['@consumerCount']
                        delay_dict_info[queue_name]['priv_enqueueCount'] = queue['stats']['@enqueueCount']
                        delay_dict_info[queue_name]['priv_dequeueCount'] = queue['stats']['@dequeueCount']
                        delay = 'monitor just started, sync time not yet estimated'
                    else:
                        delay_dict_info[queue_name]['this_size'] = queue['stats']['@size']
                        delay_dict_info[queue_name]['this_consumerCount'] = queue['stats']['@consumerCount']
                        delay_dict_info[queue_name]['this_enqueueCount'] = queue['stats']['@enqueueCount']
                        delay_dict_info[queue_name]['this_dequeueCount'] = queue['stats']['@dequeueCount']
                        if int(delay_dict_info[queue_name]['this_dequeueCount']) - int(delay_dict_info[queue_name]['priv_dequeueCount']) != 0:
                            # estimate seconds to drain the backlog from the dequeue rate over the last interval
                            delay_seconds = 1.0 * sleep_time * int(delay_dict_info[queue_name]['this_size']) / (
                                int(delay_dict_info[queue_name]['this_dequeueCount']) -
                                int(delay_dict_info[queue_name]['priv_dequeueCount']))
                            delay_seconds = float('%.2f' % delay_seconds)
                            delay = str(delay_seconds) + 's'
                            if delay_seconds > 900:
                                warning_log = '%s: estimated delay may exceed 900s, unconsumed messages in queue: %s, please check! (ignore this alert if the queue is small)' % (
                                    queue['@name'], queue['stats']['@size'])
                                mq_logger.warning(warning_log)
                        else:
                            delay = 'nothing consumed within ' + str(sleep_time) + 's, cannot estimate yet'
                            if int(delay_dict_info[queue_name]['this_size']) >= 500:
                                warning_log = '%s: more than 500 unconsumed messages left and nothing consumed in the last 5 minutes, please check!' % queue['@name']
                                mq_logger.warning(warning_log)
                        if queue['stats']['@size'] == '0':
                            delay = 'no delay'
                    mq_logger.info('%s,%s (sync time: %s),%s,%s,%s' % (queue['@name'], queue['stats']['@size'], delay,
                                                                       queue['stats']['@consumerCount'],
                                                                       queue['stats']['@enqueueCount'],
                                                                       queue['stats']['@dequeueCount']))
                    if int(queue['stats']['@consumerCount']) == 0:
                        error_log = 'Number of consumer(s) on queue %s (MQ IP:%s port:%s) is 0, please check!!!' % (
                            queue['@name'], ip, admin_web_port)
                        mq_logger.error(error_log)
                    if is_loop == 1:
                        delay_dict_info[queue_name]['priv_size'] = queue['stats']['@size']
                        delay_dict_info[queue_name]['priv_consumerCount'] = queue['stats']['@consumerCount']
                        delay_dict_info[queue_name]['priv_enqueueCount'] = queue['stats']['@enqueueCount']
                        delay_dict_info[queue_name]['priv_dequeueCount'] = queue['stats']['@dequeueCount']
                time.sleep(sleep_time - 10)
                mq_logger.info('\n\n\n')
                is_loop = 1
            if monitor_type == 'extractor' or monitor_type == 'applier':
                time.sleep(60)
                ip = para['ip']
                ssh_user = para['ssh_user']
                ssh_port = para['ssh_port']
                app_dir = para['app_dir']
                app_pattern = para['app_pattern']
                monitor_cmd = 'ps -ef | grep %s | grep -v grep' % (app_pattern)
                monitor_result = ssh_outs(ip=ip, port=ssh_port, cmd=monitor_cmd, user=ssh_user)
                if monitor_result['status'] == 'failure':
                    if monitor_result['message']:
                        error_info = '%s %s on %s monitor error, ssh cmd ps failed!!!' % (monitor_type, app_pattern, ip)
                    else:
                        error_info = '%s %s on %s is NOT running!!!' % (monitor_type, app_pattern, ip)
                    if monitor_type == 'extractor':
                        if restart_times < 3:
                            restart_cmd = 'cd %s; nohup ./run.sh >>restart.log 2>&1 &' % (app_dir)
                            restart_result = ssh_outs(ip=ip, port=ssh_port, cmd=restart_cmd, user=ssh_user)
                            dataflume_logger.warning('%s %s on %s stopped, now I tried to start it up! %s times!' % (
                                monitor_type, app_pattern, ip, restart_times + 1))
                            restart_times = restart_times + 1
                            time.sleep(10)
                            continue
                        else:
                            dataflume_logger.error('%s %s on %s stopped, and I have tried 3 times, and I cannot start it up again!!!' % (
                                monitor_type, app_pattern, ip))
                    dataflume_logger.critical(error_info + '\n' + monitor_result['message'])
                    # send SMS error_info
                else:
                    restart_times = 0
                    monitor_log_cmd = """ ls --full-time %s/log/error.log | awk '{print $6 " " $7}' | sed 's/\..*$//g' """ % (app_dir)
                    monitor_log_result = ssh_outs(ip=ip, port=ssh_port, cmd=monitor_log_cmd, user=ssh_user)
                    if monitor_log_result['status'] == 'failure':
                        error_log = '%s %s on %s monitor error, ssh cmd ls failed!!!' % (monitor_type, app_pattern, ip)
                        dataflume_logger.critical(error_log + '\n' + monitor_log_result['message'])
                        # send SMS error_info
                    else:
                        error_log_date_str = monitor_log_result['data'].strip()
                        error_log_date = datetime.datetime.strptime(error_log_date_str, '%Y-%m-%d %H:%M:%S')
                        if priv_error_log_date >= error_log_date:
                            dataflume_logger.info('%s %s on %s is running, app log is OK!' % (monitor_type, app_pattern, ip))
                        else:
                            error_log = '%s %s on %s new error log found since %s' % (
                                monitor_type, app_pattern, ip, priv_error_log_date.strftime('%Y-%m-%d %H:%M:%S'))
                            dataflume_logger.error(error_log)
                            # send SMS error_info
                            priv_error_log_date = error_log_date
        except:
            trace_info = traceback.format_exc()
            print trace_info
def updateXML(directory, layerExtents, layerMercatorExtents, isShapefile, layerTitle, layerAttr, maxZoom, downloadLink):
    doc = WMSCapabilities.getCurrentCapabilitiesDoc(directory)
    filename = UTILS.normalizeName(layerTitle)  # layer Name in QGIS
    # extents, projection
    projMaxX = layerExtents.xMaximum()
    projMinX = layerExtents.xMinimum()
    projMaxY = layerExtents.yMaximum()
    projMinY = layerExtents.yMinimum()
    # llExtent = UTILS.getMapExtent(layer, QgsCoordinateReferenceSystem('EPSG:4326'))
    latMaxX = layerMercatorExtents.xMaximum()
    latMinX = layerMercatorExtents.xMinimum()
    latMaxY = layerMercatorExtents.yMaximum()
    latMinY = layerMercatorExtents.yMinimum()
    # isShapefile = (isinstance(layer, QgsVectorLayer))
    if 'Layer' in doc['WMT_MS_Capabilities']['Capability']['Layer']:
        if type(doc['WMT_MS_Capabilities']['Capability']['Layer']['Layer']) is collections.OrderedDict:
            curLayer = doc['WMT_MS_Capabilities']['Capability']['Layer']['Layer']
            doc['WMT_MS_Capabilities']['Capability']['Layer']['Layer'] = [curLayer]
    if 'Layer' in doc['WMT_MS_Capabilities']['Capability']['Layer']:
        # iterate backwards so pop() does not shift unvisited indices
        for iLayer in range(len(doc['WMT_MS_Capabilities']['Capability']['Layer']['Layer']) - 1, -1, -1):
            curLayer = doc['WMT_MS_Capabilities']['Capability']['Layer']['Layer'][iLayer]
            if "Name" in curLayer and curLayer["Name"] == "GH:" + filename:
                doc['WMT_MS_Capabilities']['Capability']['Layer']['Layer'].pop(iLayer)
    # FIXME One attribute only. (Is always overwriting)
    newLayerDescription = xmltodict.parse(
        WMSCapabilities.getMapDescription(filename, layerAttr, latMinX, latMinY, latMaxX, latMaxY,
                                          projMinX, projMinY, projMaxX, projMaxY, maxZoom,
                                          isShapefile, downloadLink))['CONTENT']
    if 'Layer' in doc['WMT_MS_Capabilities']['Capability']['Layer']:
        doc['WMT_MS_Capabilities']['Capability']['Layer']['Layer'].append(newLayerDescription['Layer'])
    else:
        doc['WMT_MS_Capabilities']['Capability']['Layer'] = newLayerDescription
    newTileSetDescription = xmltodict.parse(
        WMSCapabilities.getTileSetDescription(filename, latMinX, latMinY, latMaxX, latMaxY,
                                              projMinX, projMinY, projMaxX, projMaxY))['CONTENT']
    if doc['WMT_MS_Capabilities']['Capability']['VendorSpecificCapabilities'] is not None \
            and 'TileSet' in doc['WMT_MS_Capabilities']['Capability']['VendorSpecificCapabilities']:
        if type(doc['WMT_MS_Capabilities']['Capability']['VendorSpecificCapabilities']['TileSet']) is collections.OrderedDict:
            curTileSet = doc['WMT_MS_Capabilities']['Capability']['VendorSpecificCapabilities']['TileSet']
            doc['WMT_MS_Capabilities']['Capability']['VendorSpecificCapabilities']['TileSet'] = [curTileSet]  # transforms into a list
    if doc['WMT_MS_Capabilities']['Capability']['VendorSpecificCapabilities'] is not None \
            and 'TileSet' in doc['WMT_MS_Capabilities']['Capability']['VendorSpecificCapabilities']:
        for iTileSet in range(len(doc['WMT_MS_Capabilities']['Capability']['VendorSpecificCapabilities']['TileSet']) - 1, -1, -1):
            curTileSet = doc['WMT_MS_Capabilities']['Capability']['VendorSpecificCapabilities']['TileSet'][iTileSet]
            if "Layers" in curTileSet and curTileSet["Layers"] == "GH:" + filename:
                doc['WMT_MS_Capabilities']['Capability']['VendorSpecificCapabilities']['TileSet'].pop(iTileSet)
        doc['WMT_MS_Capabilities']['Capability']['VendorSpecificCapabilities']['TileSet'] += newTileSetDescription['TileSet']
        # two elements with the same key are represented as a list
    else:
        doc['WMT_MS_Capabilities']['Capability']['VendorSpecificCapabilities'] = newTileSetDescription
    WMSCapabilities.saveCurrentCapabilities(directory, doc)
def newznab(site, category, maxage, term, mobile=False):
    site = int(site)
    newznab = NewznabSite.query.filter(NewznabSite.id == site).first()
    url = newznab.url
    apikey = newznab.apikey
    categories = cat_newznab(url)
    try:
        url += '/api?t=search&o=xml&apikey=%s&maxage=%s' % (apikey, maxage)
        if category != '0' and category != 'undefined':
            url += '&cat=%s' % category
        if term:
            url += '&q=%s' % urllib.quote(term)
        logger.log('SEARCH :: %s :: Searching for "%s" in category: %s, max age: %s'
                   % (newznab.name, term, category, maxage), 'INFO')
        result = xmltodict.parse(urllib.urlopen(url).read())['rss']['channel']
        if 'item' in result:
            result = result['item']
        else:
            result = []
            logger.log('SEARCH :: No results found', 'INFO')
    except Exception as e:
        logger.log(e, 'DEBUG')
        result = []

    def parse_item(item):
        if isinstance(item, dict):
            size = None  # default when the feed item carries no size attribute
            for attr in item['newznab:attr']:
                if attr['@name'] == 'size':
                    size = convert_bytes(attr['@value'])
            a = {'nzblink': item['link'],
                 'details': item['guid']['#text'],
                 'title': item['title'],
                 'category': item['category'],
                 'size': size}
            return a

    if isinstance(result, dict):
        results = [parse_item(result)]
    else:
        results = [parse_item(x) for x in result]
    if mobile:
        return results
    return render_template('search-results.html', site=site, results=results, term=term,
                           categories=categories, category=category, maxage=int(maxage),
                           newznab_sites=get_newznab_sites())
def __init__(self, filename, resolve_xlink_href=True):
    def is_geometry(key):
        for name in ['geometry', 'position', 'the_geom']:
            if name in key.lower():
                return True
        return False

    def postprocessor(path, key, value):
        # remove wfs namespace
        key = key.replace('wfs:', '')
        # normalize FeatureCollection, member, featureMember, featureMembers
        if 'feature' in str(key.lower()) or 'member' in str(key.lower()):
            key = key.replace('gml:', '')
        # returning None for geometry keys drops them from the parsed result
        if not is_geometry(key):
            return key, value

    features = {}
    f = open(filename, mode='r')
    logging.info('Open file %s' % filename)
    features = xmltodict.parse(f, postprocessor=postprocessor)
    # logging.info(json.dumps(features, indent=3))
    logging.debug('Container type(%s)' % str(type(features)))
    logging.debug('Container %s' % features.keys()[0])  # Python 2: keys() is a list

    # convert single feature (count=1 or maxFeatures=1) to list
    def prepare(features):
        if type(features) == OrderedDict:
            return [features]
        return features

    self.__features = None
    # INSPIRE GML 3.2
    if 'base:SpatialDataSet' in features:
        self.__features = features['base:SpatialDataSet']['base:member']
    # WFS or GML
    if 'FeatureCollection' in features:
        # GML 3.2
        if 'member' in features['FeatureCollection']:
            self.__features = prepare(features['FeatureCollection']['member'])
            try:
                self.__features.extend(
                    features['FeatureCollection']['additionalObjects']['SimpleFeatureCollection']['member'])
            except KeyError:
                pass
        # GML 3.1
        if 'featureMembers' in features['FeatureCollection']:
            members = []  # renamed from 'list' to avoid shadowing the builtin
            for key in features['FeatureCollection']['featureMembers'].keys():
                for value in features['FeatureCollection']['featureMembers'][key]:
                    entry = OrderedDict()  # renamed from 'dict' for the same reason
                    entry[key] = value
                    members.append(entry)
            self.__features = members
        # GML 2.0
        if 'featureMember' in features['FeatureCollection']:
            self.__features = prepare(features['FeatureCollection']['featureMember'])
    if not self.__features:
        raise GmlException('Unsupported GML-Container!')
    logging.debug('Container type(%s)' % str(type(self.__features)))
    if resolve_xlink_href:
        logging.info('Resolving xlink:href references')
        self.__resolve(self.__features)
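# xmltodict's postprocessor hook shown standalone: returning a (key, value)
# pair keeps it, returning None drops the pair; that is how the reader above
# filters geometry elements out of the parsed features.
import xmltodict

def drop_geometry(path, key, value):
    if 'geom' in key.lower():
        return None  # element is discarded from the result
    return key, value

doc = xmltodict.parse('<f><name>road</name><the_geom>POINT(0 0)</the_geom></f>',
                      postprocessor=drop_geometry)
print(doc)  # {'f': {'name': 'road'}} (an OrderedDict on older xmltodict)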
def newznab(site, category, maxage, term, mobile=False):
    site = int(site)
    newznab = NewznabSite.query.filter(NewznabSite.id == site).first()
    url = newznab.url
    apikey = newznab.apikey
    categories = cat_newznab(url)
    try:
        url += "/api?t=search&o=xml&apikey=%s&maxage=%s" % (apikey, maxage)
        if category != "0":
            url += "&cat=%s" % category
        if term:
            url += "&q=%s" % urllib.quote(term)
        logger.log('SEARCH :: %s :: Searching for "%s" in category: %s' % (site, term, category), "INFO")
        result = xmltodict.parse(urllib.urlopen(url).read())["rss"]["channel"]
        if "item" in result:
            result = result["item"]
        else:
            result = []
            logger.log("SEARCH :: No results found", "INFO")
    except Exception as e:
        logger.log(e, "DEBUG")
        result = []

    def parse_item(item):
        if isinstance(item, dict):
            size = None  # default when the feed item carries no size attribute
            for attr in item["newznab:attr"]:
                if attr["@name"] == "size":
                    size = convert_bytes(attr["@value"])
            a = {
                "nzblink": item["link"],
                "details": item["guid"]["#text"],
                "title": item["title"].decode("utf-8"),
                "category": item["category"],
                "size": size,
            }
            return a

    if isinstance(result, dict):
        results = [parse_item(result)]
    else:
        results = [parse_item(x) for x in result]
    if mobile:
        return results
    return render_template(
        "search-results.html",
        site=site,
        results=results,
        term=term,
        categories=categories,
        category=category,
        maxage=int(maxage),
        newznab_sites=get_newznab_sites(),
    )