Example #1
def api_streets():
    start_time = time.time()
    jsonp_callback = request.args.get('callback', None)
    lon = request.args.get('lon', '')
    lat = request.args.get('lat', '')
    radius = request.args.get('radius', '1000')
    if lat == '' or lon == '':
        abort(400)
    lon = float(lon)
    lat = float(lat)
    radius = int(radius)
    radius = min(radius, 500)
    result = db.get_locations(lon, lat, radius)
    ret = {
        'status': 0,
        'duration': round((time.time() - start_time) * 1000),
        'request': {
            'lon': lon,
            'lat': lat,
            'radius': radius
        },
        'response': result
    }
    json_output = json.dumps(ret, cls=util.MyEncoder, sort_keys=True)
    if jsonp_callback is not None:
        json_output = jsonp_callback + '(' + json_output + ')'
    response = make_response(json_output, 200)
    response.mimetype = 'application/json'
    response.headers['Expires'] = util.expires_date(hours=24)
    response.headers['Cache-Control'] = util.cache_max_age(hours=24)
    return response
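
All of these view functions finish by stamping Expires and Cache-Control headers through two util helpers. The util module itself is not part of this listing, so the following is only a plausible sketch of those helpers, inferred from how they are called:

# Plausible sketch of the util caching helpers called throughout these
# examples; the real util module is not shown here, so the bodies below are
# assumptions inferred from the call sites.
import datetime
from email.utils import format_datetime

def expires_date(hours=24):
    # HTTP-date `hours` from now, suitable for the Expires header
    when = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(hours=hours)
    return format_datetime(when, usegmt=True)

def cache_max_age(hours=24):
    # Cache-Control value letting shared caches keep the response
    return 'public, max-age=%d' % (hours * 3600)
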
Example #2
def api_providers():
    start_time = time.time()
    provider_id = request.args.get('id', 0, type=int)
    result = []
    providers = SharingProvider.query.filter_by(active=1).filter(
        SharingProvider.id > provider_id).order_by(
            SharingProvider.id).limit(100).all()
    for provider in providers:
        result.append({
            'id': provider.id,
            'name': provider.name,
            'website': provider.website,
            'licence': provider.licence
        })
    ret = {
        'status': 0,
        'duration': round((time.time() - start_time) * 1000),
        'data': result,
        'next': '/api/providers?id=%s' % (provider_id + 100)
    }
    json_output = json.dumps(ret, cls=util.MyEncoder, sort_keys=True)
    response = make_response(json_output, 200)
    response.mimetype = 'application/json'
    response.headers['Expires'] = util.expires_date(hours=24)
    response.headers['Cache-Control'] = util.cache_max_age(hours=24)
    return response
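
The provider list pages with a keyset cursor: the query asks for ids greater than the id parameter, ordered ascending, 100 rows at a time. Since the next link assumes densely packed ids, a client is safer continuing from the largest id it actually received; a minimal consumer sketch (base URL and route are assumptions):

# Minimal client sketch for the keyset-paginated endpoint above; the base
# URL and route are assumptions, not part of this listing.
import requests

def fetch_all_providers(base_url='http://localhost:5000'):
    providers, last_id = [], 0
    while True:
        page = requests.get(base_url + '/api/providers',
                            params={'id': last_id}).json()['data']
        if not page:
            break
        providers.extend(page)
        last_id = page[-1]['id']  # keyset cursor: continue after the last id seen
    return providers
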
Example #3
def suche():
  """
  URL-Parameter:
  q: Suchanfrage, nutzer-formuliert
  fq: Filter query (Lucene Syntax)
  sort: Sortierung, z.B. "id asc"
  start: Offset im Suchergebnis
  num: Anzahl der Treffer pro Seite
  date: Datumsbereich als String
  """
  search_settings = {}
  search_settings['r'] = request.form.get('r')
  if not search_settings['r']:
    search_settings['r'] = request.args.get('r', app.config['region_default'])
  search_settings['q'] = request.args.get('q', '')
  search_settings['fq'] = request.args.get('fq', '')
  search_settings['sort'] = request.args.get('sort', '')
  search_settings['start'] = int(request.args.get('start', '0'))
  search_settings['ppp'] = int(request.args.get('ppp', '10'))
  search_settings['ppp'] = min(search_settings['ppp'], 100)  # max 100 items
  search_settings['date'] = request.args.get('date', '')
  html = render_template('suche.html', search_settings=search_settings)
  response = make_response(html, 200)
  response.headers['Expires'] = util.expires_date(hours=24)
  response.headers['Cache-Control'] = util.cache_max_age(hours=24)
  return response
Example #4
def api_traffic_item():
  start_time = time.time()
  traffic_item_id = request.args.get('id', 0, type=int)
  if not traffic_item_id:
    abort(404)
  traffic_item_data = TrafficItem.query.filter_by(id=traffic_item_id).first_or_404()
  traffic_item = {
    'id': traffic_item_data.id,
    'lat': traffic_item_data.lat,
    'lon': traffic_item_data.lon,
    'zoom_from': traffic_item_data.zoom_from,
    'zoom_till': traffic_item_data.zoom_till,
    'external_id': traffic_item_data.external_id,
    'created': traffic_item_data.created,
    'updated': traffic_item_data.updated,
    'properties': {}
  }
  if traffic_item_data.area:
    traffic_item['area'] = json.loads(traffic_item_data.area)
  traffic_item_metas = TrafficItemMeta.query.filter_by(traffic_item_id=traffic_item_data.id).all()
  for traffic_item_meta in traffic_item_metas:
    traffic_item['properties'][traffic_item_meta.key] = traffic_item_meta.value
  ret = {
    'status': 0,
    'duration': round((time.time() - start_time) * 1000),
    'response': traffic_item
  }
  json_output = json.dumps(ret, cls=util.MyEncoder, sort_keys=True)
  response = make_response(json_output, 200)
  response.mimetype = 'application/json'
  response.headers['Expires'] = util.expires_date(hours=24)
  response.headers['Cache-Control'] = util.cache_max_age(hours=24)
  return response
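
The properties dict is filled from a key-value companion table, one TrafficItemMeta row per property (an entity-attribute-value layout). The model classes are not shown in this listing; hypothetical Flask-SQLAlchemy definitions consistent with the queries would look roughly like this:

# Hypothetical Flask-SQLAlchemy models matching the queries above; the real
# definitions are not part of this listing.
from flask_sqlalchemy import SQLAlchemy

db = SQLAlchemy()

class TrafficItem(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    lat = db.Column(db.Float)
    lon = db.Column(db.Float)
    zoom_from = db.Column(db.Integer)
    zoom_till = db.Column(db.Integer)
    external_id = db.Column(db.String(64))
    created = db.Column(db.DateTime)
    updated = db.Column(db.DateTime)
    area = db.Column(db.Text)  # optional GeoJSON string, parsed on output

class TrafficItemMeta(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    traffic_item_id = db.Column(db.Integer, db.ForeignKey('traffic_item.id'))
    key = db.Column(db.String(64))   # EAV: one row per property name
    value = db.Column(db.Text)
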
Example #5
def api_papers_live():
  start_time = time.time()
  jsonp_callback = request.args.get('callback', None)
  paper_string = request.args.get('p', '')
  region = request.args.get('r', app.config['region_default'])
  if not region:
    region = app.config['region_default']
  region = request.args.get('region', region)
  if paper_string:
    result = db.get_papers_live(paper_string, region)
  ret = {
    'status': 0,
    'duration': round((time.time() - start_time) * 1000),
    'request': {
      'p': paper_string,
      'r': region
    },
    'response': result if paper_string else []
  }
  json_output = json.dumps(ret, cls=util.MyEncoder, sort_keys=True)
  if jsonp_callback is not None:
    json_output = jsonp_callback + '(' + json_output + ')'
  response = make_response(json_output, 200)
  response.mimetype = 'application/json'
  response.headers['Expires'] = util.expires_date(hours=24)
  response.headers['Cache-Control'] = util.cache_max_age(hours=24)
  return response
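
Several of these endpoints support JSONP: when a callback parameter is present, the JSON body is wrapped in a function call so that legacy cross-origin <script> consumers can read it. The repeated wrapping logic boils down to a small helper (a sketch, not part of the original code):

# Sketch of the JSONP wrapping repeated across these endpoints; not part of
# the original code.
import json

def to_jsonp(payload, callback=None):
    body = json.dumps(payload, sort_keys=True)
    if callback is not None:
        # e.g. handleData({"status": 0, ...});
        return '%s(%s)' % (callback, body), 'application/javascript'
    return body, 'application/json'
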
Example #6
def api_streets():
    start_time = time.time()
    jsonp_callback = request.args.get("callback", None)
    lon = request.args.get("lon", "")
    lat = request.args.get("lat", "")
    radius = request.args.get("radius", "1000")
    if lat == "" or lon == "":
        abort(400)
    lon = float(lon)
    lat = float(lat)
    radius = int(radius)
    radius = min(radius, 500)
    result = db.get_locations(lon, lat, radius)
    ret = {
        "status": 0,
        "duration": round((time.time() - start_time) * 1000),
        "request": {"lon": lon, "lat": lat, "radius": radius},
        "response": result,
    }
    json_output = json.dumps(ret, cls=util.MyEncoder, sort_keys=True)
    if jsonp_callback is not None:
        json_output = jsonp_callback + "(" + json_output + ")"
    response = make_response(json_output, 200)
    response.mimetype = "application/json"
    response.headers["Expires"] = util.expires_date(hours=24)
    response.headers["Cache-Control"] = util.cache_max_age(hours=24)
    return response
Example #7
def api_papers_live():
    start_time = time.time()
    jsonp_callback = request.args.get('callback', None)
    paper_string = request.args.get('p', '')
    region = request.args.get('r', app.config['region_default'])
    if not region:
        region = app.config['region_default']
    region = request.args.get('region', region)
    if paper_string:
        result = db.get_papers_live(paper_string, region)
    ret = {
        'status': 0,
        'duration': round((time.time() - start_time) * 1000),
        'request': {
            'p': paper_string,
            'r': region
        },
        'response': result if paper_string else []
    }
    json_output = json.dumps(ret, cls=util.MyEncoder, sort_keys=True)
    if jsonp_callback is not None:
        json_output = jsonp_callback + '(' + json_output + ')'
    response = make_response(json_output, 200)
    response.mimetype = 'application/json'
    response.headers['Expires'] = util.expires_date(hours=24)
    response.headers['Cache-Control'] = util.cache_max_age(hours=24)
    return response
Example #8
def api_station():
  start_time = time.time()
  station_id = request.args.get('id', 0, type=int)
  result = []
  stations = SharingStation.query.filter_by(active=1).filter(SharingStation.id > station_id).order_by(SharingStation.id).limit(100).all()
  for station in stations:
    result.append({
      'id': station.id,
      'name': station.name,
      'slug': station.slug,
      'lat': station.lat,
      'lon': station.lon,
      'vehicle_all': station.vehicle_all,
      'provider_id': station.sharing_provider_id
    })
  ret = {
    'status': 0,
    'duration': round((time.time() - start_time) * 1000),
    'data': result,
    'next': '/api/traffic-items?id=%s' % (station_id + 100)
  }
  json_output = json.dumps(ret, cls=util.MyEncoder, sort_keys=True)
  response = make_response(json_output, 200)
  response.mimetype = 'application/json'
  response.headers['Expires'] = util.expires_date(hours=24)
  response.headers['Cache-Control'] = util.cache_max_age(hours=24)
  return response
Example #9
def oparl_file_downloadUrl_data(params):
  file_data = db.get_file(deref={'values': ['file']},
                          search_params={'_id': ObjectId(params['file_id'])})

  if len(file_data) == 0:
    # TODO: render a more informative 404 page
    abort(404)
  file_data = file_data[0]

  # 'file' property is not set (e.g. due to depublication)
  if 'file' not in file_data:
    if 'depublication' in file_data:
      abort(410)  # Gone
    else:
      # TODO: log this as inexplicable...
      abort(500)

  handler = db.get_file_data(file_data['file']['_id'])
  response = make_response(handler.read(), 200)
  response.mimetype = file_data['mimetype']
  response.headers['X-Robots-Tag'] = 'noarchive'
  response.headers['ETag'] = file_data['sha1Checksum']
  response.headers['Last-Modified'] = util.rfc1123date(file_data['file']['uploadDate'])
  response.headers['Expires'] = util.expires_date(hours=(24 * 30))
  response.headers['Cache-Control'] = util.cache_max_age(hours=(24 * 30))
  response.headers['Content-Disposition'] = 'attachment; filename=' + file_data['filename']
  return response
Example #10
def api_streets():
    start_time = time.time()
    jsonp_callback = request.args.get('callback', None)
    lon = request.args.get('lon', '')
    lat = request.args.get('lat', '')
    region = request.args.get('region', app.config['region_default'])
    radius = request.args.get('radius', '1000')
    if lat == '' or lon == '':
        abort(400)
    lon = float(lon)
    lat = float(lat)
    radius = int(radius)
    radius = min(radius, 500)
    streets = db.get_locations(lon, lat, radius)
    result = {}
    # TODO: use msearch for getting paper num
    for street in streets:
        nodes = []
        for point in street['nodes']:
            nodes.append(point['location']['coordinates'])
        if street['name'] in result:
            result[street['name']]['nodes'].append(nodes)
        else:
            search_result = db.query_paper_num(region, street['name'])
            result[street['name']] = {
                'name': street['name'],
                'nodes': [nodes],
                'paper_count': search_result['num']
            }
            if 'name' in search_result:
                result[street['name']]['paper_name'] = search_result['name']
            if 'publishedDate' in search_result:
                result[street['name']]['paper_publishedDate'] = search_result[
                    'publishedDate']
    ret = {
        'status': 0,
        'duration': round((time.time() - start_time) * 1000),
        'request': {
            'lon': lon,
            'lat': lat,
            'radius': radius,
            'region': region
        },
        'response': result
    }
    try:
        json_output = json.dumps(ret, cls=util.MyEncoder, sort_keys=True)
    except AttributeError:
        return None

    if jsonp_callback is not None:
        json_output = jsonp_callback + '(' + json_output + ')'
    response = make_response(json_output, 200)
    response.mimetype = 'application/json'
    response.headers['Expires'] = util.expires_date(hours=24)
    response.headers['Cache-Control'] = util.cache_max_age(hours=24)
    return response
Example #11
def api_streets():
  start_time = time.time()
  jsonp_callback = request.args.get('callback', None)
  lon = request.args.get('lon', '')
  lat = request.args.get('lat', '')
  region = request.args.get('region', app.config['region_default'])
  radius = request.args.get('radius', '1000')
  if lat == '' or lon == '':
    abort(400)
  lon = float(lon)
  lat = float(lat)
  radius = int(radius)
  radius = min(radius, 500)
  streets = db.get_locations(lon, lat, radius)
  result = {}
  # TODO: use msearch for getting paper num
  for street in streets:
    nodes = []
    for point in street['nodes']:
      nodes.append(point['location']['coordinates'])
    if street['name'] in result:
      result[street['name']]['nodes'].append(nodes)
    else:
      search_result = db.query_paper_num(region, street['name'])
      result[street['name']] = {
        'name': street['name'],
        'nodes': [ nodes ],
        'paper_count': search_result['num']
      }
      if 'name' in search_result:
        result[street['name']]['paper_name'] = search_result['name']
      if 'publishedDate' in search_result:
        result[street['name']]['paper_publishedDate'] = search_result['publishedDate']
  ret = {
    'status': 0,
    'duration': round((time.time() - start_time) * 1000),
    'request': {
      'lon': lon,
      'lat': lat,
      'radius': radius,
      'region': region
    },
    'response': result
  }
  try:
    json_output = json.dumps(ret, cls=util.MyEncoder, sort_keys=True)
  except AttributeError:
    return None
  
  if jsonp_callback is not None:
    json_output = jsonp_callback + '(' + json_output + ')'
  response = make_response(json_output, 200)
  response.mimetype = 'application/json'
  response.headers['Expires'] = util.expires_date(hours=24)
  response.headers['Cache-Control'] = util.cache_max_age(hours=24)
  return response
Example #12
def attachment_view(attachment_id, extension, savefile=False):
    """
    Abruf/Download eines Attachments
    """
    attachment_info = db.get_attachment(attachment_id)
    #pprint.pprint(attachment_info)
    if attachment_info is None:
        # TODO: render a more informative 404 page
        abort(404)
    # extension doesn't match file extension (avoiding arbitrary URLs)
    proper_extension = attachment_info['filename'].split('.')[-1]
    if proper_extension != extension:
        abort(404)

    # 'file' property is not set (e.g. due to depublication)
    if 'file' not in attachment_info:
        if 'depublication' in attachment_info:
            abort(410)  # Gone
        else:
            # TODO: log this as inexplicable...
            abort(500)

    # handle conditional GET
    if 'If-Modified-Since' in request.headers:
        file_date = attachment_info['file']['uploadDate'].replace(tzinfo=None)
        request_date = util.parse_rfc1123date(request.headers['If-Modified-Since'])
        difference = file_date - request_date
        if difference < datetime.timedelta(0, 1):  # 1 second
            return Response(status=304)

    #if 'if-none-match' in request.headers:
    #    print "Conditional GET: If-None-Match"
    # TODO: handle ETag in request

    handler = db.get_file(attachment_info['file']['_id'])
    response = make_response(handler.read(), 200)
    response.mimetype = attachment_info['mimetype']
    response.headers['X-Robots-Tag'] = 'noarchive'
    response.headers['ETag'] = attachment_info['sha1']
    response.headers['Last-Modified'] = util.rfc1123date(
        attachment_info['file']['uploadDate'])
    response.headers['Expires'] = util.expires_date(hours=(24 * 30))
    response.headers['Cache-Control'] = util.cache_max_age(hours=(24 * 30))
    # Save to file option
    if savefile:
        response.headers['Content-Disposition'] = 'attachment; filename=%s' % attachment_info['filename']
        response.headers['X-Robots-Tag'] = 'noindex'
        # See https://support.google.com/webmasters/answer/139394
        response.headers['Link'] = '<%sanhang/%s.%s>; rel="canonical"' % (
            app.config['BASE_URL'], attachment_id, extension)
    return response
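
The conditional-GET branch above compares the stored upload date against If-Modified-Since and answers 304 Not Modified when the file is not newer than the client's copy (with a one-second tolerance). The util date helpers are not shown in this listing; plausible counterparts, assuming RFC 1123 HTTP-dates:

# Plausible counterparts of util.rfc1123date / util.parse_rfc1123date as used
# above; the real util module is not shown, so these are assumptions.
import datetime
from email.utils import format_datetime, parsedate_to_datetime

def rfc1123date(dt):
    # format a naive UTC datetime as an RFC 1123 HTTP-date
    return format_datetime(dt.replace(tzinfo=datetime.timezone.utc), usegmt=True)

def parse_rfc1123date(value):
    # parse an RFC 1123 HTTP-date back into a naive datetime
    return parsedate_to_datetime(value).replace(tzinfo=None)
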
Example #13
def suche_feed():
  start_time = time.time()
  jsonp_callback = request.args.get('callback', None)
  q = request.args.get('q', '*:*')
  fq = request.args.get('fq', '')
  date_param = request.args.get('date', '')
  region = request.args.get('r', app.config['region_default'])
  
  # run the search
  query = db.query_paper(region=region, q=q, fq=fq, sort='publishedDate:desc', start=0,
             papers_per_page=50, facets=False)
  
  # generate the root element and meta info
  search_url = "%s/suche/?r=%s&q=%s&fq=%s" % (app.config['base_url'], region, q, fq)
  feed_url = "%s/suche/feed/?r=%s&q=%s&fq=%s" % (app.config['base_url'], region, q, fq)
  root = etree.Element("rss", version="2.0", nsmap={'atom': 'http://www.w3.org/2005/Atom'})
  channel = etree.SubElement(root, "channel")
  etree.SubElement(channel, "title").text = 'Offenes Ratsinformationssystem: Paper-Feed'
  etree.SubElement(channel, "link").text = search_url
  etree.SubElement(channel, "language").text = 'de-de'
  description = u"Neue oder geänderte Dokumente mit dem Suchbegriff %s in %s" % (q, app.config['regions'][region]['name'])
  # TODO: include the filter restrictions in the description
  if fq:
    description += ''
  etree.SubElement(channel, "description").text = description
  etree.SubElement(channel, '{http://www.w3.org/2005/Atom}link', href=feed_url, rel="self", type="application/rss+xml")
  
  # Generate Result Items
  for paper in query['result']:
    item = etree.SubElement(channel, "item")
    paper_link = "%s/paper/%s" % (app.config['base_url'], paper['id'])
    description = 'Link: ' + paper_link + '<br />'
    if 'paperType' in paper:
      description += 'Art des Papers: ' + paper['paperType'] + '<br />'
    if 'publishedDate' in paper:
      description += u"Erstellt am: %s<br />" % dateutil.parser.parse(paper['publishedDate']).strftime('%d.%m.%Y')
    if 'lastModified' in paper:
      description += u"Zuletzt geändert am: %s<br />" % dateutil.parser.parse(paper['lastModified']).strftime('%d.%m.%Y')
    
    etree.SubElement(item, "pubDate").text = util.rfc1123date(paper['lastModified'] if 'lastModified' in paper else datetime.datetime.now())
    etree.SubElement(item, "title").text = paper['name'] if 'name' in paper else 'Kein Titel'
    etree.SubElement(item, "description").text = description
    etree.SubElement(item, "link").text = paper_link
    etree.SubElement(item, "guid").text = paper_link
  
  response = make_response(etree.tostring(root, pretty_print=True), 200)
  response.mimetype = 'application/rss+xml'
  response.headers['Expires'] = util.expires_date(hours=24)
  response.headers['Cache-Control'] = util.cache_max_age(hours=24)
  return response
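
The nsmap keyword used when creating the rss element exists only in lxml, not in the standard library's xml.etree, so this feed view depends on lxml. A self-contained skeleton of the same structure, with placeholder values:

# Self-contained skeleton of the feed structure built above; requires lxml
# (the nsmap= keyword is not available in xml.etree). Values are placeholders.
from lxml import etree

root = etree.Element('rss', version='2.0',
                     nsmap={'atom': 'http://www.w3.org/2005/Atom'})
channel = etree.SubElement(root, 'channel')
etree.SubElement(channel, 'title').text = 'Feed title'
etree.SubElement(channel, 'link').text = 'http://example.com/search'
etree.SubElement(channel, '{http://www.w3.org/2005/Atom}link',
                 href='http://example.com/feed', rel='self',
                 type='application/rss+xml')
print(etree.tostring(root, pretty_print=True).decode())
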
Example #14
def region_search():
  start_time = time.time()
  result = []
  ret = {
    'status': 0,
    'duration': round((time.time() - start_time) * 1000),
    'response': result
  }
  json_output = json.dumps(ret, cls=util.MyEncoder, sort_keys=True)
  response = make_response(json_output, 200)
  response.mimetype = 'application/json'
  response.headers['Expires'] = util.expires_date(hours=24)
  response.headers['Cache-Control'] = util.cache_max_age(hours=24)
  return response
Example #15
def api_geocode():
    start = time.time()
    jsonp_callback = request.args.get('callback', None)
    street = request.args.get('street', '')
    if street == '':
        abort(400)
    obj = {'result': util.geocode(street)}
    obj['duration'] = int((time.time() - start) * 1000)
    json_output = json.dumps(obj, sort_keys=True)
    if jsonp_callback is not None:
        json_output = jsonp_callback + '(' + json_output + ')'
    response = make_response(json_output, 200)
    response.mimetype = 'application/json'
    response.headers['Expires'] = util.expires_date(hours=24)
    response.headers['Cache-Control'] = util.cache_max_age(hours=24)
    return response
Example #16
def api_geocode():
    start = time.time()
    jsonp_callback = request.args.get("callback", None)
    street = request.args.get("street", "")
    if street == "":
        abort(400)
    obj = {"result": util.geocode(street)}
    obj["duration"] = int((time.time() - start) * 1000)
    json_output = json.dumps(obj, sort_keys=True)
    if jsonp_callback is not None:
        json_output = jsonp_callback + "(" + json_output + ")"
    response = make_response(json_output, 200)
    response.mimetype = "application/json"
    response.headers["Expires"] = util.expires_date(hours=24)
    response.headers["Cache-Control"] = util.cache_max_age(hours=24)
    return response
Example #17
def oparl_basic(content_function, params=None, direct_output=False):
  start_time = time.time()
  jsonp_callback = request.args.get('callback', None)
  if not params:
    params = {}
  request_info = {}
  html = request.args.get('html', False)
  if html:
    request_info['html'] = 1
  extended_info = request.args.get('i')
  extended_info = extended_info == '1'
  if extended_info:
    request_info['i'] = 1
  search_query = request.args.get('q', "")
  if search_query:
    request_info['q'] = search_query
  page = request.args.get('page')
  try:
    page = int(page)
  except (ValueError, TypeError):
    page = 1
  request_info['page'] = page
  params.update(request_info)
  response = content_function(params)
  if direct_output:
    return response
  if extended_info:
    ret = {
      'status': 0,
      'duration': int((time.time() - start_time) * 1000),
      'request': request_info,
      'response': response
    }
  else:
    ret = response
  json_output = json.dumps(ret, cls=util.MyEncoder)#, sort_keys=True)
  if jsonp_callback is not None:
    json_output = jsonp_callback + '(' + json_output + ')'
  if html:
    return render_template('oparl.html', data=json.JSONDecoder(object_pairs_hook=OrderedDict).decode(json_output))
  else:
    response = make_response(json_output, 200)
    response.mimetype = 'application/json'
    response.headers['Expires'] = util.expires_date(hours=24)
    response.headers['Cache-Control'] = util.cache_max_age(hours=24)
    response.headers['Access-Control-Allow-Origin'] = '*'
    return response
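
oparl_basic acts as a wrapper around all OParl views: it parses the query parameters they share (callback, html, i, q, page), merges them into params, delegates to the given content function, and serializes the result. A usage sketch; the route path and content function below are assumptions, not part of this listing:

# Usage sketch for the wrapper above; the route path and the content function
# are assumptions (oparl_basic and app come from the surrounding application).
def oparl_system_data(params):
    # content function: receives the merged params dict ('page' is always set)
    return {'type': 'https://schema.oparl.org/1.0/System', 'page': params['page']}

@app.route('/oparl/system')
def oparl_system():
    return oparl_basic(oparl_system_data)
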
Example #18
def api_streets():
    start_time = time.time()
    jsonp_callback = request.args.get("callback", None)
    lon = request.args.get("lon", "")
    lat = request.args.get("lat", "")
    region = request.args.get("region", app.config["region_default"])
    radius = request.args.get("radius", "1000")
    if lat == "" or lon == "":
        abort(400)
    lon = float(lon)
    lat = float(lat)
    radius = int(radius)
    radius = min(radius, 500)
    streets = db.get_locations(lon, lat, radius)
    result = {}
    # TODO: use msearch for getting paper num
    for street in streets:
        nodes = []
        for point in street["nodes"]:
            nodes.append(point["location"]["coordinates"])
        if street["name"] in result:
            result[street["name"]]["nodes"].append(nodes)
        else:
            search_result = db.query_paper_num(region, street["name"])
            result[street["name"]] = {"name": street["name"], "nodes": [nodes], "paper_count": search_result["num"]}
            if "name" in search_result:
                result[street["name"]]["paper_name"] = search_result["name"]
            if "name" in search_result:
                result[street["name"]]["paper_publishedDate"] = search_result["publishedDate"]
    ret = {
        "status": 0,
        "duration": round((time.time() - start_time) * 1000),
        "request": {"lon": lon, "lat": lat, "radius": radius, "region": region},
        "response": result,
    }
    try:
        json_output = json.dumps(ret, cls=util.MyEncoder, sort_keys=True)
    except AttributeError:
        return None

    if jsonp_callback is not None:
        json_output = jsonp_callback + "(" + json_output + ")"
    response = make_response(json_output, 200)
    response.mimetype = "application/json"
    response.headers["Expires"] = util.expires_date(hours=24)
    response.headers["Cache-Control"] = util.cache_max_age(hours=24)
    return response
Example #19
def attachment_download(attachment_id, extension):
    """
    Download eines Attachments
    """
    attachment_info = db.get_attachment(attachment_id)
    #pprint.pprint(attachment_info)
    if attachment_info is None:
        # TODO: render a more informative 404 page
        abort(404)
    # extension doesn't match file extension (avoiding arbitrary URLs)
    proper_extension = attachment_info['filename'].split('.')[-1]
    if proper_extension != extension:
        abort(404)

    # 'file' property is not set (e.g. due to depublication)
    if 'file' not in attachment_info:
        if 'depublication' in attachment_info:
            abort(410)  # Gone
        else:
            # TODO: log this as inexplicable...
            abort(500)

    # handle conditional GET
    if 'If-Modified-Since' in request.headers:
        file_date = attachment_info['file']['uploadDate'].replace(tzinfo=None)
        request_date = util.parse_rfc1123date(request.headers['If-Modified-Since'])
        difference = file_date - request_date
        if difference < datetime.timedelta(0, 1):  # 1 second
            return Response(status=304)

    #if 'if-none-match' in request.headers:
    #    print "Conditional GET: If-None-Match"
    # TODO: handle ETag in request

    handler = db.get_file(attachment_info['file']['_id'])
    response = make_response(handler.read(), 200)
    response.mimetype = attachment_info['mimetype']
    response.headers['X-Robots-Tag'] = 'noarchive'
    response.headers['ETag'] = attachment_info['sha1']
    response.headers['Last-Modified'] = util.rfc1123date(
        attachment_info['file']['uploadDate'])
    response.headers['Expires'] = util.expires_date(hours=(24 * 30))
    response.headers['Cache-Control'] = util.cache_max_age(hours=(24 * 30))
    return response
Example #20
def api_papers():
    """
  API-Methode zur Suche von Paper
  """
    start_time = time.time()
    jsonp_callback = request.args.get('callback', None)
    ref = request.args.get('reference', '')
    q = request.args.get('q', '*:*')
    fq = request.args.get('fq', '')
    sort = request.args.get('sort', 'score:desc')
    start = int(request.args.get('start', '0'))
    papers_per_page = int(request.args.get('ppp', '10'))
    date_param = request.args.get('date', '')
    region = request.args.get('r', '')
    output = request.args.get('output', '').split(',')
    get_facets = 'facets' in output
    request_info = {}  # info about the request

    # run the search
    query = db.query_paper(region=region,
                           q=q,
                           fq=fq,
                           sort=sort,
                           start=start,
                           papers_per_page=papers_per_page,
                           facets=get_facets)

    ret = {
        'status': 0,
        'duration': int((time.time() - start_time) * 1000),
        'request': request_info,
        'response': query
    }

    ret['response']['start'] = start
    ret['request']['sort'] = sort
    ret['request']['fq'] = fq

    json_output = json.dumps(ret, cls=util.MyEncoder, sort_keys=True)
    if jsonp_callback is not None:
        json_output = jsonp_callback + '(' + json_output + ')'
    response = make_response(json_output, 200)
    response.mimetype = 'application/json'
    response.headers['Expires'] = util.expires_date(hours=24)
    response.headers['Cache-Control'] = util.cache_max_age(hours=24)
    return response
Example #21
def attachment_download(attachment_id, extension):
    """
    Download eines Attachments
    """
    attachment_info = db.get_attachment(attachment_id)
    #pprint.pprint(attachment_info)
    if attachment_info is None:
        # TODO: render a more informative 404 page
        abort(404)
    # extension doesn't match file extension (avoiding arbitrary URLs)
    proper_extension = attachment_info['filename'].split('.')[-1]
    if proper_extension != extension:
        abort(404)

    # 'file' property is not set (e.g. due to depublication)
    if 'file' not in attachment_info:
        if 'depublication' in attachment_info:
            abort(410)  # Gone
        else:
            # TODO: log this as inexplicable...
            abort(500)

    # handle conditional GET
    if 'If-Modified-Since' in request.headers:
        file_date = attachment_info['file']['uploadDate'].replace(tzinfo=None)
        request_date = util.parse_rfc1123date(
            request.headers['If-Modified-Since'])
        difference = file_date - request_date
        if difference < datetime.timedelta(0, 1):  # 1 second
            return Response(status=304)

    #if 'if-none-match' in request.headers:
    #    print "Conditional GET: If-None-Match"
    # TODO: handle ETag in request

    handler = db.get_file(attachment_info['file']['_id'])
    response = make_response(handler.read(), 200)
    response.mimetype = attachment_info['mimetype']
    response.headers['X-Robots-Tag'] = 'noarchive'
    response.headers['ETag'] = attachment_info['sha1']
    response.headers['Last-Modified'] = util.rfc1123date(
        attachment_info['file']['uploadDate'])
    response.headers['Expires'] = util.expires_date(hours=(24 * 30))
    response.headers['Cache-Control'] = util.cache_max_age(hours=(24 * 30))
    return response
Example #22
def oparl_basic(content_function, params=None, direct_output=False):
  start_time = time.time()
  jsonp_callback = request.args.get('callback', None)
  if params is None:
    params = {}  # avoid sharing a mutable default dict between calls
  request_info = {}
  html = request.args.get('html', False)
  if html:
    request_info['html'] = 1
  extended_info = request.args.get('i')
  extended_info = extended_info == '1'
  if extended_info:
    request_info['i'] = 1
  search_query = request.args.get('q', "")
  if search_query:
    request_info['q'] = search_query
  page = request.args.get('page')
  try:
    page = int(page)
  except (ValueError, TypeError):
    page = 1
  request_info['page'] = page
  params.update(request_info)
  response = content_function(params)
  if direct_output:
    return response
  if extended_info:
    ret = {
      'status': 0,
      'duration': int((time.time() - start_time) * 1000),
      'request': request_info,
      'response': response
    }
  else:
    ret = response
  json_output = json.dumps(ret, cls=util.MyEncoder)#, sort_keys=True)
  if jsonp_callback is not None:
    json_output = jsonp_callback + '(' + json_output + ')'
  if html:
    return render_template('oparl.html', data=json.JSONDecoder(object_pairs_hook=OrderedDict).decode(json_output))
  else:
    response = make_response(json_output, 200)
    response.mimetype = 'application/json'
    response.headers['Expires'] = util.expires_date(hours=24)
    response.headers['Cache-Control'] = util.cache_max_age(hours=24)
    response.headers['Access-Control-Allow-Origin'] = '*'
    return response
Example #23
def api_geocode():
    start = time.time()
    jsonp_callback = request.args.get('callback', None)
    street = request.args.get('street', '')
    if street == '':
        abort(400)
    obj = {
        'result': util.geocode(street)
    }
    obj['duration'] = int((time.time() - start) * 1000)
    json_output = json.dumps(obj, sort_keys=True)
    if jsonp_callback is not None:
        json_output = jsonp_callback + '(' + json_output + ')'
    response = make_response(json_output, 200)
    response.mimetype = 'application/json'
    response.headers['Expires'] = util.expires_date(hours=24)
    response.headers['Cache-Control'] = util.cache_max_age(hours=24)
    return response
Example #24
def oparl_file_accessUrl_data(params):
  file_data = db.get_file(deref={'values': ['file']},
                              search_params={'_id': ObjectId(params['file_id'])})

  if len(file_data) == 0:
    # TODO: render a more informative 404 page
    abort(404)
  file_data = file_data[0]
  # extension doesn't match file extension (avoiding arbitrary URLs)
  #proper_extension = attachment_info['filename'].split('.')[-1]
  #if proper_extension != extension:
  #    abort(404)

  # 'file' property is not set (e.g. due to depublication)
  if 'file' not in file_data:
    if 'depublication' in file_data:
      abort(410)  # Gone
    else:
      # TODO: log this as inexplicable...
      abort(500)

  # handle conditional GET
  #if 'If-Modified-Since' in request.headers:
  #  file_date = attachment_info['file']['uploadDate'].replace(tzinfo=None)
  #  request_date = util.parse_rfc1123date(request.headers['If-Modified-Since'])
  #  difference = file_date - request_date
  #  if difference < datetime.timedelta(0, 1):  # 1 second
  #    return Response(status=304)

  #if 'if-none-match' in request.headers:
  #    print "Conditional GET: If-None-Match"
  # TODO: handle ETag in request

  handler = db.get_file_data(file_data['file']['_id'])
  response = make_response(handler.read(), 200)
  response.mimetype = file_data['mimetype']
  response.headers['X-Robots-Tag'] = 'noarchive'
  response.headers['ETag'] = file_data['sha1Checksum']
  response.headers['Last-Modified'] = util.rfc1123date(file_data['file']['uploadDate'])
  response.headers['Expires'] = util.expires_date(hours=(24 * 30))
  response.headers['Cache-Control'] = util.cache_max_age(hours=(24 * 30))
  return response
Example #25
def api_papers():
  """
  API-Methode zur Suche von Paper
  """
  start_time = time.time()
  jsonp_callback = request.args.get('callback', None)
  ref = request.args.get('reference', '')
  q = request.args.get('q', '*:*')
  fq = request.args.get('fq', '')
  sort = request.args.get('sort', 'score:desc')
  start = int(request.args.get('start', '0'))
  papers_per_page = int(request.args.get('ppp', '10'))
  date_param = request.args.get('date', '')
  region = request.args.get('r', '')
  output = request.args.get('output', '').split(',')
  get_facets = 'facets' in output
  request_info = {}  # info about the request
  
  # run the search
  query = db.query_paper(region=region, q=q, fq=fq, sort=sort, start=start,
             papers_per_page=papers_per_page, facets=get_facets)
  
  ret = {
    'status': 0,
    'duration': int((time.time() - start_time) * 1000),
    'request': request_info,
    'response': query
  }
  
  ret['response']['start'] = start
  ret['request']['sort'] = sort
  ret['request']['fq'] = fq

  json_output = json.dumps(ret, cls=util.MyEncoder, sort_keys=True)
  if jsonp_callback is not None:
    json_output = jsonp_callback + '(' + json_output + ')'
  response = make_response(json_output, 200)
  response.mimetype = 'application/json'
  response.headers['Expires'] = util.expires_date(hours=24)
  response.headers['Cache-Control'] = util.cache_max_age(hours=24)
  return response
Example #26
def api_locations():
    start_time = time.time()
    jsonp_callback = request.args.get("callback", None)
    street = request.args.get("street", "")
    if street == "":
        abort(400)
    result = db.get_locations_by_name(street)
    ret = {
        "status": 0,
        "duration": round((time.time() - start_time) * 1000),
        "request": {"street": street},
        "response": result,
    }
    json_output = json.dumps(ret, cls=util.MyEncoder, sort_keys=True)
    if jsonp_callback is not None:
        json_output = jsonp_callback + "(" + json_output + ")"
    response = make_response(json_output, 200)
    response.mimetype = "application/json"
    response.headers["Expires"] = util.expires_date(hours=24)
    response.headers["Cache-Control"] = util.cache_max_age(hours=24)
    return response
Example #27
def suche():
    """
    URL-Parameter:
    q: Suchanfrage, nutzer-formuliert
    fq: Filter query (Lucene Syntax)
    sort: Sortierung, z.B. "id asc"
    start: Offset im Suchergebnis
    num: Anzahl der Treffer pro Seite
    date: Datumsbereich als String
    """
    search_settings = {}
    search_settings['q'] = request.args.get('q', '')
    search_settings['fq'] = request.args.get('fq', '')
    search_settings['sort'] = request.args.get('sort', '')
    search_settings['start'] = int(request.args.get('start', '0'))
    search_settings['num'] = int(request.args.get('num', '10'))
    search_settings['num'] = min(search_settings['num'], 100)  # max 100 items
    search_settings['date'] = request.args.get('date', '')
    html = render_template('suche.html', search_settings=search_settings)
    response = make_response(html, 200)
    response.headers['Expires'] = util.expires_date(hours=24)
    response.headers['Cache-Control'] = util.cache_max_age(hours=24)
    return response
Example #28
def api_locations():
    start_time = time.time()
    jsonp_callback = request.args.get('callback', None)
    street = request.args.get('street', '')
    if street == '':
        abort(400)
    result = db.get_locations_by_name(street)
    ret = {
        'status': 0,
        'duration': round((time.time() - start_time) * 1000),
        'request': {
            'street': street
        },
        'response': result
    }
    json_output = json.dumps(ret, cls=util.MyEncoder, sort_keys=True)
    if jsonp_callback is not None:
        json_output = jsonp_callback + '(' + json_output + ')'
    response = make_response(json_output, 200)
    response.mimetype = 'application/json'
    response.headers['Expires'] = util.expires_date(hours=24)
    response.headers['Cache-Control'] = util.cache_max_age(hours=24)
    return response
Example #29
def api_papers():
    """
  API-Methode zur Suche von Paper
  """
    start_time = time.time()
    jsonp_callback = request.args.get("callback", None)
    ref = request.args.get("reference", "")
    q = request.args.get("q", "*:*")
    fq = request.args.get("fq", "")
    sort = request.args.get("sort", "score:desc")
    start = int(request.args.get("start", "0"))
    papers_per_page = int(request.args.get("ppp", "10"))
    date_param = request.args.get("date", "")
    region = request.args.get("r", "")
    output = request.args.get("output", "").split(",")
    get_facets = "facets" in output
    request_info = {}  # info about the request

    # run the search
    query = db.query_paper(
        region=region, q=q, fq=fq, sort=sort, start=start, papers_per_page=papers_per_page, facets=get_facets
    )

    ret = {"status": 0, "duration": int((time.time() - start_time) * 1000), "request": request_info, "response": query}

    ret["response"]["start"] = start
    ret["request"]["sort"] = sort
    ret["request"]["fq"] = fq

    json_output = json.dumps(ret, cls=util.MyEncoder, sort_keys=True)
    if jsonp_callback is not None:
        json_output = jsonp_callback + "(" + json_output + ")"
    response = make_response(json_output, 200)
    response.mimetype = "application/json"
    response.headers["Expires"] = util.expires_date(hours=24)
    response.headers["Cache-Control"] = util.cache_max_age(hours=24)
    return response
Example #30
def api_streets():
    start_time = time.time()
    jsonp_callback = request.args.get('callback', None)
    lon = request.args.get('lon', '')
    lat = request.args.get('lat', '')
    radius = request.args.get('radius', '1000')
    if lat == '' or lon == '':
        abort(400)
    lon = float(lon)
    lat = float(lat)
    radius = int(radius)
    radius = min(radius, 500)
    result = db.get_locations(lon, lat, radius)
    ret = {
        'status': 0,
        'duration': round((time.time() - start_time) * 1000),
        'request': {
            'lon': lon,
            'lat': lat,
            'radius': radius
        },
        'response': result
    }
    try:
        json_output = json.dumps(ret, cls=util.MyEncoder, sort_keys=True)
    except AttributeError:
        print(ret, file=sys.stderr)
        return None

    if jsonp_callback is not None:
        json_output = jsonp_callback + '(' + json_output + ')'
    response = make_response(json_output, 200)
    response.mimetype = 'application/json'
    response.headers['Expires'] = util.expires_date(hours=24)
    response.headers['Cache-Control'] = util.cache_max_age(hours=24)
    return response
Example #31
def search_traffic_items_es():
  start_time = time.time()
  limits = request.form.get('l', None)
  traffic_item_type = request.form.get('traffic_item_type', None)
  construction_site_date = request.form.get('date', None)
  occupancy_rate = request.form.get('occupancy_rate', None)
  zoom = request.form.get('zoom', None)
  saved_request = {
    'limits': limits,
    'traffic_item_type': traffic_item_type,
    'construction_site_date': construction_site_date,
    'occupancy_rate': occupancy_rate,
    'zoom': zoom
  }
  if limits:
    limits = limits.split(';')
  
  query_parts_must = []
  query_parts_should = []
  
  if traffic_item_type:
    traffic_item_type = traffic_item_type.split(',')
    query_parts_must.append({
      'terms': {
        'traffic_item_type': traffic_item_type
      }
    })
  
  
  if traffic_item_type and '1' in traffic_item_type:
    query_parts_should.append(
      {
        'bool': {
          'must': [
            {
              'range': {
                'start': {
                  'lte': construction_site_date
                }
              }
            },
            {
              'range': {
                'end': {
                  'gte': construction_site_date
                }
              }
            },
            {
              'term': {
                'traffic_item_type': 1
              }
            }
          ]
        }
      }
    )
    
  
  if traffic_item_type and '2' in traffic_item_type:
    query_parts_should.append(
      {
        'bool': {
          'must': [
            #{
            #  'range': {
            #    'occupancy_rate': {
            #      'gte': occupancy_rate
            #    }
            #  }
            #},
            {
              'term': {
                'traffic_item_type': 2
              }
            }
          ]
        }
      }
    )
  if limits:
    limit_queries = {}
    for limit in limits:
      if limit.find('<=') >= 0:
        limit_split = limit.split('<=')
        if (limit_split[0] not in limit_queries):
          limit_queries[limit_split[0]] = {}
        limit_queries[limit_split[0]]['lte'] = limit_split[1]
      elif limit.find('>=') >= 0:
        limit_split = limit.split('>=')
        if (limit_split[0] not in limit_queries):
          limit_queries[limit_split[0]] = {}
        limit_queries[limit_split[0]]['gte'] = limit_split[1]
      elif limit.find('>') >= 0:
        limit_split = limit.split('>')
        if (limit_split[0] not in limit_queries):
          limit_queries[limit_split[0]] = {}
        limit_queries[limit_split[0]]['gt'] = limit_split[1]
      elif limit.find('<') >= 0:
        limit_split = limit.split('<')
        if (limit_split[0] not in limit_queries):
          limit_queries[limit_split[0]] = {}
        limit_queries[limit_split[0]]['lt'] = limit_split[1]
    for limit_query_key, limit_query_value in limit_queries.items():
      query_parts_must.append({
        'range': {
          limit_query_key: limit_query_value
        }
      })
  
  query = {
    'query': {
      'constant_score': {
        'filter': {
          'bool': {
            'must': [{"match_all": {}}] + query_parts_must,
            'should': query_parts_should
          }
        }
      }
    }
  }
  
  es_result = es.search(
    index=app.config['TRAFFIC_ITEMS_ES'] + '-latest',
    doc_type='traffic_item',
    fields='id,location.lat,location.lon,traffic_item_type,area,start,end,occupancy_rate',
    body=query,
    size=10000
  )
  result = []
  for single in es_result['hits']['hits']:
    item = {
      'id': single['fields']['id'][0],
      'lat': single['fields']['location.lat'][0],
      'lon': single['fields']['location.lon'][0],
      'type': single['fields']['traffic_item_type'][0]
    }
    if 'area' in single['fields']:
      item['area'] = json.loads(single['fields']['area'][0])
    if 'start' in single['fields']:
      item['start'] = single['fields']['start'][0]
    if 'end' in single['fields']:
      item['end'] = single['fields']['end'][0]
    if 'occupancy_rate' in single['fields']:
      item['occupancy_rate'] = single['fields']['occupancy_rate'][0]
    result.append(item)
  ret = {
    'status': 0,
    'request': saved_request,
    'duration': round((time.time() - start_time) * 1000),
    'response': result
  }
  json_output = json.dumps(ret, cls=util.MyEncoder, sort_keys=True)
  response = make_response(json_output, 200)
  response.mimetype = 'application/json'
  response.headers['Expires'] = util.expires_date(hours=24)
  response.headers['Cache-Control'] = util.cache_max_age(hours=24)
  return response
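
The limit parsing above turns filter strings like 'speed<=50;lanes>=2' into Elasticsearch range clauses; note that the two-character operators have to be tried before their one-character prefixes, since '<=' also contains '<'. The same logic as a stand-alone sketch:

# Stand-alone sketch of the limit parsing above; same behavior, written as a
# table of operators so that '<=' and '>=' are tried before '<' and '>'.
def parse_limits(limits_string):
    limit_queries = {}
    for limit in limits_string.split(';'):
        for op, bound in (('<=', 'lte'), ('>=', 'gte'), ('>', 'gt'), ('<', 'lt')):
            if op in limit:
                field, value = limit.split(op, 1)
                limit_queries.setdefault(field, {})[bound] = value
                break
    return [{'range': {field: bounds}} for field, bounds in limit_queries.items()]

# parse_limits('speed<=50;lanes>=2')
# -> [{'range': {'speed': {'lte': '50'}}}, {'range': {'lanes': {'gte': '2'}}}]
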
Example #32
def search_traffic_items():
  start_time = time.time()
  limits = request.form.get('l', None)
  traffic_item_type = request.form.get('traffic_item_type', None)
  construction_site_date = request.form.get('date', None)
  occupancy_rate = request.form.get('occupancy_rate', None)
  zoom = request.form.get('zoom', None)
  processed = request.form.get('processed', 1, type=int)
  
  saved_request = {
    'limits': limits,
    'traffic_item_type': traffic_item_type,
    'construction_site_date': construction_site_date,
    'occupancy_rate': occupancy_rate,
    'zoom': zoom
  }
  if not limits:
    abort(400)  # the bounding-box limits are required for this query
  limits = limits.split(';')
  
  result_raw = mongo.db.traffic_item.find({
    '$and': [
      {
        'properties.processed': processed
      },
      {
        'geometry': {
          '$geoIntersects': {
            '$geometry': {
              'type':  'Polygon',
              'coordinates': [
                [
                  [float(limits[2]), float(limits[0])],
                  [float(limits[2]), float(limits[1])],
                  [float(limits[3]), float(limits[1])],
                  [float(limits[3]), float(limits[0])],
                  [float(limits[2]), float(limits[0])]
                ]
              ]
            }
          }
        }
      },
      {
        'properties.zoom_level_min': {
          '$lt': int(zoom)
        }
      },
      {
        '$or': [
          {
            '$and': [
              {
                'properties.validityOverallStartTime': {
                  '$lt': int(construction_site_date)
                },
              },
              {
                'properties.validityOverallEndTime': {
                  '$gt': int(construction_site_date)
                },
              },
              {
                'properties.traffic_item_type': {
                  '$eq': 1
                }
              }
            ]
          },
          {
            '$and': [
              {
                'properties.occupancy_rate': {
                  '$lt': float(occupancy_rate) / 100.0
                }
              },
              {
                'properties.traffic_item_type': {
                  '$eq': 2
                }
              }
            ]
          },
          {
            
            'properties.traffic_item_type': {
              '$eq': 3
            }
          }
        ]
      }
    ]
  })
  result = list(result_raw)
  
  ret = {
    'status': 0,
    'request': saved_request,
    'duration': round((time.time() - start_time) * 1000),
    'response': {
      'type': 'FeatureCollection',
      'features': result
    }
  }
  json_output = json.dumps(ret, cls=util.MyEncoder, sort_keys=True)
  response = make_response(json_output, 200)
  response.mimetype = 'application/json'
  response.headers['Expires'] = util.expires_date(hours=24)
  response.headers['Cache-Control'] = util.cache_max_age(hours=24)
  return response
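
The $geoIntersects filter is built from the semicolon-separated l parameter holding the map's bounding box; judging by the indices, limits[0] and limits[1] are latitudes, limits[2] and limits[3] longitudes, and GeoJSON expects [lon, lat] pairs with the ring closed on its first point. The polygon construction in isolation (argument roles inferred from the code):

# The bounding-box polygon built above, in isolation; argument order is
# inferred from how limits[0..3] are used in the query.
def bbox_polygon(lat_south, lat_north, lon_west, lon_east):
    return {
        'type': 'Polygon',
        'coordinates': [[
            [lon_west, lat_south],
            [lon_west, lat_north],
            [lon_east, lat_north],
            [lon_east, lat_south],
            [lon_west, lat_south],  # GeoJSON rings repeat the first point
        ]]
    }
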
Example #33
def api_documents():
    """
    API-Methode zur Suche von Dokumenten bzw. zum Abruf eines einzelnen
    Dokuments anhand einer Kennung (reference).
    Ist der URL-Parameter "reference" angegeben, handelt es sich um eine
    Dokumentenabfrage anhand der Kennung(en). Ansonsten ist es eine Suche.
    """
    start_time = time.time()
    jsonp_callback = request.args.get("callback", None)
    ref = request.args.get("reference", "")
    references = ref.split(",")
    if references == [""]:
        references = None
    output = request.args.get("output", "").split(",")
    q = request.args.get("q", "*:*")
    fq = request.args.get("fq", "")
    sort = request.args.get("sort", "score desc")
    start = int(request.args.get("start", "0"))
    numdocs = int(request.args.get("docs", "10"))
    date_param = request.args.get("date", "")
    get_attachments = "attachments" in output
    get_thumbnails = "thumbnails" in output and get_attachments
    get_consultations = "consultations" in output
    get_facets = "facets" in output
    # get_relations = 'relations' in output
    request_info = {}  # info about the request
    query = False
    docs = False
    submission_ids = []
    # TODO: decide what happens with get_relations
    """
    Based on the parameters passed in, this decides whether a Solr search is
    run or whether the lookup can be done directly via identifiers
    (references).
    """
    if references is None:
        # run the search
        # (fill the references list via the search engine)
        query = db.query_submissions(
            q=q, fq=fq, sort=sort, start=start, docs=numdocs, date=date_param, facets=get_facets
        )
        if query["numhits"] > 0:
            submission_ids = [x["_id"] for x in query["result"]]
        else:
            docs = []
    else:
        # direct lookup
        request_info = {"references": references}
    request_info["output"] = output

    # fetch the required documents from the database
    if references is not None:
        docs = db.get_submissions(
            references=references,
            get_attachments=get_attachments,
            get_consultations=get_consultations,
            get_thumbnails=get_thumbnails,
        )
    elif len(submission_ids) > 0:
        docs = db.get_submissions(
            submission_ids=submission_ids,
            get_attachments=get_attachments,
            get_consultations=get_consultations,
            get_thumbnails=get_thumbnails,
        )

    ret = {"status": 0, "duration": int((time.time() - start_time) * 1000), "request": request_info, "response": {}}
    if docs:
        ret["response"]["documents"] = docs
        ret["response"]["numdocs"] = len(docs)
        if query and "maxscore" in query:
            ret["response"]["maxscore"] = query["maxscore"]
        for n in range(len(docs)):
            docs[n]["reference"] = docs[n]["identifier"]
            del docs[n]["identifier"]

    if query:
        ret["response"]["numhits"] = query["numhits"]
        if get_facets and "facets" in query:
            ret["response"]["facets"] = query["facets"]

    ret["response"]["start"] = start
    ret["request"]["sort"] = sort

    json_output = json.dumps(ret, cls=util.MyEncoder, sort_keys=True)
    if jsonp_callback is not None:
        json_output = jsonp_callback + "(" + json_output + ")"
    response = make_response(json_output, 200)
    response.mimetype = "application/json"
    response.headers["Expires"] = util.expires_date(hours=24)
    response.headers["Cache-Control"] = util.cache_max_age(hours=24)
    return response
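
As the docstring says, the endpoint dispatches on the presence of reference: with it, documents are fetched directly by identifier; without it, a search runs first and fills the id list. Two example calls; the route path and the sample reference value are assumptions:

# Two example calls matching the dispatch described in the docstring; the
# route path and the sample reference value are assumptions.
import requests

base = 'http://localhost:5000/api/documents'
# direct lookup by identifier(s)
direct = requests.get(base, params={'reference': 'VO/2014/123'}).json()
# search, with attachments and facets in the output
found = requests.get(base, params={'q': 'Schule', 'docs': 5,
                                   'output': 'attachments,facets'}).json()
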
Example #34
def api_documents():
    """
    API-Methode zur Suche von Dokumenten bzw. zum Abruf eines einzelnen
    Dokuments anhand einer Kennung (reference).
    Ist der URL-Parameter "reference" angegeben, handelt es sich um eine
    Dokumentenabfrage anhand der Kennung(en). Ansonsten ist es eine Suche.
    """
    start_time = time.time()
    jsonp_callback = request.args.get('callback', None)
    ref = request.args.get('reference', '')
    references = ref.split(',')
    if references == ['']:
        references = None
    output = request.args.get('output', '').split(',')
    q = request.args.get('q', '*:*')
    fq = request.args.get('fq', '')
    sort = request.args.get('sort', 'score desc')
    start = int(request.args.get('start', '0'))
    numdocs = int(request.args.get('docs', '10'))
    date_param = request.args.get('date', '')
    get_attachments = 'attachments' in output
    get_thumbnails = 'thumbnails' in output and get_attachments
    get_consultations = 'consultations' in output
    get_facets = 'facets' in output
    #get_relations = 'relations' in output
    request_info = {}  # info about the request
    query = False
    docs = False
    submission_ids = []
    # TODO: decide what happens with get_relations
    """
    Based on the parameters passed in, this decides whether an ES search is
    run or whether the lookup can be done directly via identifiers
    (references).
    """

    if references is None:
        # run the search
        # (fill the references list via the search engine)
        query = db.query_submissions(q=q,
                                     fq=fq,
                                     sort=sort,
                                     start=start,
                                     docs=numdocs,
                                     date=date_param,
                                     facets=get_facets)
        if query['numhits'] > 0:
            submission_ids = [x['_id'] for x in query['result']]
        else:
            docs = []
    else:
        # direct lookup
        request_info = {'references': references}
    request_info['output'] = output

    # Fetch the required documents from the database
    if references is not None:
        docs = db.get_submissions(references=references,
                                  get_attachments=get_attachments,
                                  get_consultations=get_consultations,
                                  get_thumbnails=get_thumbnails)
    elif len(submission_ids) > 0:
        docs = db.get_submissions(submission_ids=submission_ids,
                                  get_attachments=get_attachments,
                                  get_consultations=get_consultations,
                                  get_thumbnails=get_thumbnails)

    ret = {
        'status': 0,
        'duration': int((time.time() - start_time) * 1000),
        'request': request_info,
        'response': {}
    }
    if docs:
        ret['response']['documents'] = docs
        ret['response']['numdocs'] = len(docs)
        if query and 'maxscore' in query:
            ret['response']['maxscore'] = query['maxscore']
        for n in range(len(docs)):
            docs[n]['reference'] = docs[n]['identifier']
            del docs[n]['identifier']

    if query:
        ret['response']['numhits'] = query['numhits']
        if get_facets and 'facets' in query:
            ret['response']['facets'] = query['facets']

    ret['response']['start'] = start
    ret['request']['sort'] = sort
    ret['request']['fq'] = fq

    json_output = json.dumps(ret, cls=util.MyEncoder, sort_keys=True)
    if jsonp_callback is not None:
        json_output = jsonp_callback + '(' + json_output + ')'
    response = make_response(json_output, 200)
    response.mimetype = 'application/json'
    response.headers['Expires'] = util.expires_date(hours=24)
    response.headers['Cache-Control'] = util.cache_max_age(hours=24)
    return response
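As the docstring states, the endpoint operates in two modes. A short client-side sketch of both, using the `requests` library; the base URL and the reference values are invented for illustration:

import requests

BASE = 'https://example.org/api/documents'  # hypothetical deployment

# Mode 1: direct lookup by comma-separated reference(s)
r = requests.get(BASE, params={'reference': '15/1234,15/1235',
                               'output': 'attachments,thumbnails'})

# Mode 2: full-text search, paged via start/docs
r = requests.get(BASE, params={'q': 'Spielplatz', 'sort': 'score desc',
                               'start': 0, 'docs': 10, 'output': 'facets'})
print(r.json()['response'].get('numhits'))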
Example #38
0
def suche_feed():
    start_time = time.time()
    jsonp_callback = request.args.get('callback', None)
    q = request.args.get('q', '*:*')
    fq = request.args.get('fq', '')
    date_param = request.args.get('date', '')
    region = request.args.get('r', app.config['region_default'])

    # run the search
    query = db.query_paper(region=region,
                           q=q,
                           fq=fq,
                           sort='publishedDate:desc',
                           start=0,
                           papers_per_page=50,
                           facets=False)

    # Generate feed root element and meta information
    search_url = "%s/suche/?r=%s&q=%s&fq=%s" % (app.config['base_url'], region,
                                                q, fq)
    feed_url = "%s/suche/feed/?r=%s&q=%s&fq=%s" % (app.config['base_url'],
                                                   region, q, fq)
    root = etree.Element("rss",
                         version="2.0",
                         nsmap={'atom': 'http://www.w3.org/2005/Atom'})
    channel = etree.SubElement(root, "channel")
    etree.SubElement(
        channel, "title").text = 'Offenes Ratsinformationssystem: Paper-Feed'
    etree.SubElement(channel, "link").text = search_url
    etree.SubElement(channel, "language").text = 'de-de'
    description = u"Neue oder geänderte Dokumente mit dem Suchbegriff %s in %s" % (
        q, app.config['regions'][region]['name'])
    # TODO: include the filter restrictions (fq) in the description
    if fq:
        description += ''  # placeholder, see TODO above
    etree.SubElement(channel, "description").text = description
    etree.SubElement(channel,
                     '{http://www.w3.org/2005/Atom}link',
                     href=feed_url,
                     rel="self",
                     type="application/rss+xml")

    # Generate Result Items
    for paper in query['result']:
        item = etree.SubElement(channel, "item")
        paper_link = "%s/paper/%s" % (app.config['base_url'], paper['id'])
        description = 'Link: ' + paper_link + '<br />'
        if 'paperType' in paper:
            description += 'Art des Papers: ' + paper['paperType'] + '<br />'
        if 'publishedDate' in paper:
            description += u"Erstellt am: %s<br />" % dateutil.parser.parse(
                paper['publishedDate']).strftime('%d.%m.%Y')
        if 'modified' in paper:
            description += u"Zuletzt geändert am: %s<br />" % dateutil.parser.parse(
                paper['modified']).strftime('%d.%m.%Y')

        etree.SubElement(item, "pubDate").text = util.rfc1123date(
            paper['modified'] if 'modified' in paper else datetime.datetime.now())
        etree.SubElement(item, "title").text = paper['name'] if 'name' in paper else 'Kein Titel'
        etree.SubElement(item, "description").text = description
        etree.SubElement(item, "link").text = paper_link
        etree.SubElement(item, "guid").text = paper_link

    response = make_response(etree.tostring(root, pretty_print=True), 200)
    response.mimetype = 'application/rss+xml'
    response.headers['Expires'] = util.expires_date(hours=24)
    response.headers['Cache-Control'] = util.cache_max_age(hours=24)
    return response
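RSS 2.0 expects `pubDate` in RFC 1123 format, which is what `util.rfc1123date` provides above. Its implementation is not part of the listing; a minimal sketch, assuming it accepts either a datetime object or an ISO date string:

import datetime

import dateutil.parser

def rfc1123date(value):
    # Accepts either a datetime or an ISO date string (as stored on the
    # papers); assumes an English/C locale for day and month names
    if not isinstance(value, datetime.datetime):
        value = dateutil.parser.parse(value)
    return value.strftime('%a, %d %b %Y %H:%M:%S +0000')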
Example #39
0
def api_documents():
    """
    API-Methode zur Suche von Dokumenten bzw. zum Abruf eines einzelnen
    Dokuments anhand einer Kennung (reference).
    Ist der URL-Parameter "reference" angegeben, handelt es sich um eine
    Dokumentenabfrage anhand der Kennung(en). Ansonsten ist es eine Suche.
    """
    start_time = time.time()
    jsonp_callback = request.args.get('callback', None)
    ref = request.args.get('reference', '')
    references = ref.split(',')
    if references == ['']:
        references = None
    output = request.args.get('output', '').split(',')
    q = request.args.get('q', '*:*')
    fq = request.args.get('fq', '')
    sort = request.args.get('sort', 'score desc')
    start = int(request.args.get('start', '0'))
    numdocs = int(request.args.get('docs', '10'))
    date_param = request.args.get('date', '')
    get_attachments = 'attachments' in output
    get_thumbnails = 'thumbnails' in output and get_attachments
    get_consultations = 'consultations' in output
    get_facets = 'facets' in output
    # get_relations = 'relations' in output
    request_info = {}  # info about the request
    query = False
    docs = False
    submission_ids = []
    # TODO: decide what happens with get_relations
    # Based on the given parameters, decide whether a Solr search is run
    # or whether the lookup can be done directly by identifiers
    # (references).
    if references is None:
        # run the search
        # (fill the references list via the search engine)
        query = db.query_submissions(q=q, fq=fq, sort=sort, start=start,
                                     docs=numdocs, date=date_param,
                                     facets=get_facets)
        if query['numhits'] > 0:
            submission_ids = [x['_id'] for x in query['result']]
        else:
            docs = []
    else:
        # direct lookup
        request_info = {
            'references': references
        }
    request_info['output'] = output

    # Fetch the required documents from the database
    if references is not None:
        docs = db.get_submissions(references=references,
                                  get_attachments=get_attachments,
                                  get_consultations=get_consultations,
                                  get_thumbnails=get_thumbnails)
    elif len(submission_ids) > 0:
        docs = db.get_submissions(submission_ids=submission_ids,
                                  get_attachments=get_attachments,
                                  get_consultations=get_consultations,
                                  get_thumbnails=get_thumbnails)

    ret = {
        'status': 0,
        'duration': int((time.time() - start_time) * 1000),
        'request': request_info,
        'response': {}
    }
    if docs:
        ret['response']['documents'] = docs
        ret['response']['numdocs'] = len(docs)
        if query and 'maxscore' in query:
            ret['response']['maxscore'] = query['maxscore']
        for n in range(len(docs)):
            docs[n]['reference'] = docs[n]['identifier']
            del docs[n]['identifier']

    if query:
        ret['response']['numhits'] = query['numhits']
        if get_facets and 'facets' in query:
            ret['response']['facets'] = query['facets']

    ret['response']['start'] = start
    ret['request']['sort'] = sort

    json_output = json.dumps(ret, cls=util.MyEncoder, sort_keys=True)
    if jsonp_callback is not None:
        json_output = jsonp_callback + '(' + json_output + ')'
    response = make_response(json_output, 200)
    response.mimetype = 'application/json'
    response.headers['Expires'] = util.expires_date(hours=24)
    response.headers['Cache-Control'] = util.cache_max_age(hours=24)
    return response
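Both this example and Example #37 consume the dictionary returned by `db.query_submissions` without showing its structure. The shape the code relies on, inferred purely from the accesses (`numhits`, `result[n]['_id']`, `maxscore`, `facets`), with invented values:

query = {
    'numhits': 2,                  # total number of matches in the index
    'maxscore': 7.3,               # top relevance score, present for searches
    'result': [                    # the current result page
        {'_id': '4f1b3c0e5c'},     # database ids fed into get_submissions()
        {'_id': '4f1b3d1172'},
    ],
    'facets': {},                  # only populated when facets=True
}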
Example #40
0
def region_search():
  start_time = time.time()
  result = []
  search_string = request.args.get('q', False)
  # generate fulltext search string
  if not search_string:
    search_results = []
  else:
    search_string = search_string.split()
    search_string_to_complete = search_string[-1]
    query_parts = []
    query_parts.append({
      'match_phrase_prefix': {
        'name': search_string_to_complete.lower()
      }
    })
    if len(search_string[0:-1]):
      query_parts.append({
        'query_string': {
          'fields': ['name'],
          'query': " ".join(search_string[0:-1]),
          'default_operator': 'and'
        }
      })
    try:
      result = es.search(
        index = "%s-latest" % app.config['REGION_ES'],
        doc_type = 'regions',
        fields = 'name,slug,postalcode,location',
        body = {
          'query': {
            'bool': {
              'must': query_parts
            }
          },
          'aggs': {
            'fragment': {
              'terms': {
                'field': 'name',
                'include': {
                  'pattern': search_string_to_complete.lower() + '.*',
                  'flags': 'CANON_EQ|CASE_INSENSITIVE',
                },
                'min_doc_count': 0,
                'size': 10
              }
            }
          }
        },
        size = 10
      )
    except elasticsearch.NotFoundError:
      abort(403)
    search_results = []
    for dataset in result['hits']['hits']:
      tmp_search_result = {
        'name': dataset['fields']['name'][0],
        'postalcode': dataset['fields']['postalcode'][0] if len(dataset['fields']['postalcode']) else None,
        'slug': dataset['fields']['slug'][0]
      }
      search_results.append(tmp_search_result)

  ret = {
    'status': 0,
    'duration': round((time.time() - start_time) * 1000),
    'response': search_results
  }
  json_output = json.dumps(ret, cls=util.MyEncoder, sort_keys=True)
  response = make_response(json_output, 200)
  response.mimetype = 'application/json'
  response.headers['Expires'] = util.expires_date(hours=24)
  response.headers['Cache-Control'] = util.cache_max_age(hours=24)
  return response
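The query combines a `match_phrase_prefix` on the last token the user typed with a `query_string` match (operator AND) on the preceding tokens, i.e. a classic type-ahead search. An illustrative round trip; the endpoint path and the data are invented:

# GET /api/regions/search?q=bad+hom
# "hom" becomes the match_phrase_prefix, "bad" the query_string part,
# so the response would plausibly look like:
expected = {
    'status': 0,
    'duration': 12,
    'response': [
        {'name': 'Bad Homburg', 'postalcode': '61348', 'slug': 'bad-homburg'}
    ]
}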
Example #41
0
def search_sharing_stations():
    start_time = time.time()
    fq = request.form.get('fq', '')
    limits = request.form.get('l', None)
    vehicle_all = request.form.get('vehicle_all', None)
    # "0,20" is presumably the filter slider's full range, i.e. no restriction
    if vehicle_all == "0,20":
        vehicle_all = None
    vehicle_type = request.form.get('vehicle_type', None)
    # an empty or complete type list likewise means no restriction
    if vehicle_type == '' or vehicle_type == '1,2,3,4,5':
        vehicle_type = None
    sort = request.form.get('sort', 'name.sort:asc')
    start = int(request.form.get('start', '0'))
    per_page = int(request.form.get('pp', '50'))
    view_type = request.form.get('vt', 's')

    saved_request = {'sort': sort, 'start': start, 'per_page': per_page}
    if fq:
        saved_request['fq'] = fq
    if limits:
        limits = limits.split(';')

    (sort_field, sort_order) = sort.split(':')
    if sort_field == 'score':
        sort_field = '_score'
    # note: this dict is not used below; es.search() receives the string
    # form sort_field + ':' + sort_order instead
    sort = {sort_field: {'order': sort_order}}

    query_parts_must = []
    query_parts_filter = []
    query_parts_should = []
    # all_count
    if vehicle_all:
        vehicle_all = vehicle_all.split(',')
        query_parts_must.append({
            'range': {
                'vehicle_all': {
                    'gte': vehicle_all[0],
                    # slider value 20 stands for "20 or more"; 64 appears to
                    # be the effective upper bound in the index
                    'lte': 64 if vehicle_all[1] == '20' else vehicle_all[1]
                }
            }
        })
    # vehicle_type
    if vehicle_type:
        vehicle_type = vehicle_type.split(',')
        query_parts_filter.append({'terms': {'station_type': vehicle_type}})

    if limits:
        limit_queries = {}
        for limit in limits:
            if limit.find('<=') >= 0:
                limit_split = limit.split('<=')
                if (limit_split[0] not in limit_queries):
                    limit_queries[limit_split[0]] = {}
                limit_queries[limit_split[0]]['lte'] = limit_split[1]
            elif limit.find('>=') >= 0:
                limit_split = limit.split('>=')
                if (limit_split[0] not in limit_queries):
                    limit_queries[limit_split[0]] = {}
                limit_queries[limit_split[0]]['gte'] = limit_split[1]
            elif limit.find('>') >= 0:
                limit_split = limit.split('>')
                if (limit_split[0] not in limit_queries):
                    limit_queries[limit_split[0]] = {}
                limit_queries[limit_split[0]]['gt'] = limit_split[1]
            elif limit.find('<') >= 0:
                limit_split = limit.split('<')
                if (limit_split[0] not in limit_queries):
                    limit_queries[limit_split[0]] = {}
                limit_queries[limit_split[0]]['lt'] = limit_split[1]
        for limit_query_key, limit_query_value in limit_queries.items():
            query_parts_must.append(
                {'range': {
                    limit_query_key: limit_query_value
                }})
    query = {
        'query': {
            'bool': {
                'must': [{
                    "match_all": {}
                }] + query_parts_must,
                'filter': query_parts_filter
            }
        }
    }

    # 's' (standard view) is currently the only view type; assign the field
    # list unconditionally so that `fields` is always defined for es.search()
    fields = 'name,station_type,vehicle_all,location.lat,location.lon,sharing_provider.name,sharing_provider.slug'

    es_result = es.search(index=app.config['SHARING_STATION_ES'] + '-latest',
                          doc_type='sharing_station',
                          fields=fields,
                          body=query,
                          from_=start,
                          size=per_page,
                          sort=sort_field + ':' + sort_order)
    result = []
    for single in es_result['hits']['hits']:
        item = {
            'name': single['fields']['name'][0],
            'lat': single['fields']['location.lat'][0],
            'lon': single['fields']['location.lon'][0],
            'station_type': single['fields']['station_type'][0],
            'sharing_provider_slug': single['fields']['sharing_provider.slug'][0],
            'sharing_provider_name': single['fields']['sharing_provider.name'][0]
        }
        if 'vehicle_all' in single['fields']:
            item['vehicle_all'] = single['fields']['vehicle_all'][0]
        result.append(item)
    ret = {
        'status': 0,
        'request': saved_request,
        'duration': round((time.time() - start_time) * 1000),
        'response': result
    }
    json_output = json.dumps(ret, cls=util.MyEncoder, sort_keys=True)
    response = make_response(json_output, 200)
    response.mimetype = 'application/json'
    response.headers['Expires'] = util.expires_date(hours=24)
    response.headers['Cache-Control'] = util.cache_max_age(hours=24)
    return response
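The limit-parsing loop above can be exercised in isolation. A compact equivalent, runnable on its own (`parse_limits` is a name introduced here for illustration); the operator order matters so that `<=` and `>=` are tried before their single-character counterparts:

def parse_limits(limits_param):
    # Turn 'field<=value;field2>value2' into ES range query bodies
    queries = {}
    for limit in limits_param.split(';'):
        for op, key in (('<=', 'lte'), ('>=', 'gte'), ('>', 'gt'), ('<', 'lt')):
            if op in limit:
                field, value = limit.split(op, 1)
                queries.setdefault(field, {})[key] = value
                break
    return queries

# parse_limits('vehicle_all>=5;slots<10')
# -> {'vehicle_all': {'gte': '5'}, 'slots': {'lt': '10'}}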