Example No. 1
def jpg(self, cam=1, latesttime=None):
    """Return the latest camera image, optionally no newer than latesttime."""
    web.modified(self.lastupdate('jpg'))
    sql = []
    sql.append("""SELECT * FROM icecast.station_image
                  WHERE station_id = $stationid
                  AND camera_no = $camerano
                  AND image_datetime = (SELECT MAX(image_datetime)
                                          FROM icecast.station_image
                                          WHERE station_id = $stationid
                                          AND camera_no = $camerano""")
    variables = {
        'stationid': self.row['station_id'],
        'camerano': cam,
    }
    if latesttime:
        sql.append("AND image_datetime <= $latesttime")
        variables['latesttime'] = latesttime
    sql.append(")")  # close the MAX(image_datetime) subquery
    try:
        return db.query(' '.join(sql), variables)[0].image_content
    except IndexError:
        web.header("Content-Type", "text/html")
        raise web.HTTPError("404 not found", {},
                            "No camera image available.")
Example No. 2
def serve_file(path):
    '''Serve a static file from the given path. Sets the Last-Modified,
    Content-Type and Content-Encoding headers.'''
    import os
    import mimetypes
    from datetime import datetime
    if not os.path.exists(path):
        raise web.notfound()
    mdate = datetime.fromtimestamp(os.stat(path).st_mtime)
    web.modified(date=mdate, etag='')
    with open(path, 'rb') as fh:
        s = fh.read()
    mime, enc = mimetypes.guess_type(path, strict=False)
    if mime:
        web.header('Content-Type', mime)
    if enc:
        web.header('Content-Encoding', enc)
    return s
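serve_file can then be mounted on a URL pattern. A minimal sketch of such an app; the /static prefix, the STATIC_ROOT location, and the traversal guard are illustrative assumptions:

    import os
    import web

    STATIC_ROOT = os.path.abspath('static')

    urls = ('/static/(.*)', 'static_files')

    class static_files:
        def GET(self, name):
            # Normalize and prefix-check to block '..' path traversal.
            full = os.path.normpath(os.path.join(STATIC_ROOT, name))
            if not full.startswith(STATIC_ROOT + os.sep):
                raise web.notfound()
            return serve_file(full)

    if __name__ == '__main__':
        web.application(urls, globals()).run()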
Example No. 3
def render_cached_feed(events, format_type='atom'):
    last_modif_time = events[0].time_taken
    web.debug(last_modif_time)
    # HTTP conditional GET
    if not config.http_conditional_get or web.modified(last_modif_time):
        web.lastmodified(last_modif_time)
        web.header("Content-Type", "application/atom+xml")
        web.debug(len(events))
        print(render.events_atom_feed(config, last_modif_time, events))
Example No. 4
    def GET(self, category, key, value, size):
        i = web.input(default="true")
        key = key.lower()

        def notfound():
            if (config.default_image and i.default.lower() != "false"
                    and not i.default.startswith('http://')):
                return read_file(config.default_image)
            elif i.default.startswith('http://'):
                raise web.seeother(i.default)
            else:
                raise web.notfound("")

        def redirect(id):
            size_part = size and ("-" + size) or ""
            url = "/%s/id/%s%s.jpg" % (category, id, size_part)

            query = web.ctx.env.get('QUERY_STRING')
            if query:
                url += '?' + query
            raise web.found(url)

        if key == 'isbn':
            value = value.replace("-", "").strip()  # strip hyphens from ISBN
            # Disabling ratelimit as iptables is taking care of botnets.
            #value = self.ratelimit_query(category, key, value)
            value = self.query(category, key, value)
        elif key != 'id':
            value = self.query(category, key, value)

        d = value and db.details(value)
        if not d:
            return notfound()

        # set cache-for-ever headers only when requested with ID
        if key == 'id':
            etag = "%s-%s" % (d.id, size.lower())
            if not web.modified(trim_microsecond(d.created), etag=etag):
                raise web.notmodified()

            web.header('Cache-Control', 'public')
            # This image is not going to expire in the next 100 years.
            web.expires(100 * 365 * 24 * 3600)
        else:
            web.header('Cache-Control', 'public')
            # Allow the client to cache the image for 10 minutes to avoid
            # further requests.
            web.expires(10 * 60)

        web.header('Content-Type', 'image/jpeg')
        try:
            return read_image(d, size)
        except IOError:
            raise web.notfound()
Example No. 5
        def GET(self):
            i = web.input(v=None)
            v = doc.md5()
            if v != i.v:
                raise web.seeother(web.changequery(v=v))

            if web.modified(etag=v):
                oneyear = 365 * 24 * 3600
                web.header("Content-Type", content_type)
                web.header("Cache-Control", "Public, max-age=%d" % oneyear)
                web.lastmodified(doc.last_modified)
                web.expires(oneyear)
                return delegate.RawText(doc.get_text())
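This is the conditional-GET pattern that recurs throughout these examples: pass the resource's validators (last-modified date and/or ETag) to web.modified() and answer 304 Not Modified when the client's cached copy is still current. A minimal sketch of the bare pattern, assuming a web.py version where web.modified() returns a falsy value on a validator match (some versions raise web.notmodified() themselves); load_resource() is a hypothetical loader:

    class resource:
        def GET(self):
            body, mtime, etag = load_resource()  # hypothetical loader
            if not web.modified(mtime, etag=etag):
                raise web.notmodified()  # 304: the client's copy is current
            web.header('Content-Type', 'application/octet-stream')
            return body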
Example No. 6
    def GET(self, category, key, value, size):
        i = web.input(default="true")
        key = key.lower()
        
        def notfound():
            if config.default_image and i.default.lower() != "false" and not i.default.startswith('http://'):
                return read_file(config.default_image)
            elif i.default.startswith('http://'):
                raise web.seeother(i.default)
            else:
                raise web.notfound("")
                
        def redirect(id):
            size_part = size and ("-" + size) or ""
            url = "/%s/id/%s%s.jpg" % (category, id, size_part)
            
            query = web.ctx.env.get('QUERY_STRING')
            if query:
                url += '?' + query
            raise web.found(url)
        
        if key == 'isbn':
            value = value.replace("-", "").strip() # strip hyphens from ISBN
            # Disabling ratelimit as iptables is taking care of botnets.
            #value = self.ratelimit_query(category, key, value)
            value = self.query(category, key, value)
        elif key != 'id':
            value = self.query(category, key, value)
        
        d = value and self.get_details(value, size.lower())
        if not d:
            return notfound()
            
        # set cache-for-ever headers only when requested with ID
        if key == 'id':
            etag = "%s-%s" % (d.id, size.lower())
            if not web.modified(trim_microsecond(d.created), etag=etag):
                raise web.notmodified()

            web.header('Cache-Control', 'public')
            web.expires(100 * 365 * 24 * 3600) # this image is not going to expire in next 100 years.
        else:
            web.header('Cache-Control', 'public')
            web.expires(10*60) # Allow the client to cache the image for 10 mins to avoid further requests
        
        web.header('Content-Type', 'image/jpeg')
        try:
            return read_image(d, size)
        except IOError:
            raise web.notfound()
Example No. 7
def GET(self, path, filename):
    filepath = os.path.join(dirname, 'files', path, filename)
    try:
        mtime = os.path.getmtime(filepath)
        etag = str(mtime)
        last_modified = datetime.datetime.fromtimestamp(mtime)
        if web.modified(last_modified, etag):
            # Fall back to a generic type when the type can't be guessed.
            web.header('Content-Type',
                       mimetypes.guess_type(filename)[0]
                       or 'application/octet-stream')
            with open(filepath, 'rb') as f:
                return f.read()
        else:
            return web.notmodified()
    except OSError:  # includes FileNotFoundError
        return web.notfound()
Example No. 8
    def GET(self, category, key, value, size):
        i = web.input(default="true")
        key = key.lower()
        
        def notfound():
            if config.default_image and i.default.lower() != "false" and not i.default.startswith('http://'):
                return read_file(config.default_image)
            elif i.default.startswith('http://'):
                raise web.seeother(i.default)
            else:
                raise web.notfound("")
                
        def redirect(id):
            size_part = size and ("-" + size) or ""
            url = "/%s/id/%s%s.jpg" % (category, id, size_part)
            
            query = web.ctx.env.get('QUERY_STRING')
            if query:
                url += '?' + query
            raise web.found(url)
        
        if key != 'id':
            value = _query(category, key, value)
            if value is None:
                return notfound()
            else:
                return redirect(value)
        else:
            d = db.details(value)
            if not d:
                return notfound()
                
            etag = "%s-%s" % (d.id, size.lower())
            if not web.modified(trim_microsecond(d.created), etag=etag):
                raise web.notmodified()

            web.header('Cache-Control', 'public')
            web.expires(100 * 365 * 24 * 3600) # this image is not going to expire in next 100 years.
            web.header('Content-Type', 'image/jpeg')
            try:
                return read_image(d, size)
            except IOError:
                raise web.notfound()
Example No. 9
def GET(self, category, key, value, size):
    i = web.input(default="true")
    key = key.lower()

    d = _query(category, key, value)
    if d:
        if key == 'id':
            etag = "%s-%s" % (d.id, size.lower())
            if not web.modified(trim_microsecond(d.created), etag=etag):
                raise web.notmodified()
            web.header('Cache-Control', 'public')
            # This image is not going to expire in the next 100 years.
            web.expires(100 * 365 * 24 * 3600)

        web.header('Content-Type', 'image/jpeg')
        return read_image(d, size)
    elif (config.default_image and i.default.lower() != "false"
            and not i.default.startswith('http://')):
        return read_file(config.default_image)
    elif i.default.startswith('http://'):
        raise web.seeother(i.default)
    else:
        raise web.notfound("")
Example No. 10
    def GET(self, category, key, value, size):
        i = web.input(default="true")
        key = key.lower()

        def is_valid_url(url):
            return url.startswith("http://") or url.startswith("https://")

        def notfound():
            if key in ["id", "olid"] and config.get("upstream_base_url"):
                # this is only used in development
                base = web.rstrips(config.upstream_base_url, "/")
                raise web.redirect(base + web.ctx.fullpath)
            elif config.default_image and i.default.lower() != "false" and not is_valid_url(i.default):
                return read_file(config.default_image)
            elif is_valid_url(i.default):
                raise web.seeother(i.default)
            else:
                raise web.notfound("")

        def redirect(id):
            size_part = size and ("-" + size) or ""
            url = "/%s/id/%s%s.jpg" % (category, id, size_part)

            query = web.ctx.env.get('QUERY_STRING')
            if query:
                url += '?' + query
            raise web.found(url)

        if key == 'isbn':
            value = value.replace("-", "").strip() # strip hyphens from ISBN
            # Disabling ratelimit as iptables is taking care of botnets.
            #value = self.ratelimit_query(category, key, value)
            value = self.query(category, key, value)

            # Redirect isbn requests to archive.org.
            # This will heavily reduce the load on coverstore server.
            # The max_coveritem_index config parameter specifies the latest
            # olcovers items uploaded to archive.org.
            if value and self.is_cover_in_cluster(value):
                url = zipview_url_from_id(int(value), size)
                raise web.found(url)
        elif key == 'ia':
            url = self.get_ia_cover_url(value, size)
            if url:
                raise web.found(url)
            else:
                value = None # notfound or redirect to default. handled later.
        elif key != 'id':
            value = self.query(category, key, value)

        if value and safeint(value) in config.blocked_covers:
            raise web.notfound()

        # redirect to archive.org cluster for large size and original images whenever possible
        if value and (size == "L" or size == "") and self.is_cover_in_cluster(value):
            url = zipview_url_from_id(int(value), size)
            raise web.found(url)

        d = value and self.get_details(value, size.lower())
        if not d:
            return notfound()

        # set cache-for-ever headers only when requested with ID
        if key == 'id':
            etag = "%s-%s" % (d.id, size.lower())
            if not web.modified(trim_microsecond(d.created), etag=etag):
                raise web.notmodified()

            web.header('Cache-Control', 'public')
            web.expires(100 * 365 * 24 * 3600) # this image is not going to expire in next 100 years.
        else:
            web.header('Cache-Control', 'public')
            web.expires(10*60) # Allow the client to cache the image for 10 mins to avoid further requests

        web.header('Content-Type', 'image/jpeg')
        try:
            return read_image(d, size)
        except IOError:
            raise web.notfound()
Example No. 11
class Service(object):

    """Cache service

    This service implements the content delivering caching mechanism.
    All requests handled by this service produces cache manipulation
    on the Google Datastore (with a Memcache top layer).

    - origin: Set the origin url (type: String; mandatory)
    - forceTTL: Does not honor CacheControl value, replacing cache TTL by this value (type: Integer)
    - maxTTL: When the CacheControl value is honored (forceTTL not set), the cache TTL
    value cannot be greater than this value (otherwise, it is overriden). (type: Integer)
    - ignoreQueryString: Tell if the trailing HTTP query string is not taken into account
    to generate the cache object key in Datastore. In other terms, if this value is set
    to True, /url/path/obj.js?v=42 and /url/path/obj.js referer to the same object. (type: Boolean; default: False)
    - forwardPost: If it is True, POST requests will be forwarded, instead of being redirected (type: Boolean; default: True)
    - allowFlushFrom: Specify client IP which are allowed to make DELETE requests to flush
    cache object explicitly. (type: List)
    - disableIfModifiedSince: Disable IMS request during object refresh. (type: Boolean; default: False)
    - prefetch: (EXPERIMENTAL) Prefetch content from HTML or other pages. (type: Boolean; default: False)
    - headerBlacklist: Set list of origin headers to remove. (type: List)
    """

    origin = None
    forceTTL = None
    maxTTL = None
    ignoreQueryString = False
    forwardPost = True
    # Set your client IP address to authorize cache entry deletion
    allowFlushFrom = ['127.0.0.1']
    prefetch = False
    disableIfModifiedSince = False
    # undocumented: remove querystring before forwarding
    # the request to the origin
    stripForwardedQueryString = False

    # These headers won't be forwarded
    headerBlacklist = []
    _headerBlacklist = [
            'date',
            'last-modified',
            'via',
            'expires',
            'etag'
            ]

    def __init__(self):
        self.name = self.__class__.__name__
        self.cache = type(self.name, (Cache,), {})
        # Register the dynamic object globally
        # if not, pickle cannot find it for serialization
        globals()[self.name] = self.cache

    def GET(self, request, _beforeWriteCache=None):
        if self.ignoreQueryString is False:
            request += web.ctx.query
        try:
            cache = self.readCache(request)
            if cache is None:
                cache = self.writeCache(request, _beforeWriteCache=_beforeWriteCache)
        except runtime.DeadlineExceededError:
            raise web.SeeOther(self.origin + request, absolute=True)
        except CacheExpired as cache:
            cache = self.writeCache(request, cache=cache(), _beforeWriteCache=_beforeWriteCache)
        if not web.modified(cache.lastModified):
            raise web.HTTPError(status='304 Not Modified')
        web.header('Expires', web.httpdate(cache.expires))
        for h in cache.headers:
            print(h)
        return cache.data
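A concrete service is a subclass that overrides the class attributes documented above. A minimal sketch with illustrative values (the origin URL, TTL, and client IPs are assumptions, not part of the original):

    class StaticAssets(Service):
        origin = 'http://origin.example.com'  # mandatory upstream
        maxTTL = 24 * 3600        # cap honored Cache-Control TTLs at one day
        ignoreQueryString = True  # /obj.js?v=42 and /obj.js share one cache entry
        allowFlushFrom = ['127.0.0.1', '203.0.113.7']
        headerBlacklist = ['set-cookie']  # never forward these origin headers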
Example No. 12
def data(self,
         earliesttime=None,
         latesttime=None,
         lastget=None,
         raise_exception=True):
    """
    Return data for an individual station after earliesttime and
    before latesttime.
    """
    # TODO: Better latesttime & earliesttime checking. Limits?
    log.debug("starting station request...")
    if raise_exception:
        web.modified(self.lastupdate('obs'))
    sql = []
    sql.append("""select vc.mes_datetime as data_time,
                         s.sensor_id as sensor_id,
                         v.value_num/s.scaling as data_value,
                         s.symbol as data_symbol,
                         s.data_number as data_number,
                         st.vmdb_id as vmdb_id,
                         x.xmltagname as xmltagname,
                         x.xsitype as xsitype,
                         di.datex_id as datex_id,
                         st.station_name as station_name
                  from icecast.value v, icecast.value_control vc,
                       icecast.sensor s, icecast.application_symbol app_s,
                       icecast.station st, exportws.xmltags x,
                       exportws.sensorindex di
                  where v.vc_id = vc.vc_id
                  and v.value_status >= 0
                  and v.vc_id in""")
    variables = {
        'stationid': self.row['station_id'],
    }
    if earliesttime or latesttime:
        sql.append("""( select vc_id
                        from icecast.value_control
                        where station_id = $stationid""")
        if earliesttime:
            log.debug("extracting from %s" % earliesttime)
            sql.append("and mes_datetime >= $earliesttime")
            variables['earliesttime'] = earliesttime
        if latesttime:
            log.debug("extracting up to %s" % latesttime)
            sql.append("and mes_datetime <= $latesttime")
            variables['latesttime'] = latesttime
        sql.append(")")
    elif lastget:
        log.debug("extracting data since %s" % lastget)
        # We can't use > instead of >= in the SQL, as entry_datetime is
        # stored to sub-second accuracy in postgres (mes_datetime isn't!).
        # We just add a second to lastget instead.
        sql.append("""( select vc_id
                        from icecast.value_control
                        where station_id = $stationid
                        and entry_datetime >= $nextget
                       )""")
        variables['nextget'] = lastget + timedelta(seconds=1)
    else:
        sql.append("""( select max(vc_id)
                        from icecast.value_control
                        where station_id = $stationid
                       )""")
    sql.append("""and v.sensor_id = s.sensor_id
                  and s.symbol = app_s.symbol
                  and s.symbol = x.data_symbol
                  and s.symbol = di.data_symbol
                  and s.data_number = di.data_number
                  and app_s.application_key = 'ExportObservationXML'
                  and vc.station_id = st.station_id
                  order by vc.mes_datetime;""")
    result = db.query(' '.join(sql), vars=variables)
    if not result and raise_exception:
        # FIXME: Should this be here?
        web.header('Content-Type', 'text/html; charset=utf-8')
        raise web.HTTPError("404 not found", {}, "No data available.")
    return Data(result, {
        self.row['vmdb_id']: self,
    })
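The lastget branch above works around timestamp granularity: entry_datetime carries sub-second precision, so the query filters with >= lastget plus one second rather than switching to >. A small arithmetic sketch of the effect (the timestamps are illustrative):

    from datetime import datetime, timedelta

    lastget = datetime(2024, 1, 1, 12, 0, 0)         # client's last fetch, whole seconds
    stored = datetime(2024, 1, 1, 12, 0, 0, 431000)  # row inserted at 12:00:00.431
    nextget = lastget + timedelta(seconds=1)
    # A row inserted within the second already covered by lastget is
    # excluded on the next poll:
    assert not (stored >= nextget)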
Example No. 13
    def data(self, earliesttime=None, latesttime=None, lastget=None,
             raise_exception=True):
        """
        Return data for an individual station after earliesttime &
        before latesttime.
        """
        ## TODO: Better latesttime & earliesttime checking. Limits?
        log.debug("starting station request...")
        if raise_exception:
            web.modified(self.lastupdate('obs'))
        sql = []
        sql.append("""SELECT v.obs_creationtime as data_time,
                             v.db_insertiontime as insertion_time,
                             si.symbol as sensor_id,
                             st.station_name,
                             st.stn_id,
                             st.vmdb_id,
                             st.owning_region_id,
                             st.lat,
                             st.lon,
                             di.data_symbol,
                             di.data_number as data_number,
                             nvalue as data_value,
                             v.qc_check_total AS qctotal,
                             qc_check_failed as qcfailed,
                             x.xmltagname as xmltagname,
                             x.xsitype as xsitype,
                             di.datex_id as datex_id
                      FROM qm.data_value v,
                           qm.sensor_identity si,
                           qm.station_identity st,
                           exportws.xmltags x,
                           exportws.sensorindex di
                      WHERE True""")
        variables = {'stationid': self.row['stn_id'], }
        if earliesttime or latesttime:
            if earliesttime:
                log.debug("extracting from %s" % earliesttime)
                sql.append("and obs_creationtime >= $earliesttime")
                variables['earliesttime'] = earliesttime
            if latesttime:
                log.debug("extracting since %s" % latesttime)
                sql.append("and obs_creationtime <= $latesttime")
                variables['latesttime'] = latesttime
        elif lastget:
            log.debug("extracting data since %s" % lastget)
            # We can't use > instead of >= in the SQL, as entry_datetime is
            # stored to sub-second accuracy in postgres (mes_datetime isn't!).
            # We just add a second to lastget instead.
            sql.append("""and db_insertiontime >= $nextget""")
            variables['nextget'] = lastget + timedelta(seconds=1)
        else:
            sql.append("""and v.obs_creationtime = st.last_updated""")
        sql.append("""and v.stn_id = $stationid
                      and v.stn_id = st.stn_id
                      and v.sensor_id = si.sensor_id
                      and st.vmdb_id IS NOT NULL
                      and si.symbol = di.dqm_symbol
                      and di.data_symbol = x.data_symbol
                      order by v.obs_creationtime; """)
        result = db.query(' '.join(sql), vars=variables)
        if not result and raise_exception:
            # FIXME: Should this be here?
            web.header('Content-Type', 'text/html; charset=utf-8')
            raise web.HTTPError("404 not found", {}, "No data available.")
        return Data(result, {self.row['vmdb_id']: self})