Example #1
    def aggregate(self):
        parser = AggregateParamParser(request.params)
        params, errors = parser.parse()

        if errors:
            response.status = 400
            return {'errors': errors}

        params['cuts'] = params.pop('cut')
        params['drilldowns'] = params.pop('drilldown')
        dataset = params.pop('dataset')
        format = params.pop('format')
        require.dataset.read(dataset)

        try:
            cache = AggregationCache(dataset)
            result = cache.aggregate(**params)
            if 'drilldown' in result:
                result['drilldown'] = drilldowns_apply_links(dataset.name,
                    result['drilldown'])

            response.last_modified = dataset.updated_at
            if cache.cache_enabled and 'cache_key' in result['summary']:
                etag_cache(result['summary']['cache_key'])

        except (KeyError, ValueError) as ve:
            log.exception(ve)
            response.status = 400
            return {'errors': ['Invalid aggregation query: %r' % ve]}

        if format == 'csv':
            return write_csv(result['drilldown'], response,
                filename=dataset.name + '.csv')
        return to_jsonp(result)
Example #2
def etag_cache_keygen(*a):
    """
    Generate ETag key for the cache.
    This automatically includes the request cookie
    """
    etag = hashlib.sha1(repr(a)+repr(request.cookies)).hexdigest()
    etag_cache(etag)
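
Pylons' etag_cache() sets the ETag response header and short-circuits with 304 Not Modified when the request's If-None-Match header already matches, so nothing after the call runs on a hit. A minimal usage sketch for the keygen above (the action and template are hypothetical):

def show_page(page_id):
    # Every positional argument plus request.cookies feeds the SHA-1 key,
    # so the cached representation is effectively per-session.
    etag_cache_keygen('show_page', page_id)
    # Only reached on a miss, i.e. when the client's ETag is stale.
    return render('/page.mako')  # hypothetical template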
Example #3
    def aggregate(self):
        parser = AggregateParamParser(request.params)
        params, errors = parser.parse()

        if errors:
            response.status = 400
            return {'errors': errors}

        params['cuts'] = params.pop('cut')
        params['drilldowns'] = params.pop('drilldown')
        dataset = params.pop('dataset')
        require.dataset.read(dataset)

        try:
            cache = AggregationCache(dataset)
            result = cache.aggregate(**params)
            if 'drilldown' in result:
                result['drilldown'] = drilldowns_apply_links(dataset.name,
                    result['drilldown'])

            if cache.cache_enabled and 'cache_key' in result['summary']:
                if 'Pragma' in response.headers:
                    del response.headers['Pragma']
                response.cache_control = 'public, max-age=84600'
                etag_cache(result['summary']['cache_key'])

        except (KeyError, ValueError) as ve:
            log.exception(ve)
            response.status = 400
            return {'errors': ['Invalid aggregation query: %r' % ve]}

        return result
Example #4
def serve_file(fp, filename, content_type, last_modified=None, cache_expires=None, size=None, embed=True, etag=None):
    '''Sets the response headers and serves as a wsgi iter'''
    if not etag and filename and last_modified:
        etag = u'{0}?{1}'.format(filename, last_modified).encode('utf-8')
    if etag:
        etag_cache(etag)
    pylons.response.headers['Content-Type'] = ''
    pylons.response.content_type = content_type.encode('utf-8')
    pylons.response.cache_expires = cache_expires or asint(
        tg.config.get('files_expires_header_secs', 60 * 60))
    pylons.response.last_modified = last_modified
    if size:
        pylons.response.content_length = size
    if 'Pragma' in pylons.response.headers:
        del pylons.response.headers['Pragma']
    if 'Cache-Control' in pylons.response.headers:
        del pylons.response.headers['Cache-Control']
    if not embed:
        pylons.response.headers.add(
            'Content-Disposition',
            'attachment;filename="%s"' % filename.encode('utf-8'))
    # http://code.google.com/p/modwsgi/wiki/FileWrapperExtension
    block_size = 4096
    if 'wsgi.file_wrapper' in tg.request.environ:
        return tg.request.environ['wsgi.file_wrapper'](fp, block_size)
    else:
        return iter(lambda: fp.read(block_size), '')
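
A hedged usage sketch for serve_file(); the storage root, stat-based metadata, and mimetype guess are illustrative assumptions rather than part of the original helper:

import mimetypes
import os

def download(filename):
    # Hypothetical storage root; serve_file() then sets the ETag,
    # Last-Modified and expiry headers and returns a WSGI-friendly iterator.
    file_path = os.path.join('/var/data', filename)
    st = os.stat(file_path)
    content_type = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
    return serve_file(open(file_path, 'rb'), filename, content_type,
                      last_modified=st.st_mtime, size=st.st_size,
                      embed=False)  # forces Content-Disposition: attachment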
Example #5
File: raw.py Project: bashia/repoman
    def get_raw_by_user(self, user, image, format='json'):
        image_q = meta.Session.query(Image)
        image = image_q.filter(Image.name==image)\
                       .filter(Image.owner.has(User.user_name==user))\
                       .first()

        if not image:
            abort(404, '404 Not Found')
        else:
            if not image.raw_uploaded:
                abort(404, '404 Not Found')

            #pass through http requests and unauthenticated https requests
            if not image.unauthenticated_access:
                inline_auth(AnyOf(AllOf(OwnsImage(image), IsAthuenticated()),
                                  AllOf(SharedWith(image), IsAthuenticated())),
                                  auth_403)

            file_path = path.join(app_globals.image_storage, image.path)
            try:
                content_length = path.getsize(file_path)
                response.headers['X-content-length'] = str(content_length)
            except OSError:
                abort(500, '500 Internal Error')

            etag_cache(str(image.path) + '_' + str(image.version))

            image_file = open(file_path, 'rb')
            try:
                return h.stream_img(image_file)
            except Exception:
                abort(500, '500 Internal Error')
Example #6
    def aggregate(self):
        errors = []
        params = request.params

        # get and check parameters
        dataset = self._dataset(params, errors)
        drilldowns = self._drilldowns(params, errors)
        cuts = self._cuts(params, errors)
        order = self._order(params, errors)
        measure = self._measure(params, dataset, errors)
        page = self._to_int('page', params.get('page', 1), errors)
        pagesize = self._to_int('pagesize', params.get('pagesize', 10000),
                                errors)
        if errors:
            return {'errors': errors}

        try:
            cache = AggregationCache(dataset)
            result = cache.aggregate(measure=measure, 
                                     drilldowns=drilldowns, 
                                     cuts=cuts, page=page, 
                                     pagesize=pagesize, order=order)

            if cache.cache_enabled and 'cache_key' in result['summary']:
                if 'Pragma' in response.headers:
                    del response.headers['Pragma']
                response.cache_control = 'public, max-age=84600'
                etag_cache(result['summary']['cache_key'])

        except (KeyError, ValueError) as ve:
            log.exception(ve)
            return {'errors': ['Invalid aggregation query: %r' % ve]}

        return result
Example #7
 def etagged(self, etag):
     # this is needed because the default content type is overridden within the config
     # etag_cache should probably do this, in order that the content-type header is popped
     # if it is None
     pylons.response.headers.pop('Content-Type')
     etag_cache(etag)
     return "bar"
Example #8
def render_png(io, mtime, content_type="image/png"):
    response.content_type = content_type
    etag_cache(key=hashlib.sha1(io).hexdigest())
    response.charset = None
    response.last_modified = rfc822.formatdate(timeval=mtime)
    del response.headers['Cache-Control']
    response.content_length = len(io)
    response.pragma = None
    return io
Example #9
def etag_cache_keygen(*a):
    """
    Generate ETag key for the cache.
    This automatically includes the username taken from the session cookie
    with the help of pylons
    """
    # Get the account name (authentication in pylons sets it to the
    # environment variable REMOTE_USER)
    account_name = request.environ.get('REMOTE_USER', None)
    etag = hashlib.sha1(repr(a) + repr(account_name)).hexdigest()
    etag_cache(etag)
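
A usage sketch for the per-account keygen above (the action name and template are hypothetical): two accounts requesting the same URL get different ETags because REMOTE_USER is folded into the hash, so one user's 304 can never validate a response cached for another.

def dashboard():
    # Key varies with both the explicit args and REMOTE_USER.
    etag_cache_keygen('dashboard', request.params.get('page', '1'))
    return render('/dashboard.mako')  # hypothetical template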
Example #10
File: raw.py Project: genba/repoman
    def get_raw_by_user(self, user, image, hypervisor=None, format='json'):
        image_q = meta.Session.query(Image)
        image = image_q.filter(Image.name==image)\
                       .filter(Image.owner.has(User.user_name==user))\
                       .first()

        if not image:
            abort(404, '404 Not Found')
        else:
            if not image.raw_uploaded:
                abort(404, '404 Not Found')

            #pass through http requests and unauthenticated https requests
            if not image.unauthenticated_access:
                inline_auth(AnyOf(AllOf(OwnsImage(image), IsAthuenticated()),
                                  AllOf(SharedWith(image), IsAthuenticated())),
                                  auth_403)

            # If no hypervisor is given, and the image has the hypervisor
            # metadata variable set to something, then lets set the
            # hypervisor to the metadata variable.
            if (hypervisor == None) and (image.hypervisor != None):
                hypervisor = image.hypervisor.split(',')[0]

            # If hypervisor is still None, then let's default to 'xen'.
            # This is mostly to support images that do not have the hypervisor variable
            # set. (pre multi-hypervisor support)
            if hypervisor == None:
                hypervisor = 'xen'

            file_path = path.join(app_globals.image_storage, '%s_%s_%s' % (user, image.name, hypervisor))

            # Check if file actually exists
            if not path.exists(file_path):
                abort(404, '404 Not Found')

            try:
                content_length = path.getsize(file_path)
                response.headers['X-content-length'] = str(content_length)
            except Exception, e:
                abort(500, '500 Internal Error')

            # Set the filename
            #response.headers['Content-Disposition'] = str('attachment; filename="%s"' % (image.name))

            etag_cache(str(('%s_%s_%s' % (user, image.name, hypervisor)) + '_' + str(image.version)))

            image_file = open(file_path, 'rb')

            try:
                return h.stream_img(image_file)
            except Exception, e:
                abort(500, '500 Internal Error')
Example #11
def render_png(io, mtime, content_type="image/png", cache_forever=False):
    response.content_type = content_type
    if not cache_forever:
        etag_cache(key=hashlib.sha1(io).hexdigest())
        del response.headers['Cache-Control']
    else:
        response.headers['Cache-Control'] = 'max-age=31556926'
    response.charset = None
    response.last_modified = rfc822.formatdate(timeval=mtime)
    response.content_length = len(io)
    response.pragma = None
    return io
Example #12
    def raw_image(self, id):
        item = meta.find(model.Medium, id)

        p = ImageFile.Parser()
        p.feed(item.image_data.getvalue())
        img = p.close()

        buffer = StringIO()
        img.save(buffer, format='png')
        response.content_type = 'image/png'

        etag_cache(str(item.updated_ts))
        return buffer.getvalue()
Example #13
 def read_image(self, id, x=None, y=None):
     etag_cache(sha1(str(id)+str(x)+str(y)).hexdigest())
     image = ApplicationImage.by_id(id)
     if not image:
         # Check for a missing image before touching image.data below.
         abort(404)
     handle = Image.open(StringIO(image.data))
     response.content_type = 'image/png'
     outfh = StringIO()
     try:
         if x is not None and y is not None:
             size = (int(x), int(y))
             handle = ImageOps.fit(handle, size, Image.ANTIALIAS, 0.01, (0.0, 0.0))
     except ValueError, e:
         pass
     # The original excerpt is truncated here; presumably it finishes by
     # writing the (possibly resized) image into outfh and returning it:
     handle.save(outfh, 'PNG')
     return outfh.getvalue()
Example #14
File: db.py Project: adrianpike/wwscc
	def index(self, name):
		log.debug("dbload: name is %s" % name)
		ret = self.session.query(Data).get(name)
		if ret is not None:
			# ETag stops unnecessary data, max-age will stop requests completely for six minutes
			# As data could change, make it short but still enough to make a single session nice
			# and fast
			response.headers['Content-type'] = str(ret.mime)
			response.headers['Last-Modified'] = self.rfc850(ret.mod)
			etag_cache("%s" % ret.mod)
			response.headers['Cache-Control'] = 'max-age=360' 
			response.headers.pop('Pragma', None)
			log.debug("dbload: Returning %s" % ret.name)
			return str(ret.data)
		return None
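
A hedged sketch of the client-visible effect of those headers, written as a WebTest-style check (the app fixture and route are assumptions): within max-age the client reuses its copy without a request; after that, a conditional request carrying the saved ETag gets an empty 304 instead of the full body.

def test_etag_revalidation(app):
    # First fetch: full body plus the ETag derived from the row's mod time.
    first = app.get('/db/index/somepage')  # hypothetical route
    etag = first.headers['ETag']
    # Replaying the ETag must short-circuit into an empty 304 Not Modified.
    app.get('/db/index/somepage',
            headers={'If-None-Match': etag}, status=304)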
Example #15
 def GET_wikipage(self,name,skiplayout):
     p = allWikiPagesCached[name]
     content_type = p.get('content-type', 'text/html')
     if content_type == 'text/html':
       if skiplayout:
           # Get just the html of the wiki page
           html = WikiPageCached(p).content()
           return WikiPageInline(html=html, name=name, skiplayout=skiplayout, wiki_url=p['url']).render()
       else:
           return WikiPage(name,p,skiplayout=skiplayout).render()
     else:
       # Send the content back as is, with cache control
       page = WikiPageCached(p)
       response.headers['Content-Type'] = content_type
       response.headers['Cache-Control'] = 'max-age=%d' % 300
       etag_cache(page.etag())
       return page.content()
Example #16
 def feed(self):
     etag_cache(get_last())
     alerts = model.Session.query(model.Alert)
     alerts = alerts.order_by(model.sa.desc(model.Alert.time)).limit(100)
     f = Atom1Feed(
         title = "Alerts",
         link=url_for(),
         description="Alerts",
     )
     for a in alerts:
         f.add_item(
             title="%s - %s" %(a.addr, a.name),
             link=url_for(controller="alerts",action="notes",id=a.id),
             description="Down at %s\nUp at %s" % (a.time,a.uptime),
             pubdate = a.time,
         )
     response.content_type = 'application/atom+xml'
     return f.writeString('utf-8')
Example #17
    def thumbnail(self, id, width, height):
        item = meta.find(model.Medium, id)
        etag_cache(str(item.updated_ts))

        p = ImageFile.Parser()
        p.feed(item.image_data.getvalue())
        img = p.close()

        if item.image_crop:
            img = img.crop(item.image_crop)

        #~ log.debug("size: %s, %s" % (width, height))
        size = int(width), int(height)
        img.thumbnail(size)
        #~ log.debug("imgsize: %s, %s" % img.size)

        buffer = StringIO()
        img.save(buffer, format='png')
        response.content_type = 'image/png'

        return buffer.getvalue()
Example #18
 def up_addrs_json(self):
     etag_cache(get_last())
     return {'addrs': [a for a in get_all_up()]}
Example #19
 def etagged(self, etag):
     etag_cache(etag)
     return "bar"
Example #20
 def last(self):
     etag_cache(get_last())
     return get_last()
Example #21
 def test_etag_cache(self):
     etag_cache('test')
     return "from etag_cache"
Example #22
 def up_json(self):
     etag_cache(get_last())
     up = get_up()
     return {'up': [c.to_dict() for c in up]}
Example #23
 def last(self):
     etag_cache(get_last())
     return get_last()
Example #24
 def down_addrs_json(self):
     etag_cache(get_last())
     return {'addrs': [a.addr for a in get_down()]}
Example #25
 def etagged(self, etag):
     etag_cache(etag)
     return "bar"
Example #26
    def aggregate(self):
        """
        Aggregate a dataset based on URL parameters. The aggregation is
        served from a cache if possible; otherwise it is computed on the
        fly (the computation itself happens inside the aggregation cache).
        """

        # Parse the aggregation parameters to get them into the right format
        parser = AggregateParamParser(request.params)
        params, errors = parser.parse()

        # If there were parsing errors we return them with status code 400
        # as jsonp, irrespective of what format was asked for.
        if errors:
            response.status = 400
            return to_jsonp({'errors': errors})

        # URL parameters are always singular nouns but we work with some
        # as plural nouns so we pop them into the plural version
        params['cuts'] = params.pop('cut')
        params['drilldowns'] = params.pop('drilldown')
        params['measures'] = params.pop('measure')

        # Get the dataset and the format and remove from the parameters
        dataset = params.pop('dataset')
        format = params.pop('format')

        # User must have the right to read the dataset to perform aggregation
        require.dataset.read(dataset)

        # Create response headers from the parameters
        self._response_params(params)

        try:
            # Create an aggregation cache for the dataset and aggregate its
            # results. The cache will perform the aggregation if it doesn't
            # have a cached result
            cache = AggregationCache(dataset)
            result = cache.aggregate(**params)

            # If the result has drilldown we create html_url values for its
            # dimensions (linked data).
            if 'drilldown' in result:
                result['drilldown'] = drilldowns_apply_links(
                    dataset.name, result['drilldown'])

            # Do the ETag caching based on the cache_key in the summary
            # this is a weird place to do it since the heavy lifting has
            # already been performed above. TODO: Needs rethinking.
            response.last_modified = dataset.updated_at
            if cache.cache_enabled and 'cache_key' in result['summary']:
                etag_cache(result['summary']['cache_key'])

        except (KeyError, ValueError) as ve:
            # We log possible errors and return them with status code 400
            log.exception(ve)
            response.status = 400
            return to_jsonp({'errors': [unicode(ve)]})

        # If the requested format is csv we write the drilldown results into
        # a csv file and return it, if not we return a jsonp result (default)
        if format == 'csv':
            return write_csv(result['drilldown'],
                             response,
                             filename=dataset.name + '.csv')
        return to_jsonp(result)
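
The TODO above notes that etag_cache() only fires after the expensive aggregation has run. A hedged sketch of the rethink it asks for, assuming the cache could derive its key up front (cache_key() is a hypothetical helper, not the real AggregationCache API):

cache = AggregationCache(dataset)
if cache.cache_enabled:
    # Raises 304 Not Modified on an If-None-Match hit, so no cube work
    # happens at all for a fresh client. cache_key() is hypothetical.
    etag_cache(cache.cache_key(**params))
result = cache.aggregate(**params)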
Example #27
 def down_addrs_json(self):
     etag_cache(get_last())
     return {'addrs': [a.addr for a in get_down()]}
Example #28
 def up_addrs_json(self):
     etag_cache(get_last())
     return {'addrs': [a for a in get_all_up()]}
Example #29
def etag_cache_keygen(*a):
    etag = hashlib.sha1(repr(a)).hexdigest()
    etag_cache(etag)
Example #30
 def up(self):
     etag_cache(get_last())
     c.up = get_up()
     return render('/alerts/up.mako')
Example #31
 def legacy_etag_cache(self):
     # used to crash
     response = etag_cache('test')
     response.body = 'from etag_cache'
     return response
Example #32
 def down_json(self):
     etag_cache(get_last())
     down = get_down()
     return {'down': [c.to_dict() for c in down]}
Example #33
 def up_json(self):
     etag_cache(get_last())
     up = get_up()
     return {'up': [c.to_dict() for c in up]}
Example #34
    def aggregate(self):
        """
        Aggregate a dataset based on URL parameters. The aggregation is
        served from a cache if possible; otherwise it is computed on the
        fly (the computation itself happens inside the aggregation cache).
        """

        # Parse the aggregation parameters to get them into the right format
        parser = AggregateParamParser(request.params)
        params, errors = parser.parse()

        # If there were parsing errors we return them with status code 400
        # as jsonp, irrespective of what format was asked for.
        if errors:
            response.status = 400
            return to_jsonp({'errors': errors})

        # URL parameters are always singular nouns but we work with some
        # as plural nouns so we pop them into the plural version
        params['cuts'] = params.pop('cut')
        params['drilldowns'] = params.pop('drilldown')
        params['measures'] = params.pop('measure')

        # Get the dataset and the format and remove from the parameters
        dataset = params.pop('dataset')
        format = params.pop('format')

        # User must have the right to read the dataset to perform aggregation
        require.dataset.read(dataset)

        # Create response headers from the parameters
        self._response_params(params)

        try:
            # Create an aggregation cache for the dataset and aggregate its
            # results. The cache will perform the aggregation if it doesn't
            # have a cached result
            cache = AggregationCache(dataset)
            result = cache.aggregate(**params)

            # If the result has drilldown we create html_url values for its
            # dimensions (linked data).
            if 'drilldown' in result:
                result['drilldown'] = drilldowns_apply_links(dataset.name,
                    result['drilldown'])

            # Do the ETag caching based on the cache_key in the summary
            # this is a weird place to do it since the heavy lifting has
            # already been performed above. TODO: Needs rethinking.
            response.last_modified = dataset.updated_at
            if cache.cache_enabled and 'cache_key' in result['summary']:
                etag_cache(result['summary']['cache_key'])

        except (KeyError, ValueError) as ve:
            # We log possible errors and return them with status code 400
            log.exception(ve)
            response.status = 400
            return to_jsonp({'errors': [unicode(ve)]})

        # If the requested format is csv we write the drilldown results into
        # a csv file and return it, if not we return a jsonp result (default)
        if format == 'csv':
            return write_csv(result['drilldown'], response,
                filename=dataset.name + '.csv')
        return to_jsonp(result)
Example #35
 def down_json(self):
     etag_cache(get_last())
     down = get_down()
     return {'down': [c.to_dict() for c in down]}
Example #36
 def test_etag_cache(self):
     etag_cache('test')
     return "from etag_cache"
Example #37
 def up(self):
     etag_cache(get_last())
     c.up = get_up()
     return render('/alerts/up.mako')