Example #1
def downloadFile():
    from gluon.contenttype import contenttype
    response.headers['Content-Type'] = contenttype('application/octet-stream')
    print request.vars.path
    response.headers['Content-Disposition'] = 'attachment; filename="' + (
        request.vars.path.split("/")[-1]) + '"'
    return response.stream(open(request.vars.path, 'rb'), chunk_size=4096)
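The snippet above streams whatever path the client supplies in request.vars.path. A minimal hedged variant of the same pattern (assuming a web2py controller; the uploads-folder restriction, the function name and the error handling are illustrative additions, not part of the original snippet):

def download_upload():
    # stream a file from the application's uploads folder with a correct Content-Type
    import os
    from gluon.contenttype import contenttype
    name = os.path.basename(request.vars.path or '')  # drop any directory components
    path = os.path.join(request.folder, 'uploads', name)
    if not name or not os.path.isfile(path):
        raise HTTP(404)
    response.headers['Content-Type'] = contenttype(name)
    response.headers['Content-Disposition'] = 'attachment; filename="%s"' % name
    return response.stream(open(path, 'rb'), chunk_size=4096)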
Example #2
        def export_to_po(self, data):
            """ Returns a ".po" file constructed from given strings """

            from subprocess import call
            from tempfile import NamedTemporaryFile
            from gluon.contenttype import contenttype

            f = NamedTemporaryFile(delete=False)
            csvfilename = "%s.csv" % f.name
            self.write_csvfile(csvfilename, data)

            g = NamedTemporaryFile(delete=False)
            pofilename = "%s.po" % g.name
            # Shell needed on Win32
            call(["csv2po", "-i", csvfilename, "-o", pofilename], shell=True)

            h = open(pofilename, "r")

            # Modify headers to return the po file for download
            filename = "trans.po"
            disposition = "attachment; filename=\"%s\"" % filename
            response = current.response
            response.headers["Content-Type"] = contenttype(".po")
            response.headers["Content-disposition"] = disposition

            h.seek(0)
            return h.read()
Example #3
    def download(self, request, db):
        """
            example of usage in controller:
            def download(): return response.download(request, db)
            download from http://..../download/filename
        """

        import os
        import gluon.contenttype as c
        if not request.args:
            raise HTTP(404)
        name = request.args[-1]
        items = re.compile('(?P<table>.*?)\.(?P<field>.*?)\..*').match(name)
        if not items:
            raise HTTP(404)
        (t, f) = (items.group('table'), items.group('field'))
        field = db[t][f]
        uploadfield = field.uploadfield
        authorize = field.authorize
        if authorize or isinstance(uploadfield, str):
            rows = db(db[t][f] == name).select()
            if not rows:
                raise HTTP(404)
            row = rows[0]
        if authorize and not authorize(row):
            raise HTTP(404)
        self.headers['Content-Type'] = c.contenttype(name)
        if isinstance(uploadfield, str):  # ## if file is in DB
            return row[uploadfield]
        else:

            # ## if file is on filesystem

            return self.stream(os.path.join(request.folder, 'uploads', name))
Example #4
File: s3translate.py Project: ursea/eden
    def export_to_po(self, data):
        """ Returns a ".po" file constructed from given strings """

        from subprocess import call
        from tempfile import NamedTemporaryFile
        from gluon.contenttype import contenttype

        f = NamedTemporaryFile(delete=False)
        csvfilename = "%s.csv" % f.name
        self.write_csvfile(csvfilename, data)

        g = NamedTemporaryFile(delete=False)
        pofilename = "%s.po" % g.name
        # Shell needed on Win32
        call(["csv2po", "-i", csvfilename, "-o", pofilename], shell=True)

        h = open(pofilename, "r")

        # Modify headers to return the po file for download
        filename = "trans.po"
        disposition = "attachment; filename=\"%s\"" % filename
        response = current.response
        response.headers["Content-Type"] = contenttype(".po")
        response.headers["Content-disposition"] = disposition

        h.seek(0)
        return h.read()
Example #5
File: s3export.py Project: owenwaller/eden
    def csv(self, resource):
        """
            Export resource as CSV

            @param resource: the resource to export

            @note: export does not include components!

            @todo: implement audit
        """

        db = current.db
        request = current.request
        response = current.response
        tablename = resource.tablename

        if response:
            servername = request and "%s_" % request.env.server_name or ""
            filename = "%s%s.csv" % (servername, tablename)
            response.headers["Content-Type"] = contenttype(".csv")
            response.headers[
                "Content-disposition"] = "attachment; filename=%s" % filename

        rows = resource.select()
        return str(rows)
Example #6
File: mail.py Project: x91111/web3py
 def __init__(
     self,
     payload,
     filename=None,
     content_id=None,
     content_type=None,
         encoding='utf-8'):
     if isinstance(payload, str):
         if filename is None:
             filename = os.path.basename(payload)
         payload = read_file(payload, 'rb')
     else:
         if filename is None:
             raise Exception('Missing attachment name')
         payload = payload.read()
     filename = filename.encode(encoding)
     if content_type is None:
         content_type = contenttype(filename)
     self.my_filename = filename
     self.my_payload = payload
     MIMEBase.MIMEBase.__init__(self, *content_type.split('/', 1))
     self.set_payload(payload)
     self['Content-Disposition'] = 'attachment; filename="%s"' % filename
     if not content_id is None:
         self['Content-Id'] = '<%s>' % content_id.encode(encoding)
     Encoders.encode_base64(self)
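This constructor appears to mirror web2py's gluon.tools.Mail.Attachment. A brief hedged usage sketch (assuming web2py's Mail API; the SMTP server, credentials, addresses and file path are placeholders):

from gluon.tools import Mail

mail = Mail('smtp.example.com:587', 'sender@example.com', 'login:password')
mail.send(to=['someone@example.com'],
          subject='Report',
          message='See attached file.',
          attachments=[Mail.Attachment('/tmp/report.pdf')])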
Example #7
File: globals.py Project: Rimbo/web2py
 def f(_action=action, *a, **b):
     request.is_restful = True
     env = request.env
     is_json = env.content_type == "application/json"
     method = env.request_method
     if len(request.args) and "." in request.args[-1]:
         request.args[-1], _, request.extension = request.args[-1].rpartition(".")
         current.response.headers["Content-Type"] = contenttype("." + request.extension.lower())
     rest_action = _action().get(method, None)
     if not (rest_action and method == method.upper() and callable(rest_action)):
         raise HTTP(405, "method not allowed")
     try:
         vars = request.vars
         if method == "POST" and is_json:
             body = request.body.read()
             if len(body):
                 vars = sj.loads(body)
         res = rest_action(*request.args, **vars)
         if is_json and not isinstance(res, str):
             res = json(res)
         return res
     except TypeError, e:
         exc_type, exc_value, exc_traceback = sys.exc_info()
         if len(traceback.extract_tb(exc_traceback)) == 1:
             raise HTTP(400, "invalid arguments")
         else:
             raise
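This wrapper appears to be the inner function behind web2py's @request.restful() decorator. A minimal hedged sketch of how it is typically used in a controller (the action name and returned data are illustrative):

@request.restful()
def api():
    def GET(*args, **vars):
        # e.g. GET /app/default/api/items.json sets Content-Type from the extension
        return dict(args=args, vars=vars)

    def POST(*args, **vars):
        return dict(created=True)

    return locals()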
Example #8
    def download(
            self, request, db, chunk_size=DEFAULT_CHUNK_SIZE, attachment=True,
            download_filename=None):
        """
        Adapted from Response.download.
        request.args(0): integer, id of book record.
        """
        current.session.forget(current.response)

        if not request.args:
            raise HTTP(404)

        try:
            book = Book.from_id(request.args(0))
        except LookupError:
            raise HTTP(404)
        filename = book.cbz

        if not filename or not os.path.exists(filename):
            raise HTTP(404)

        stream = os.path.abspath(filename)

        headers = self.headers
        headers['Content-Type'] = contenttype(filename)
        if download_filename is None:
            download_filename = os.path.basename(filename)
        if attachment:
            fmt = 'attachment; filename="%s"'
            headers['Content-Disposition'] = \
                fmt % download_filename.replace('"', '\\"')
        return self.stream(stream, chunk_size=chunk_size, request=request)
Example #9
File: globals.py Project: OuIChien/web2py
    def download(self, request, db, chunk_size=DEFAULT_CHUNK_SIZE, attachment=True, download_filename=None):
        """
        Example of usage in controller::

            def download():
                return response.download(request, db)

        Downloads from http://..../download/filename
        """

        current.session.forget(current.response)

        if not request.args:
            raise HTTP(404)
        name = request.args[-1]
        items = re.compile('(?P<table>.*?)\.(?P<field>.*?)\..*').match(name)
        if not items:
            raise HTTP(404)
        (t, f) = (items.group('table'), items.group('field'))
        try:
            field = db[t][f]
        except AttributeError:
            raise HTTP(404)
        try:
            (filename, stream) = field.retrieve(name, nameonly=True)
        except IOError:
            raise HTTP(404)
        headers = self.headers
        headers['Content-Type'] = contenttype(name)
        if download_filename is None:
            download_filename = filename
        if attachment:
            headers['Content-Disposition'] = \
                'attachment; filename="%s"' % download_filename.replace('"', '\\"')
        return self.stream(stream, chunk_size=chunk_size, request=request)
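The table.field regex above parses web2py's generated upload names. A minimal hedged sketch of the model/controller pairing this method expects (table and field names are illustrative):

db.define_table('doc', Field('attachment', 'upload'))

def download():
    # stored names look like "doc.attachment.<encoded name>.pdf"; the regex above
    # splits them into table ("doc") and field ("attachment")
    return response.download(request, db)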
Example #10
def export():
    """
    Exportar requerimientos en formato CSV para ser visualizado en Excel
    
    GET /requirements/export
    """
    colnames = [('requirements.id','Requerimiento'),
                ('requirements.num_invoice','Factura'),
                ('requirements.total_invoice_converted','Total de la Factura'),
                ('requirements.requested_payment_converted','Pago solicitado'),
                ('requirements.approved_payment_converted','Pago aprobado'),
                ('requirements.description','Descripción del Requerimiento'),
                ('requirements.created_on','Creado @'),
                ('status.name','Estado del Requerimiento'),
                ('auth_user.first_name','Solicitante - Nombre'),
                ('auth_user.last_name','Solicitante - Apellido'),
                ('auth_user.email','Solicitante - Email'),
                ('user_banking_information.num_account','Beneficiario - Cuenta'),
                ('banks.name','Banco'),
                ('identification_types.name','Beneficiario - Documento'),
                ('user_banking_information.identification_number','Beneficiario - Identificación'),
                ('user_banking_information.beneficiary_email','Beneficiario - Email'),
                ('user_banking_information.beneficiary_fullname','Beneficiario - Nombre Completo')]
    cols_headers = [colname[1] for colname in colnames]
    cols = [colname[0] for colname in colnames]
    
    import csv, cStringIO
    s = cStringIO.StringIO()
    csv_kargs = dict(delimiter = ',', quotechar = '"', quoting = csv.QUOTE_ALL, null = '', colnames = cols, write_colnames = False)
    writer = csv.writer(s, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL)
    writer.writerow(cols_headers)
    
    db[model.tables.requirements].total_invoice_converted = Field.Virtual(lambda row: str(row['requirements'].total_invoice).replace(".",",") if row['requirements'].total_invoice else "")
    db[model.tables.requirements].requested_payment_converted = Field.Virtual(lambda row: str(row['requirements'].requested_payment).replace(".",","))
    db[model.tables.requirements].approved_payment_converted = Field.Virtual(lambda row: str(row['requirements'].approved_payment).replace(".",","))
    
    query = ((db[model.tables.requirements].id > 0) &
             (db[model.tables.requirements].status_id == db[model.tables.status].id) &
             (auth.settings.table_user.id == db[model.tables.requirements].created_by))
    left_query = (db[model.tables.user_banking_information].on(db[model.tables.user_banking_information].requirement_id == db[model.tables.requirements].id),
                  db[model.tables.banks].on((db[model.tables.banks].id == db[model.tables.user_banking_information].bank_id)),
                  db[model.tables.identification_types].on((db[model.tables.identification_types].id == db[model.tables.user_banking_information].identification_type_id)))
    rows = db(query).select(db[model.tables.requirements].ALL,
                            db[model.tables.status].ALL,
                            auth.settings.table_user.ALL,
                            db[model.tables.user_banking_information].ALL,
                            db[model.tables.banks].ALL,
                            db[model.tables.identification_types].ALL,
                            left = left_query,
                            orderby = db[model.tables.requirements].created_on)
    
    helpers.export_to_csv_file(rows,s,**csv_kargs)
    
    from datetime import datetime
    from gluon.contenttype import contenttype
    response.headers["Content-Type"] = contenttype('.csv')
    response.headers["Content-disposition"] = 'attachment; filename=requirements-%s.csv' % datetime.now().strftime("%Y%m%d-%H%M%S")
    response.headers["Content-Length"] = s.tell()
    
    return s.getvalue()
Example #11
    def download(self, request, db, chunk_size=DEFAULT_CHUNK_SIZE, attachment=True, download_filename=None):
        """
        Example of usage in controller::

            def download():
                return response.download(request, db)

        Downloads from http://..../download/filename
        """

        current.session.forget(current.response)

        if not request.args:
            raise HTTP(404)
        name = request.args[-1]
        items = re.compile('(?P<table>.*?)\.(?P<field>.*?)\..*').match(name)
        if not items:
            raise HTTP(404)
        (t, f) = (items.group('table'), items.group('field'))
        try:
            field = db[t][f]
        except AttributeError:
            raise HTTP(404)
        try:
            (filename, stream) = field.retrieve(name,nameonly=True)
        except IOError:
            raise HTTP(404)
        headers = self.headers
        headers['Content-Type'] = contenttype(name)
            if download_filename is None:
            download_filename = filename
        if attachment:
            headers['Content-Disposition'] = \
                'attachment; filename="%s"' % download_filename.replace('"', '\\"')
        return self.stream(stream, chunk_size=chunk_size, request=request)
Example #12
    def csv(self, resource):
        """
        Export resource as CSV (does not include components)

        @param resource: the resource to export

        @note: export does not include components!

        @todo: implement audit

        """

        db = self.db

        request = self.manager.request
        response = self.manager.response

        tablename = resource.tablename
        query = resource.get_query()

        if response:
            servername = request and "%s_" % request.env.server_name or ""
            filename = "%s%s.csv" % (servername, tablename)
            response.headers["Content-Type"] = contenttype(".csv")
            response.headers["Content-disposition"] = "attachment; filename=%s" % filename

        return str(db(query).select())
Example #13
File: main.py Project: abastardi/web2py
def serve_controller(request, response, session):
    """
    This function is used to generate a dynamic page.
    It first runs all models, then runs the function in the controller,
    and then tries to render the output using a view/template.
    This function must run from the [application] folder.
    A typical example would be the call to the url
    /[application]/[controller]/[function] that would result in a call
    to [function]() in applications/[application]/[controller].py
    rendered by applications/[application]/views/[controller]/[function].html
    """

    # ##################################################
    # build environment for controller and view
    # ##################################################

    environment = build_environment(request, response, session)

    # set default view, controller can override it

    response.view = '%s/%s.%s' % (request.controller,
                                  request.function,
                                  request.extension)

    # also, make sure the flash is passed through
    # ##################################################
    # process models, controller and view (if required)
    # ##################################################

    run_models_in(environment)
    response._view_environment = copy.copy(environment)
    page = run_controller_in(request.controller, request.function, environment)
    if isinstance(page, dict):
        response._vars = page
        response._view_environment.update(page)
        page = run_view_in(response._view_environment)

    if not request.env.web2py_disable_garbage_collect:
        # logic to garbage collect after exec, not always, once every 100 requests
        global requests
        requests = ('requests' in globals()) and (requests + 1) % 100 or 0
        if not requests:
            gc.collect()
        # end garbage collection logic

    # ##################################################
    # set default headers if not set
    # ##################################################

    default_headers = [
        ('Content-Type', contenttype('.' + request.extension)),
        ('Cache-Control',
         'no-store, no-cache, must-revalidate, post-check=0, pre-check=0'),
        ('Expires', time.strftime('%a, %d %b %Y %H:%M:%S GMT',
                                  time.gmtime())),
        ('Pragma', 'no-cache')]
    for key, value in default_headers:
        response.headers.setdefault(key, value)

    raise HTTP(response.status, page, **response.headers)
Example #14
 def f(_action=action, *a, **b):
     request.is_restful = True
     env = request.env
     is_json = env.content_type == 'application/json'
     method = env.request_method
     if len(request.args) and '.' in request.args[-1]:
         request.args[-1], _, request.extension = request.args[
             -1].rpartition('.')
         current.response.headers['Content-Type'] = \
             contenttype('.' + request.extension.lower())
     rest_action = _action().get(method, None)
     if not (rest_action and method == method.upper()
             and callable(rest_action)):
         raise HTTP(405, "method not allowed")
     try:
         res = rest_action(*request.args, **request.vars)
         if is_json and not isinstance(res, str):
             res = json(res)
         return res
     except TypeError, e:
         exc_type, exc_value, exc_traceback = sys.exc_info()
         if len(traceback.extract_tb(exc_traceback)) == 1:
             raise HTTP(400, "invalid arguments")
         else:
             raise
Example #15
    def csv(self, resource):
        """
        Export resource as CSV (does not include components)

        @param resource: the resource to export

        @note: export does not include components!

        @todo: implement audit

        """

        db = self.db

        request = self.manager.request
        response = self.manager.response

        tablename = resource.tablename
        query = resource.get_query()

        if response:
            servername = request and "%s_" % request.env.server_name or ""
            filename = "%s%s.csv" % (servername, tablename)
            response.headers["Content-Type"] = contenttype(".csv")
            response.headers["Content-disposition"] = "attachment; filename=%s" % filename

        limit = self._overwrite_limit()
        if limit is None:
            rows = db(query).select()
        else:
            rows = db(query).select(resource.table.ALL, limitby=(0, limit))

        return str(rows)
Example #16
File: s3export.py Project: sahana/eden
    def csv(self, resource):
        """
            Export resource as CSV

            Args:
                resource: the resource to export

            Note:
                Export does not include components!

            TODO:
                Implement audit
        """

        request = current.request
        response = current.response

        if response:
            servername = request and "%s_" % request.env.server_name or ""
            filename = "%s%s.csv" % (servername, resource.tablename)
            from gluon.contenttype import contenttype
            response.headers["Content-Type"] = contenttype(".csv")
            response.headers["Content-disposition"] = "attachment; filename=%s" % filename

        rows = resource.select(None, as_rows=True)
        return str(rows)
Example #17
def download():
    '''
    Returns the cached image of the CAM.
    '''
    map_id = request.args(0)
    hash = ''
    if 'hash' in request.vars: hash = request.vars['hash']
    if access_denied(map_id, auth.user.id,
                     hash) and not auth.has_permission('read', db.Map, map_id):
        session.flash = T(
            "You do not have permissions required to use this function!")
        redirect(request.wsgi.environ['HTTP_REFERER'])

    cam = db.Map[map_id]
    try:
        str = cam.imgdata
        str = str[str.find(',') + 1:]
        str = str.decode('base64')

        response.headers['Content-Type'] = contenttype('.png')
        response.headers[
            'Content-disposition'] = 'attachment; filename=' + remove_restricted(
                cam.title) + '.png'
        return str
    except:
        return HTML(BODY(IMG(_src=cam.imgdata)))
Example #18
def serve_controller(request, response, session):
    """
    This function is used to generate a dynamic page.
    It first runs all models, then runs the function in the controller,
    and then tries to render the output using a view/template.
    This function must run from the [application] folder.
    A typical example would be the call to the url
    /[application]/[controller]/[function] that would result in a call
    to [function]() in applications/[application]/[controller].py
    rendered by applications/[application]/views/[controller]/[function].html
    """

    # ##################################################
    # build environment for controller and view
    # ##################################################

    environment = build_environment(request, response, session)

    # set default view, controller can override it

    response.view = '%s/%s.%s' % (request.controller, request.function,
                                  request.extension)

    # also, make sure the flash is passed through
    # ##################################################
    # process models, controller and view (if required)
    # ##################################################

    run_models_in(environment)
    response._view_environment = copy.copy(environment)
    page = run_controller_in(request.controller, request.function, environment)
    if isinstance(page, dict):
        response._vars = page
        response._view_environment.update(page)
        page = run_view_in(response._view_environment)

    if not request.env.web2py_disable_garbage_collect:
        # logic to garbage collect after exec, not always, once every 100 requests
        global requests
        requests = ('requests' in globals()) and (requests + 1) % 100 or 0
        if not requests:
            gc.collect()
        # end garbage collection logic

    # ##################################################
    # set default headers if not set
    # ##################################################

    default_headers = [
        ('Content-Type', contenttype('.' + request.extension)),
        ('Cache-Control',
         'no-store, no-cache, must-revalidate, post-check=0, pre-check=0'),
        ('Expires', unlocalised_http_header_date(time.gmtime())),
        ('Pragma', 'no-cache')
    ]
    for key, value in default_headers:
        response.headers.setdefault(key, value)

    raise HTTP(response.status, page, **response.headers)
Example #19
    def download(
            self, request, db, chunk_size=DEFAULT_CHUNK_SIZE, attachment=True,
            download_filename=None):
        """
        Adapted from Response.download.

        request.args(0): one of 'all', 'book', 'creator'
        request.args(1): integer, id of record if request.args(0) is 'book' or
            'creator'
        """
        current.session.forget(current.response)

        if not request.args:
            raise HTTP(404)

        tor_type = request.args(0)
        if tor_type not in ['all', 'book', 'creator']:
            raise HTTP(404)

        if tor_type in ['book', 'creator'] and not request.args(1):
            raise HTTP(404)

        filename = None
        if tor_type == 'all':
            tor_archive = TorrentArchive()
            name = '.'.join([tor_archive.name, 'torrent'])
            filename = os.path.join(
                tor_archive.base_path,
                tor_archive.category,
                tor_archive.name,
                name
            )
        elif tor_type == 'creator':
            try:
                creator = Creator.from_id(request.args(1))
            except LookupError:
                raise HTTP(404)
            filename = creator.torrent
        else:
            try:
                book = Book.from_id(request.args(1))
            except LookupError:
                raise HTTP(404)
            filename = book.torrent

        if not filename or not os.path.exists(filename):
            raise HTTP(404)

        stream = os.path.abspath(filename)

        headers = self.headers
        headers['Content-Type'] = contenttype(filename)
        if download_filename is None:
            download_filename = os.path.basename(filename)
        if attachment:
            fmt = 'attachment; filename="%s"'
            headers['Content-Disposition'] = \
                fmt % download_filename.replace('"', '\\"')
        return self.stream(stream, chunk_size=chunk_size, request=request)
Example #20
    def encode(self, resource, **attr):
        """
            Export data as a Scalable Vector Graphic

            Args:
                resource: the source of the data that is to be encoded
                          as an SVG. This may be:
                            - resource: the resource
                            - item:     a list of pre-fetched values
                                        the headings are in the first row
                                        the data types are in the second row

            Keyword Args:
                title: The export filename
                list_fields: Fields to include in list views
        """

        # Get the attributes
        #list_fields = attr.get("list_fields")
        #if not list_fields:
        #    list_fields = resource.list_fields()

        # @ToDo: PostGIS can extract SVG from DB (like GeoJSON)
        # http://postgis.refractions.net/documentation/manual-1.4/ST_AsSVG.html
        if resource.prefix == "gis" and resource.name == "location":
            #list_fields.append("wkt")
            list_fields = ["wkt"]
        #elif "location_id$wkt" not in list_fields:
        else:
            #list_fields.append("location_id$wkt")
            list_fields = ["location_id$wkt"]

        # Clear the WKT represent
        current.s3db.gis_location.wkt.represent = None

        # Extract the data from the resource
        (_title, types, lfields, headers,
         items) = self.extractResource(resource, list_fields)

        # @ToDo: Support multiple records
        wkt = items[0]["gis_location.wkt"]
        if not wkt:
            current.log.error("No Geometry!")

        # Convert to SVG
        title = attr.get("title", resource._ids[0])
        filename = "%s.svg" % title
        filepath = self.write_file(filename, wkt, **attr)

        # Response headers
        disposition = "attachment; filename=\"%s\"" % filename
        response = current.response
        response.headers["Content-Type"] = contenttype(".svg")
        response.headers["Content-disposition"] = disposition

        stream = open(filepath)
        return response.stream(stream,
                               chunk_size=DEFAULT_CHUNK_SIZE,
                               request=current.request)
Example #21
def upload():
    f = request.vars.myfile.file
    response.headers['Content-Type'] = contenttype('*.xls')
    response.headers['Content-disposition'] = 'attachment;filename=%s_converted.xls' % (request.now)
    w=StringIO()
    a = FreeMindMap(f)
    a.save_excel(w)
    w.seek(0)
    return response.stream(w, chunk_size = 64 * 1024)
Example #22
File: svg.py Project: gunner272/eden
    def encode(self, resource, **attr):
        """
            Export data as a Scalable Vector Graphic

            @param resource: the source of the data that is to be encoded
                                as an SVG. This may be:
                                resource: the resource
                                item:     a list of pre-fetched values
                                          the headings are in the first row
                                          the data types are in the second row
            @param attr: dictionary of parameters:
                 * title:          The export filename
                 * list_fields:    Fields to include in list views
        """

        # Get the attributes
        #list_fields = attr.get("list_fields")
        #if not list_fields:
        #    list_fields = resource.list_fields()

        # @ToDo: PostGIS can extract SVG from DB (like GeoJSON)
        # http://postgis.refractions.net/documentation/manual-1.4/ST_AsSVG.html
        if resource.prefix == "gis" and resource.name == "location":
            #list_fields.append("wkt")
            list_fields = ["wkt"]
        elif "location_id$wkt" not in list_fields:
            #list_fields.append("location_id$wkt")
            list_fields = ["location_id$wkt"]

        # Clear the WKT represent
        current.s3db.gis_location.wkt.represent = None

        # Extract the data from the resource
        (_title, types, lfields, headers, items) = self.extractResource(resource,
                                                                        list_fields)

        # @ToDo: Support multiple records
        wkt = items[0]["gis_location.wkt"]
        if not wkt:
            error = "No Geometry!"
            from ..s3utils import s3_debug
            s3_debug(error)
        
        # Convert to SVG
        title = attr.get("title", resource._ids[0])
        filename = "%s.svg" % title
        filepath = self.write_file(filename, wkt, **attr)

        # Response headers
        disposition = "attachment; filename=\"%s\"" % filename
        response = current.response
        response.headers["Content-Type"] = contenttype(".svg")
        response.headers["Content-disposition"] = disposition

        stream = open(filepath)
        return response.stream(stream, chunk_size=DEFAULT_CHUNK_SIZE,
                               request=current.request)
Example #23
def download():
    from gluon.contenttype import contenttype
    import os
    path = os.path.join(fileupload.path, fileupload.filename)
    headers = response.headers
    headers['Content-Type'] = contenttype(fileupload.filename)
    headers[
        'Content-Disposition'] = 'attachment; filename="%s"' % fileupload.filename
    return response.stream(path)
Example #24
 def GET(printid, fetch=None):
     status = get_printjob_status(printid)
     if status == 201 and fetch:
         f = open(os.path.join(request.folder, "printjobs", printid, "download.pdf"))
         response.headers["Content-Type"] = contenttype(".pdf")
         response.headers["Content-disposition"] = "attachment; filename=download.pdf"
         return response.stream(f, chunk_size=64 * 1024)
     else:
         raise HTTP(status)
Example #25
def series_export_basic():
    prefix = "survey"
    resourcename = "series"
    tablename = "%s_%s" % (prefix, resourcename)
    s3mgr.load(tablename)
    crud_strings = response.s3.crud_strings[tablename]
    
    if len(request.args) == 1:
        series_id = request.args[0]
        questions = response.s3.survey_getAllQuestionsForSeries(series_id)
        try:
            import xlwt
        except ImportError:
            output = s3_rest_controller(prefix,
                                    resourcename,
                                    rheader=response.s3.survey_series_rheader)
            return output
        
        COL_WIDTH_MULTIPLIER = 864
        book = xlwt.Workbook(encoding="utf-8")
        sheet1 = book.add_sheet(T("Assignment"))
        output = StringIO()

        styleHeader = xlwt.XFStyle()
        styleHeader.font.bold = True

        row = 1
        sheet1.write(0, 0, unicode(T("Code")), style=styleHeader)
        sheet1.write(0, 1, unicode(T("Question")), style=styleHeader)
        sheet1.write(0, 2, unicode(T("Answer")), style=styleHeader)
        sheet1.write(0, 3, unicode(T("Notes")), style=styleHeader)
        section = ""
        for question in questions:
            if question["section"] != section:
                section = question["section"]
                sheet1.write_merge(row, row, 0, 3, section, style=styleHeader)
                row += 1
            sheet1.write(row, 0, question["code"])
            sheet1.write(row, 1, question["name"])
            sheet1.write(row, 3, question["type"])
            width=len(unicode(question["name"]))*COL_WIDTH_MULTIPLIER
            sheet1.col(1).width = width
            row += 1
        book.save(output)
        output.seek(0)
        response.headers["Content-Type"] = contenttype(".xls")
        seriesName = response.s3.survey_getSeriesName(series_id)
        filename = "%s.xls" % seriesName
        response.headers["Content-disposition"] = "attachment; filename=\"%s\"" % filename
        return output.read()
    else:
        output = s3_rest_controller(prefix,
                                    resourcename,
                                    rheader=response.s3.survey_series_rheader)
        return output
Example #26
        def create_spreadsheet(self, Strings):
            """
                Function to create a spreadsheet (.xls file) of strings with
                location, original string and translated string as columns
            """

            try:
                from cStringIO import StringIO    # Faster, where available
            except:
                from StringIO import StringIO
            import xlwt

            from gluon.contenttype import contenttype

            # Define spreadsheet properties
            wbk = xlwt.Workbook("utf-8")
            sheet = wbk.add_sheet("Translate")
            style = xlwt.XFStyle()
            font = xlwt.Font()
            font.name = "Times New Roman"
            style.font = font

            sheet.write(0, 0, "location", style)
            sheet.write(0, 1, "source", style)
            sheet.write(0, 2, "target", style)

            row_num = 1

            # Write the data to spreadsheet
            for (loc, d1, d2) in Strings:
                d2 = d2.decode("string-escape").decode("utf-8")
                sheet.write(row_num, 0, loc, style)
                sheet.write(row_num, 1, d1, style)
                sheet.write(row_num, 2, d2, style)
                row_num += 1

            # Set column width
            for colx in range(0, 3):
                sheet.col(colx).width = 15000

            # Initialize output
            output = StringIO()

            # Save the spreadsheet
            wbk.save(output)

            # Modify headers to return the xls file for download
            filename = "trans.xls"
            disposition = "attachment; filename=\"%s\"" % filename
            response = current.response
            response.headers["Content-Type"] = contenttype(".xls")
            response.headers["Content-disposition"] = disposition

            output.seek(0)
            return output.read()
Example #27
File: s3translate.py Project: ursea/eden
    def create_spreadsheet(self, Strings):
        """
            Function to create a spreadsheet (.xls file) of strings with
            location, original string and translated string as columns
        """

        try:
            from cStringIO import StringIO  # Faster, where available
        except:
            from StringIO import StringIO
        import xlwt

        from gluon.contenttype import contenttype

        # Define spreadsheet properties
        wbk = xlwt.Workbook("utf-8")
        sheet = wbk.add_sheet("Translate")
        style = xlwt.XFStyle()
        font = xlwt.Font()
        font.name = "Times New Roman"
        style.font = font

        sheet.write(0, 0, "location", style)
        sheet.write(0, 1, "source", style)
        sheet.write(0, 2, "target", style)

        row_num = 1

        # Write the data to spreadsheet
        for (loc, d1, d2) in Strings:
            d2 = d2.decode("string-escape").decode("utf-8")
            sheet.write(row_num, 0, loc, style)
            sheet.write(row_num, 1, d1, style)
            sheet.write(row_num, 2, d2, style)
            row_num += 1

        # Set column width
        for colx in range(0, 3):
            sheet.col(colx).width = 15000

        # Initialize output
        output = StringIO()

        # Save the spreadsheet
        wbk.save(output)

        # Modify headers to return the xls file for download
        filename = "trans.xls"
        disposition = "attachment; filename=\"%s\"" % filename
        response = current.response
        response.headers["Content-Type"] = contenttype(".xls")
        response.headers["Content-disposition"] = disposition

        output.seek(0)
        return output.read()
Example #28
File: survey.py Project: openincident/eden
def series_export_formatted():
    s3mgr.load("survey_series")
    s3mgr.load("survey_complete")
    # Check that the series_id has been passed in
    if len(request.args) != 1:
        output = s3_rest_controller(module, resourcename, rheader=response.s3.survey_series_rheader)
        return output
    series_id = request.args[0]
    vars = current.request.post_vars
    seriesName = response.s3.survey_getSeriesName(series_id)
    series = response.s3.survey_getSeries(series_id)
    if not series.logo:
        logo = None
    else:
        if "Export_Spreadsheet" in vars:
            ext = "bmp"
        else:
            ext = "png"
        logo = os.path.join(request.folder, "uploads", "survey", "logo", "%s.%s" % (series.logo, ext))
        if not os.path.exists(logo) or not os.path.isfile(logo):
            logo = None
    # Get the translation dictionary
    langDict = dict()
    if "translationLanguage" in request.post_vars:
        lang = request.post_vars.translationLanguage
        if lang == "Default":
            langDict = dict()
        else:
            try:
                lang_fileName = "applications/%s/uploads/survey/translations/%s.py" % (request.application, lang)
                langDict = read_dict(lang_fileName)
            except:
                langDict = dict()
    if "Export_Spreadsheet" in vars:
        (matrix, matrixAnswers) = series_prepare_matrix(series_id, series, logo, langDict, justified=True)
        output = series_export_spreadsheet(matrix, matrixAnswers, logo)
        filename = "%s.xls" % seriesName
        contentType = ".xls"
    elif "Export_Word" in vars:
        template = response.s3.survey_getTemplateFromSeries(series_id)
        template_id = template.id
        title = "%s (%s)" % (series.name, template.name)
        title = survey_T(title, langDict)
        widgetList = response.s3.survey_getAllWidgetsForTemplate(template_id)
        output = series_export_word(widgetList, langDict, title, logo)
        filename = "%s.rtf" % seriesName
        contentType = ".rtf"
    else:
        output = s3_rest_controller(module, resourcename, rheader=response.s3.survey_series_rheader)
        return output
    output.seek(0)
    response.headers["Content-Type"] = contenttype(contentType)
    response.headers["Content-disposition"] = 'attachment; filename="%s"' % filename
    return output.read()
Example #29
File: admin.py Project: wulliam/blogitizor
def export():
    from gluon.contenttype import contenttype
    response.headers['Content-Type'] = contenttype('.csv')
    response.headers['Content-disposition'] = 'attachment; filename=%s_%s_%s_%s%s_blogitizor.csv' % (
        request.now.year, request.now.month, request.now.day,
        request.now.hour, request.now.minute
    )
    import csv, cStringIO
    s = cStringIO.StringIO()
    db.export_to_csv_file(s, delimiter=',', quotechar='"', quoting=csv.QUOTE_NONNUMERIC)
    return s.getvalue()
Example #30
def backupvertical():
    import cStringIO
    from gluon import contenttype
    stream=cStringIO.StringIO()
    vertical = Vertical(request.args(0, cast = int))
    print >> stream, 'TABLE plints'
    db(db.plints.vertical == vertical.index).select(orderby=db.plints.id).export_to_csv_file(stream)
    print >> stream, '\n\nEND'
    response.headers['Content-Type'] = contenttype.contenttype('.csv')
    filename = 'cross-%s-vertical-%s-%s.csv' % (vertical.cross.title, vertical.title, request.now.date())
    response.headers['Content-disposition'] = 'attachment; filename=' + filename.replace(' ', '_')
    return stream.getvalue()
Example #31
    def download(
            self, request, db, chunk_size=DEFAULT_CHUNK_SIZE, attachment=True,
            download_filename=None):
        """
        Adapted from Response.download.

        request.args: path to image file, the last item is the image filename.
        request.vars.size: string, one of SIZES. If provided the image is
            streamed from a subdirectory with that name.
        request.vars.cache: boolean, if set, set response headers to
            enable caching.
        """
        current.session.forget(current.response)

        if not request.args:
            raise HTTP(404)
        name = request.args[-1]
        # W1401 (anomalous-backslash-in-string): *Anomalous backslash in string
        # pylint: disable=W1401
        items = re.compile('(?P<table>.*?)\.(?P<field>.*?)\..*')\
            .match(name)
        if not items:
            raise HTTP(404)
        (t, f) = (items.group('table'), items.group('field'))
        try:
            field = db[t][f]
        except AttributeError:
            raise HTTP(404)
        try:
            (filename, stream) = field.retrieve(name, nameonly=True)
        except (IOError, TypeError):
            raise HTTP(404)

        # Customization: start
        if request.vars.size and request.vars.size in SIZES \
                and request.vars.size != 'original':
            resized = filename_for_size(stream, request.vars.size)
            if os.path.exists(resized):
                stream = resized
        # Customization: end

        headers = self.headers
        headers['Content-Type'] = contenttype(name)
        if download_filename is None:
            download_filename = filename
        if attachment:
            fmt = 'attachment; filename="%s"'
            headers['Content-Disposition'] = \
                fmt % download_filename.replace('"', '\\"')
        if request.vars.cache:
            headers['Cache-Control'] = 'max-age=315360000, public'
            headers['Expires'] = 'Thu, 31 Dec 2037 23:59:59 GMT'
        return self.stream(stream, chunk_size=chunk_size, request=request)
Example #32
File: cam.py Project: SEA000/uw-empathica
def export_string():
    map_id = request.args(0)
    hash = ''
    if 'hash' in request.vars: hash = request.vars['hash']
    if access_denied(map_id, auth.user.id, hash):
        session.flash=T("You do not have permissions required to use this function!")
        redirect(request.wsgi.environ['HTTP_REFERER'])
        
    cam = db.Map[map_id]
    response.headers['Content-Type'] = contenttype('.txt')
    response.headers['Content-disposition'] = 'attachment; filename=' + remove_restricted(cam.title) +'.empathica'
    return cam.save_string  
Example #33
def planilla_prestamo():
    from StringIO import StringIO
    from reporte.planilla_control import planilla_control
    from gluon.contenttype import contenttype

    out = StringIO()
    report = planilla_control(out, request.args(-1))
    report.render()
    data = out.getvalue()
    out.close()
    response.headers['Content-Type'] = contenttype('.pdf')
    return data
Example #34
def database_backup():
    import cStringIO

    s = cStringIO.StringIO()
    db.export_to_csv_file(s)

    from gluon.contenttype import contenttype
    from datetime import datetime

    fname = "backup %s.csv" % datetime.now().strftime("%d-%m-%y %H-%M-%S")
    response.headers["Content-Type"] = contenttype(".csv")
    response.headers["Content-disposition"] = "attachment; ; filename=%s" % fname
    return s.getvalue()
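For completeness, a CSV produced by db.export_to_csv_file can be loaded back with the DAL's matching import call. A hedged sketch (assuming the same model definitions exist in the target application; the backup path and action name are illustrative):

def database_restore():
    # re-import a backup produced by database_backup()
    import os
    path = os.path.join(request.folder, 'private', 'backup.csv')
    with open(path, 'rb') as backup:
        db.import_from_csv_file(backup)
    db.commit()
    return 'restored'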
Example #35
    def testTypeRecognition(self):
        rtn = contenttype('.png')
        self.assertEqual(rtn, 'image/png')
        rtn = contenttype('.gif')
        self.assertEqual(rtn, 'image/gif')
        rtn = contenttype('.tar.bz2')
        self.assertEqual(rtn, 'application/x-bzip-compressed-tar')
        # test overrides and additions
        mapping = {
            '.load': 'text/html; charset=utf-8',
            '.json': 'application/json',
            '.jsonp': 'application/jsonp',
            '.pickle': 'application/python-pickle',
            '.w2p': 'application/w2p',
            '.md': 'text/x-markdown; charset=utf-8'
        }
        for k, v in iteritems(mapping):
            self.assertEqual(contenttype(k), v)

        # test without dot extension
        rtn = contenttype('png')
        self.assertEqual(rtn, 'text/plain; charset=utf-8')
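As the last assertion shows, contenttype() keys off a dot-prefixed extension and falls back to the text/plain default when none is present. A short hedged sketch of the calling convention (inside a web2py controller, where response is ambient):

from gluon.contenttype import contenttype

# a full filename and its bare ".ext" resolve to the same type; the leading dot matters
response.headers['Content-Type'] = contenttype('report.csv')  # equivalent to contenttype('.csv')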
Example #36
File: base.py Project: anndream/next2web
 def set_files(self, step, files):
     from gluon.contenttype import contenttype
     if step not in self.data[self.FILE_KEY]:
         self.data[self.FILE_KEY][step] = {}
     
     for field, (filename, data)  in (files or {}).iteritems():
         tmp_file = temp_store_file(data)
         file_dict = {
             'tmp_name' : tmp_file,
             'filename': filename,
             'content-type': contenttype(filename)
         }
         self.data[self.FILE_KEY][step][field] = file_dict
Example #37
    def testTypeRecognition(self):
        rtn = contenttype('.png')
        self.assertEqual(rtn, 'image/png')
        rtn = contenttype('.gif')
        self.assertEqual(rtn, 'image/gif')
        rtn = contenttype('.tar.bz2')
        self.assertEqual(rtn, 'application/x-bzip-compressed-tar')
        # test overrides and additions
        mapping = {
            '.load': 'text/html; charset=utf-8',
            '.json': 'application/json',
            '.jsonp': 'application/jsonp',
            '.pickle': 'application/python-pickle',
            '.w2p': 'application/w2p',
            '.md': 'text/x-markdown; charset=utf-8'
        }
        for k, v in iteritems(mapping):
            self.assertEqual(contenttype(k), v)

        # test without dot extension
        rtn = contenttype('png')
        self.assertEqual(rtn, 'text/plain; charset=utf-8')
Example #38
    def download(self,
                 request,
                 db,
                 chunk_size=DEFAULT_CHUNK_SIZE,
                 attachment=True,
                 download_filename=None):
        """
        Example of usage in controller::

            def download():
                return response.download(request, db)

        Downloads from http://..../download/filename
        """
        from pydal.exceptions import NotAuthorizedException, NotFoundException

        current.session.forget(current.response)

        if not request.args:
            raise HTTP(404)
        name = request.args[-1]
        items = re.compile('(?P<table>.*?)\.(?P<field>.*?)\..*').match(name)
        if not items:
            raise HTTP(404)
        (t, f) = (items.group('table'), items.group('field'))
        try:
            field = db[t][f]
        except AttributeError:
            raise HTTP(404)
        try:
            (filename, stream) = field.retrieve(name, nameonly=True)
        except NotAuthorizedException:
            raise HTTP(403)
        except NotFoundException:
            raise HTTP(404)
        except IOError:
            raise HTTP(404)
        headers = self.headers
        headers['Content-Type'] = contenttype(name)
        if download_filename is None:
            download_filename = filename
        if attachment:
            # Browsers still don't have a simple uniform way to have non ascii
            # characters in the filename so for now we are percent encoding it
            if isinstance(download_filename, unicodeT):
                download_filename = download_filename.encode('utf-8')
            download_filename = urllib_quote(download_filename)
            headers['Content-Disposition'] = \
                'attachment; filename="%s"' % download_filename.replace('"', '\\"')
        return self.stream(stream, chunk_size=chunk_size, request=request)
Example #39
def thumb():
    if not request.args(2):
        raise HTTP(404, "Image Not Found")
    del response.headers['Cache-Control']
    del response.headers['Pragma']
    del response.headers['Expires']
    response.headers['Cache-Control'] = "max-age=3600"

    import os.path
    import gluon.contenttype as c
    try:
        size_x = int(request.args(0))
        size_y = int(request.args(1))
    except:
        raise HTTP(400, "Invalid Image Dementions")
        
        
    request_path = os.path.join(request.folder, 'uploads','thumb', "%d_%d_%s" % (size_x, size_y, request.args(2)))
    request_sorce_path = os.path.join(request.folder, 'uploads', request.args(2))
    
    if os.path.exists(request_path):
        response.headers['Content-Type'] = c.contenttype(request_path) 
        return response.stream(open(request_path, 'rb'))
    
    elif os.path.exists(request_sorce_path):
        import Image
        thumb = Image.open(request_sorce_path)
        thumb.thumbnail((size_x,size_y), Image.ANTIALIAS)
        try:
            thumb.save(request_path)
        except KeyError:
            thumb.save(request_path, "JPEG")
        
        response.headers['Content-Type'] = c.contenttype(request_path) 
        return response.stream(open(request_path, 'rb'))
    else:
        raise HTTP(404, "Image not found")
Example #40
File: survey.py Project: darthdweller/eden
def export_all_responses():
    s3mgr.load("survey_series")
    s3mgr.load("survey_complete")
    try:
        import xlwt
    except ImportError:
        output = s3_rest_controller(module,
                                resourcename,
                                rheader=response.s3.survey_series_rheader)
        return output
    series_id = request.args[0]
    seriesName = response.s3.survey_getSeriesName(series_id)
    filename = "%s_All_responses.xls" % seriesName
    contentType = ".xls"
    output = StringIO()
    book = xlwt.Workbook(encoding="utf-8")
    sheet1 = book.add_sheet(T("Responses"))
    # get all questions and write out as a heading
    col = 0
    completeRow = {}
    nextRow = 2
    qstnList = response.s3.survey_getAllQuestionsForSeries(series_id)
    for qstn in qstnList:
        row = 0
        sheet1.write(row,col,qstn["code"])
        row += 1
        sheet1.write(row,col,qstn["name"])
        # for each question get the response
        allResponses = response.s3.survey_getAllAnswersForQuestionInSeries(qstn["qstn_id"], series_id)
        for answer in allResponses:
            value = answer["value"]
            complete_id = answer["complete_id"]
            if complete_id in completeRow:
                row = completeRow[complete_id]
            else:
                completeRow[complete_id] = nextRow
                row = nextRow
                nextRow += 1
            sheet1.write(row,col,value)
        col += 1
    sheet1.panes_frozen = True
    sheet1.horz_split_pos = 2
    book.save(output)


    output.seek(0)
    response.headers["Content-Type"] = contenttype(contentType)
    response.headers["Content-disposition"] = "attachment; filename=\"%s\"" % filename
    return output.read()
Example #41
def export_string():
    map_id = request.args(0)
    hash = ''
    if 'hash' in request.vars: hash = request.vars['hash']
    if access_denied(map_id, auth.user.id, hash):
        session.flash = T(
            "You do not have permissions required to use this function!")
        redirect(request.wsgi.environ['HTTP_REFERER'])

    cam = db.Map[map_id]
    response.headers['Content-Type'] = contenttype('.txt')
    response.headers[
        'Content-disposition'] = 'attachment; filename=' + remove_restricted(
            cam.title) + '.empathica'
    return cam.save_string
Example #42
def imprimir_cc():
    # Imprimir Reporte Cuotas Cobradas.
    from StringIO import StringIO
    from reporte.cuotas_cobradas import cuotas_cobradas
    from gluon.contenttype import contenttype

    out = StringIO()
    report = cuotas_cobradas(out, **request.vars)
    report.render()
    data = out.getvalue()
    out.close()
    response.headers['Content-Type'] = contenttype('.pdf')
    return data

    
Example #43
File: evidence.py Project: 001001/Kvasir
def download():
    import gluon.contenttype as cc
    f_evidence = request.args[0]

    row = db(db.t_evidence.f_evidence == f_evidence).select(db.t_evidence.f_data, db.t_evidence.f_filename, db.t_evidence.f_evidence).first()

    response.headers['Content-Type'] = cc.contenttype(f_evidence)
    # convert unknowns (x-XXXX) into text/plain
    if "/x-" in response.headers['Content-Type']:
        response.headers['Content-Type'] = 'text/plain'
    response.headers['Content-Disposition'] = "attachment; filename=%s" % (row.f_filename)
    if row.f_data is not None:
        return row.f_data
    else:
        return ""
Example #44
def download():
    import os
    dbname = request.args[0]
    ### for GAE only ###
    tablename, fieldname = request.args[1].split('.')[:2]
    uploadfield = eval(dbname)[tablename][fieldname].uploadfield
    filename = request.args[1]
    if isinstance(uploadfield, str):
        from gluon.contenttype import contenttype
        response.headers['Content-Type'] = contenttype(filename)
        db = eval(dbname)
        rows = db(db[tablename][fieldname] == filename).select()
        return rows[0][uploadfield]
    ### end for GAE ###
    filename = os.path.join(request.folder, 'uploads/', filename)
    return response.stream(open(filename, 'rb'))
Example #45
def download():
    import gluon.contenttype as cc
    f_evidence = request.args[0]

    row = db(db.t_evidence.f_evidence == f_evidence).select(db.t_evidence.f_data, db.t_evidence.f_filename, db.t_evidence.f_evidence).first()

    response.headers['Content-Type'] = cc.contenttype(f_evidence)
    # convert unknowns (x-XXXX) into text/plain
    if "/x-" in response.headers['Content-Type']:
        response.headers['Content-Type'] = 'text/plain'
    response.headers['Content-Disposition'] = "attachment; filename=%s" % (row.f_filename)
    if row.f_data is not None:
        return row.f_data
    else:
        return ""
Example #46
def download():
    import os
    dbname = request.args[0]
    ### for GAE only ###
    tablename, fieldname = request.args[1].split('.')[:2]
    uploadfield = eval(dbname)[tablename][fieldname].uploadfield
    filename = request.args[1]
    if isinstance(uploadfield, str):
        from gluon.contenttype import contenttype
        response.headers['Content-Type'] = contenttype(filename)
        db = eval(dbname)
        rows = db(db[tablename][fieldname] == filename).select()
        return rows[0][uploadfield]
    ### end for GAE ###
    filename = os.path.join(request.folder, 'uploads/', filename)
    return response.stream(open(filename, 'rb'))
Example #47
def download():
    from gluon.contenttype import contenttype
    filename = request.args[0]
    try:
        type = request.args[1]
    except:
        type = None

    response.headers['Content-Type'] = contenttype(filename)
    if type:
        return open(
            os.path.join(request.folder, 'uploads/',
                         '%s/%s' % (type, filename)), 'rb').read()
    else:
        return open(os.path.join(request.folder, 'uploads/', '%s' % filename),
                    'rb').read()
Example #48
def get_results():
    from gluon.contenttype import contenttype
    if request.args:
        entry = request.args[0]
    else:
        raise HTTP(404, 'not found')

    row = db(db.projects.id == entry).select().first()
    file_path = os.path.join(row["project_path"], row["name"],
                             row["name"] + '.zip')

    ext = os.path.splitext(file_path)
    response.headers['Content-Type'] = contenttype('.zip')
    response.headers['Content-disposition'] = 'attachment; filename=%s' % row[
        "name"] + '.zip'
    res = response.stream(open(file_path, "rb"), chunk_size=4096)
    return res
Example #49
File: globals.py Project: Nuevalgo/Feedbot
 def f(_action=action, _self=self, *a, **b):
     self.is_restful = True
     method = _self.env.request_method
     if len(_self.args) and "." in _self.args[-1]:
         _self.args[-1], _, self.extension = self.args[-1].rpartition(".")
         current.response.headers["Content-Type"] = contenttype("." + _self.extension.lower())
     rest_action = _action().get(method, None)
     if not (rest_action and method == method.upper() and callable(rest_action)):
         raise HTTP(400, "method not supported")
     try:
         return rest_action(*_self.args, **getattr(_self, "vars", {}))
     except TypeError as e:
         exc_type, exc_value, exc_traceback = sys.exc_info()
         if len(traceback.extract_tb(exc_traceback)) == 1:
             raise HTTP(400, "invalid arguments")
         else:
             raise e
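For context, a hedged sketch of the controller action this REST wrapper serves, following web2py's documented @request.restful() pattern (the action name api and the returned fields are illustrative):

@request.restful()
def api():
    def GET(*args, **vars):
        # e.g. GET /app/default/api/items.json also sets Content-Type from the ".json" extension
        return dict(args=args, vars=vars)
    def POST(*args, **vars):
        return dict(received=vars)
    return locals()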
Example #50
0
File: globals.py Project: web2py/web2py
    def download(self, request, db, chunk_size=DEFAULT_CHUNK_SIZE, attachment=True, download_filename=None):
        """
        Example of usage in controller::

            def download():
                return response.download(request, db)

        Downloads from http://..../download/filename
        """
        from pydal.helpers.regex import REGEX_UPLOAD_PATTERN
        from pydal.exceptions import NotAuthorizedException, NotFoundException

        current.session.forget(current.response)

        if not request.args:
            raise HTTP(404)
        name = request.args[-1]
        items = re.match(REGEX_UPLOAD_PATTERN, name)
        if not items:
            raise HTTP(404)
        t, f = items.group('table'), items.group('field')
        try:
            field = db[t][f]
        except (AttributeError, KeyError):
            raise HTTP(404)
        try:
            (filename, stream) = field.retrieve(name, nameonly=True)
        except NotAuthorizedException:
            raise HTTP(403)
        except NotFoundException:
            raise HTTP(404)
        except IOError:
            raise HTTP(404)
        headers = self.headers
        headers['Content-Type'] = contenttype(name)
        if download_filename is None:
            download_filename = filename
        if attachment:
            # Browsers still don't have a simple uniform way to have non ascii
            # characters in the filename so for now we are percent encoding it
            if isinstance(download_filename, unicodeT):
                download_filename = download_filename.encode('utf-8')
            download_filename = urllib_quote(download_filename)
            headers['Content-Disposition'] = \
                'attachment; filename="%s"' % download_filename.replace('"', '\\"')
        return self.stream(stream, chunk_size=chunk_size, request=request)
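A hedged usage sketch of the optional parameters in the signature above; the filename is illustrative:

def download():
    # Serve the file inline (no attachment prompt) under a friendlier name
    return response.download(request, db,
                             attachment=False,
                             download_filename='report.pdf')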
Example #51
0
 def f(_action=action, _self=self, *a, **b):
     self.is_restful = True
     method = _self.env.request_method
     if len(_self.args) and '.' in _self.args[-1]:
         _self.args[-1], _, self.extension = self.args[-1].rpartition('.')
         current.response.headers['Content-Type'] = \
             contenttype('.' + _self.extension.lower())
     rest_action = _action().get(method, None)
     if not (rest_action and method==method.upper() 
             and callable(rest_action)):
         raise HTTP(400, "method not supported")
     try:
         return rest_action(*_self.args, **getattr(_self,'vars',{}))
     except TypeError as e:
         exc_type, exc_value, exc_traceback = sys.exc_info()
         if len(traceback.extract_tb(exc_traceback)) == 1:
             raise HTTP(400, "invalid arguments")
         else:
             raise e
Example #52
0
File: images.py Project: zsw/zcomix.com
    def download(self, request, db, chunk_size=DEFAULT_CHUNK_SIZE, attachment=True, download_filename=None):
        """
        Adapted from Response.download.

        request.vars.size: string, one of 'original' (default), 'medium', or
                'thumb'. If provided the image is streamed from a subdirectory
                 with that name.
        """
        current.session.forget(current.response)

        if not request.args:
            raise HTTP(404)
        name = request.args[-1]
        items = re.compile(r'(?P<table>.*?)\.(?P<field>.*?)\..*').match(name)
        if not items:
            raise HTTP(404)
        (t, f) = (items.group('table'), items.group('field'))
        try:
            field = db[t][f]
        except AttributeError:
            raise HTTP(404)
        try:
            (filename, stream) = field.retrieve(name, nameonly=True)
        except IOError:
            raise HTTP(404)

        # Customization: start
        if request.vars.size and request.vars.size in UploadImage.sizes:
            resized = stream.replace('/original/', '/{s}/'.format(s=request.vars.size))
            if os.path.exists(resized):
                stream = resized
        # Customization: end

        headers = self.headers
        headers['Content-Type'] = contenttype(name)
        if download_filename is None:
            download_filename = filename
        if attachment:
            headers['Content-Disposition'] = \
                'attachment; filename="%s"' % download_filename.replace('"', '\\"')
        return self.stream(stream, chunk_size=chunk_size, request=request)
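A hedged sketch of how a client might request the resized copy handled by the customization above; the controller name images and the field row.image are assumptions:

# e.g. /app/images/download/book_page.image.xxxxx.jpg?size=thumb
thumb_url = URL('images', 'download', args=[row.image], vars={'size': 'thumb'})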
Example #53
0
 def f(_action=action, _self=self, *a, **b):
     self.is_restful = True
     method = _self.env.request_method or 'GET'
     if len(_self.args) and '.' in _self.args[-1]:
         _self.args[-1], _self.extension = _self.args[-1].rsplit('.', 1)
         current.response.headers['Content-Type'] = \
             contenttype(_self.extension.lower())
     if method not in ['GET', 'POST', 'DELETE', 'PUT']:
         raise HTTP(400, "invalid method")
     rest_action = _action().get(method, None)
     if not rest_action:
         raise HTTP(400, "method not supported")
     try:
         return rest_action(*_self.args, **_self.vars)
     except TypeError as e:
         exc_type, exc_value, exc_traceback = sys.exc_info()
         if len(traceback.extract_tb(exc_traceback)) == 1:
             raise HTTP(400, "invalid arguments")
         else:
             raise e
Example #54
0
 def __init__(
         self,
         payload,
         filename=None,
         content_id=None,
         content_type=None):
     if isinstance(payload, str):
         if filename is None:
             filename = os.path.basename(payload)
         payload = read_file(payload, 'rb')
     else:
         if filename is None:
             raise Exception('Missing attachment name')
         payload = payload.read()
     self.transmission_dict = {
         'type': content_type or contenttype(filename),
         'name': filename,
         'data': base64.b64encode(payload).decode("ascii")
     }
     self.is_inline = False
     if content_id is not None:
         self.is_inline = True
         self.transmission_dict['name'] = content_id
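A hedged usage sketch of this attachment wrapper; the class name Attachment is assumed, and payload may be a file path or an open file object as in the constructor above:

# Regular attachment: filename and content type inferred from the path
report = Attachment('/tmp/report.pdf')
# Inline attachment referenced by content id (e.g. for embedding in HTML mail)
logo = Attachment(open('logo.png', 'rb'), filename='logo.png', content_id='logo')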
Example #55
0
def csvExport():
    scriptId = 1
    #rows = db(query, ignore_common_filters=True).select()
    rows2 = db(db.bot_storage.bot_id == 11).select(db.bot_storage.storage_owner, distinct=True)
    from gluon.contenttype import contenttype
    response.headers['Content-Type'] = contenttype('.csv')
    response.headers['Content-disposition'] = 'attachment; filename=export_%s.csv' % scriptId
    import csv, cStringIO
    s = cStringIO.StringIO()
    # Build a matrix: a header row of distinct storage keys, then one row per owner
    data = db(db.bot_storage.bot_id == 11).select(db.bot_storage.storage_key, distinct=True)
    myList2 = []
    myList2.append([value.storage_key for value in data])
    for rowowner in rows2:
        myList = []
        for value in data:
            dato = db((db.bot_storage.bot_id == 11) &
                      (db.bot_storage.storage_owner == rowowner.storage_owner) &
                      (db.bot_storage.storage_key == value.storage_key)).select(db.bot_storage.storage_value).first()
            myList.append(dato.storage_value if dato is not None else " ")
        myList2.append(myList)
    # Write the matrix into the download buffer and keep a static copy on disk
    csv.writer(s, delimiter=',', quotechar='"', quoting=csv.QUOTE_NONNUMERIC).writerows(myList2)
    with open('/opt/web2py_apps/web2py.production/applications/backend/static/example4.csv', 'w') as myFile:
        csv.writer(myFile).writerows(myList2)
    print("Writing complete")
    return s.getvalue()
Example #56
0
def export_CSV():
    import csv, cStringIO
    from gluon.contenttype import contenttype
    # e.g. /export_CSV/Fb_leads/SMS/Pending downloads all Fb_leads with "SMS Pending" status
    if request.args[0] == "Fb_leads":
        table = db.Fb_leads
    elif request.args[0] == "naukri_leads":
        table = db.naukri_leads
    else:
        raise HTTP(400, 'unknown table')
    if request.args[1] == "None":
        status_query = table.Final_Status == None   # DAL: "== None" translates to IS NULL
    else:
        export_status = request.args[1] + ' ' + request.args[2]
        status_query = table.Final_Status == export_status
    rows = db(status_query).select()
    response.headers['Content-Type'] = contenttype('.csv')
    response.headers['Content-disposition'] = \
        'attachment; filename=export_{}_{}.csv'.format(request.args[0], request.args[1])
    s = cStringIO.StringIO()
    rows.export_to_csv_file(s,
                            delimiter=',',
                            quotechar='"',
                            quoting=csv.QUOTE_NONNUMERIC)
    return s.getvalue()
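For context, a hedged example of the request this action expects, matching the comment in the code above:

# /export_CSV/Fb_leads/SMS/Pending -> export_Fb_leads_SMS.csv with rows whose Final_Status is "SMS Pending"
link = URL('export_CSV', args=['Fb_leads', 'SMS', 'Pending'])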
Example #57
0
File: xls.py Project: nursix/eden-core
    def encode(self, resource, **attr):
        """
            Export data as a Microsoft Excel spreadsheet

            @param resource: the source of the data that is to be encoded
                             as a spreadsheet, can be either of:
                                1) an S3Resource
                                2) an array of value dicts (dict of
                                   column labels as first item, list of
                                   field types as second item)
                                3) a dict like:
                                   {columns: [key, ...],
                                    headers: {key: label},
                                    types: {key: type},
                                    rows: [{key:value}],
                                    }

            @param attr: keyword arguments (see below)

            @keyword as_stream: return the buffer (BytesIO) rather than
                                its contents (str), useful when the output
                                is supposed to be stored locally
            @keyword title: the main title of the report
            @keyword list_fields: fields to include in list views
            @keyword report_groupby: used to create a grouping of the result:
                                     either a Field object of the resource
                                     or a string which matches a value in
                                     the heading
            @keyword use_colour: True to add colour to the cells, default False
            @keyword evenodd: render different background colours
                              for even/odd rows ("stripes")
        """

        # Do not redirect from here!
        # ...but raise proper status code, which can be caught by caller
        try:
            import xlwt
        except ImportError:
            error = self.ERROR.XLWT_ERROR
            current.log.error(error)
            raise HTTP(503, body=error)
        try:
            from xlrd.xldate import xldate_from_date_tuple, \
                                    xldate_from_time_tuple, \
                                    xldate_from_datetime_tuple
        except ImportError:
            error = self.ERROR.XLRD_ERROR
            current.log.error(error)
            raise HTTP(503, body=error)

        import datetime

        MAX_CELL_SIZE = self.MAX_CELL_SIZE
        COL_WIDTH_MULTIPLIER = self.COL_WIDTH_MULTIPLIER

        # Get the attributes
        title = attr.get("title")
        if title is None:
            title = current.T("Report")
        list_fields = attr.get("list_fields")
        group = attr.get("dt_group")
        use_colour = attr.get("use_colour", False)
        evenodd = attr.get("evenodd", True)

        # Extract the data from the resource
        if isinstance(resource, dict):
            headers = resource.get("headers", {})
            lfields = resource.get("columns", list_fields)
            column_types = resource.get("types")
            types = [column_types[col] for col in lfields]
            rows = resource.get("rows")
        elif isinstance(resource, (list, tuple)):
            headers = resource[0]
            types = resource[1]
            rows = resource[2:]
            lfields = list_fields
        else:
            if not list_fields:
                list_fields = resource.list_fields()
            (title, types, lfields, headers, rows) = self.extract(resource,
                                                                  list_fields,
                                                                  )

        # Verify columns in items
        request = current.request
        if len(rows) > 0 and len(lfields) > len(rows[0]):
            msg = """modules/s3/codecs/xls: There is an error in the list items, a field doesn't exist
requesting url %s
Headers = %d, Data Items = %d
Headers     %s
List Fields %s""" % (request.url, len(lfields), len(rows[0]), headers, lfields)
            current.log.error(msg)

        # Grouping
        report_groupby = lfields[group] if group else None
        groupby_label = headers[report_groupby] if report_groupby else None

        # Date/Time formats from L10N deployment settings
        settings = current.deployment_settings
        date_format = settings.get_L10n_date_format()
        date_format_str = str(date_format)

        dt_format_translate = self.dt_format_translate
        date_format = dt_format_translate(date_format)
        time_format = dt_format_translate(settings.get_L10n_time_format())
        datetime_format = dt_format_translate(settings.get_L10n_datetime_format())

        title_row = settings.get_xls_title_row()

        # Get styles
        styles = self._styles(use_colour = use_colour,
                              evenodd = evenodd,
                              datetime_format = datetime_format,
                              )

        # Create the workbook
        book = xlwt.Workbook(encoding="utf-8")

        # Add sheets
        sheets = []
        # XLS exports are limited to 65536 rows per sheet, we bypass
        # this by creating multiple sheets
        row_limit = 65536
        sheetnum = len(rows) / row_limit
        # Can't have a / in the sheet_name, so replace any with a space
        sheet_name = s3_str(title.replace("/", " "))
        if len(sheet_name) > 28:
            # Sheet name cannot be over 31 chars
            # (take sheet number suffix into account)
            sheet_name = sheet_name[:28]
        count = 1
        while len(sheets) <= sheetnum:
            sheets.append(book.add_sheet("%s-%s" % (sheet_name, count)))
            count += 1

        if callable(title_row):
            # Calling with sheet None to get the number of title rows
            title_row_length = title_row(None)
        else:
            title_row_length = 2

        # Add header row to all sheets, determine columns widths
        header_style = styles["header"]
        for sheet in sheets:
            # Move this down if a title row will be added
            if title_row:
                header_row = sheet.row(title_row_length)
            else:
                header_row = sheet.row(0)
            column_widths = []
            has_id = False
            col_index = 0
            for selector in lfields:
                if selector == report_groupby:
                    continue
                label = headers[selector]
                if label == "Id":
                    # Indicate to adjust col_index when writing out
                    has_id = True
                    column_widths.append(0)
                    col_index += 1
                    continue
                if label == "Sort":
                    continue
                if has_id:
                    # Adjust for the skipped column
                    write_col_index = col_index - 1
                else:
                    write_col_index = col_index
                header_row.write(write_col_index, str(label), header_style)
                width = max(len(label) * COL_WIDTH_MULTIPLIER, 2000)
                width = min(width, 65535) # USHRT_MAX
                column_widths.append(width)
                sheet.col(write_col_index).width = width
                col_index += 1

        title = s3_str(title)

        # Title row (optional, deployment setting)
        if title_row:
            T = current.T
            large_header_style = styles["large_header"]
            notes_style = styles["notes"]
            for sheet in sheets:
                if callable(title_row):
                    # Custom title rows
                    title_row(sheet)
                else:
                    # First row => Title (standard = "title_list" CRUD string)
                    current_row = sheet.row(0)
                    if col_index > 0:
                        sheet.write_merge(0, 0, 0, col_index,
                                          title,
                                          large_header_style,
                                          )
                    current_row.height = 500
                    # Second row => Export date/time
                    current_row = sheet.row(1)
                    current_row.write(0, "%s:" % T("Date Exported"), notes_style)
                    current_row.write(1, request.now, notes_style)
                    # Fix the size of the last column to display the date
                    if 16 * COL_WIDTH_MULTIPLIER > width:
                        sheet.col(col_index).width = 16 * COL_WIDTH_MULTIPLIER

        # Initialize counters
        total_cols = col_index
        # Move the rows down if a title row is included
        if title_row:
            row_index = title_row_length
        else:
            row_index = 0

        # Helper function to get the current row
        def get_current_row(row_count, row_limit):

            sheet_count = int(row_count / row_limit)
            row_number = row_count - (sheet_count * row_limit)
            if sheet_count > 0:
                row_number += 1
            return sheets[sheet_count], sheets[sheet_count].row(row_number)

        # Write the table contents
        subheading = None
        odd_style = styles["odd"]
        even_style = styles["even"]
        subheader_style = styles["subheader"]
        for row in rows:
            # Current row
            row_index += 1
            current_sheet, current_row = get_current_row(row_index, row_limit)
            style = even_style if row_index % 2 == 0 else odd_style

            # Group headers
            if report_groupby:
                represent = s3_strip_markup(s3_unicode(row[report_groupby]))
                if subheading != represent:
                    # Start of new group - write group header
                    subheading = represent
                    current_sheet.write_merge(row_index, row_index, 0, total_cols,
                                             subheading,
                                             subheader_style,
                                             )
                    # Move on to next row
                    row_index += 1
                    current_sheet, current_row = get_current_row(row_index, row_limit)
                    style = even_style if row_index % 2 == 0 else odd_style

            col_index = 0
            remaining_fields = lfields

            # Custom row style?
            row_style = None
            if "_style" in row:
                stylename = row["_style"]
                if stylename in styles:
                    row_style = styles[stylename]

            # Group header/footer row?
            if "_group" in row:
                group_info = row["_group"]
                label = group_info.get("label")
                totals = group_info.get("totals")
                if label:
                    label = s3_strip_markup(s3_unicode(label))
                    style = row_style or subheader_style
                    span = group_info.get("span")
                    if span == 0:
                        current_sheet.write_merge(row_index,
                                                  row_index,
                                                  0,
                                                  total_cols - 1,
                                                  label,
                                                  style,
                                                  )
                        if totals:
                            # Write totals into the next row
                            row_index += 1
                            current_sheet, current_row = \
                                get_current_row(row_index, row_limit)
                    else:
                        current_sheet.write_merge(row_index,
                                                  row_index,
                                                  0,
                                                  span - 1,
                                                  label,
                                                  style,
                                                  )
                        col_index = span
                        remaining_fields = lfields[span:]
                if not totals:
                    continue

            for field in remaining_fields:
                label = headers[field]
                if label == groupby_label:
                    continue
                if label == "Id":
                    # Skip the ID column from XLS exports
                    col_index += 1
                    continue

                if field not in row:
                    represent = ""
                else:
                    represent = s3_strip_markup(s3_unicode(row[field]))

                coltype = types[col_index]
                if coltype == "sort":
                    continue
                if len(represent) > MAX_CELL_SIZE:
                    represent = represent[:MAX_CELL_SIZE]
                value = represent
                if coltype == "date":
                    try:
                        cell_datetime = datetime.datetime.strptime(value,
                                                                   date_format_str)
                        date_tuple = (cell_datetime.year,
                                      cell_datetime.month,
                                      cell_datetime.day)
                        value = xldate_from_date_tuple(date_tuple, 0)
                        style.num_format_str = date_format
                    except:
                        pass
                elif coltype == "datetime":
                    try:
                        cell_datetime = datetime.datetime.strptime(value,
                                                                   date_format_str)
                        date_tuple = (cell_datetime.year,
                                      cell_datetime.month,
                                      cell_datetime.day,
                                      cell_datetime.hour,
                                      cell_datetime.minute,
                                      cell_datetime.second)
                        value = xldate_from_datetime_tuple(date_tuple, 0)
                        style.num_format_str = datetime_format
                    except:
                        pass
                elif coltype == "time":
                    try:
                        cell_datetime = datetime.datetime.strptime(value,
                                                                   date_format_str)
                        date_tuple = (cell_datetime.hour,
                                      cell_datetime.minute,
                                      cell_datetime.second)
                        value = xldate_from_time_tuple(date_tuple)
                        style.num_format_str = time_format
                    except:
                        pass
                elif coltype == "integer":
                    try:
                        value = int(value)
                        style.num_format_str = "0"
                    except:
                        pass
                elif coltype == "double":
                    try:
                        value = float(value)
                        style.num_format_str = "0.00"
                    except:
                        pass
                if has_id:
                    # Adjust for the skipped column
                    write_col_index = col_index - 1
                else:
                    write_col_index = col_index

                current_row.write(write_col_index, value, style)
                width = len(represent) * COL_WIDTH_MULTIPLIER
                if width > column_widths[col_index]:
                    column_widths[col_index] = width
                    current_sheet.col(write_col_index).width = width
                col_index += 1

        # Additional sheet settings
        for sheet in sheets:
            sheet.panes_frozen = True
            sheet.horz_split_pos = 1

        # Write output
        output = BytesIO()
        book.save(output)
        output.seek(0)

        if attr.get("as_stream", False):
            return output

        # Response headers
        filename = "%s_%s.xls" % (request.env.server_name, title)
        disposition = "attachment; filename=\"%s\"" % filename
        response = current.response
        response.headers["Content-Type"] = contenttype(".xls")
        response.headers["Content-disposition"] = disposition

        return output.read()
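A hedged sketch of the dict form of resource accepted by encode(), following the docstring above; the column keys, labels, and types are illustrative, and codec stands for an instance of this codec class:

data = {"columns": ["name", "opened_on"],
        "headers": {"name": "Name", "opened_on": "Opened on"},
        "types":   {"name": "string", "opened_on": "date"},
        "rows":    [{"name": "Example site", "opened_on": "2020-01-01"}],
        }
xls_bytes = codec.encode(data, title="Report")                    # raw .xls contents
xls_stream = codec.encode(data, title="Report", as_stream=True)   # BytesIO buffer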
Example #58
0
    def encode(self, data_source, **attr):
        """
            Export data as a Shapefile

            @param data_source: the source of the data that is to be encoded
                                as a shapefile. This may be:
                                resource: the resource
                                item:     a list of pre-fetched values
                                          the headings are in the first row
                                          the data types are in the second row
            @param attr: dictionary of parameters:
                 * title:          The export filename
                 * list_fields:    Fields to include in list views
        """

        # Get the attributes
        title = attr.get("title")

        # Extract the data from the data_source
        if isinstance(data_source, (list, tuple)):
            headers = data_source[0]
            #types = data_source[1]
            items = data_source[2:]
        else:
            current.s3db.gis_location.wkt.represent = None
            list_fields = attr.get("list_fields")
            if not list_fields:
                list_fields = data_source.list_fields()
            if data_source.tablename == "gis_location":
                wkt_field = "wkt"
            else:
                wkt_field = "location_id$wkt"
            if wkt_field not in list_fields:
                list_fields.append(wkt_field)

            (_title, types, lfields, headers,
             items) = self.extractResource(data_source, list_fields)
            if not title:
                title = _title

        # Create the data structure
        output = []
        oappend = output.append

        # Header row
        headers["gis_location.wkt"] = "WKT"
        fields = []
        fappend = fields.append
        header = []
        happend = header.append
        for selector in lfields:
            h = s3_unicode(headers[selector].replace(" ", "_"))
            happend(h)
            if selector != "gis_location.wkt":
                # Don't include the WKT field as an Attribute in the Shapefile
                fappend(h)
        oappend('"%s"' % '","'.join(header))
        fields = ",".join(fields)

        for item in items:
            row = []
            rappend = row.append
            for selector in lfields:
                represent = s3_strip_markup(s3_unicode(item[selector]))
                rappend(represent)
            oappend('"%s"' % '","'.join(row))

        # Write out as CSV
        import tempfile
        web2py_path = os.getcwd()
        if os.path.exists(os.path.join(web2py_path,
                                       "temp")):  # use web2py/temp
            TEMP = os.path.join(web2py_path, "temp")
        else:
            TEMP = tempfile.gettempdir()
        os_handle_temp, temp_filepath = tempfile.mkstemp(dir=TEMP,
                                                         suffix=".csv")
        with open(temp_filepath, "w") as f:
            for line in output:
                f.write("%s\n" % line.encode("utf-8"))

        # Convert to Shapefile
        # @ToDo: migrate to GDAL Python bindings
        # Write out VRT file
        temp_filename = temp_filepath.rsplit(os.path.sep, 1)[1]
        vrt = \
'''<OGRVRTDataSource>
    <OGRVRTLayer name="%s">
        <SrcDataSource>%s</SrcDataSource>
        <GeometryType>wkbGeometryCollection</GeometryType>
        <TargetSRS>EPSG:4326</TargetSRS>
        <GeometryField encoding="WKT" field="WKT"/>
    </OGRVRTLayer>
</OGRVRTDataSource>''' % (temp_filename.rsplit(".", 1)[0], temp_filename)
        os_handle_vrt, vrt_filename = tempfile.mkstemp(dir=TEMP, suffix=".vrt")
        with open(vrt_filename, "w") as f:
            f.write(vrt)
        # @ToDo: Check that the data exists before writing out file
        # Write Points
        os.chdir(TEMP)
        # Use + not %s as % within string
        cmd = 'ogr2ogr -a_srs "EPSG:4326" -f "ESRI Shapefile" ' + title + '_point.shp ' + vrt_filename + ' -select ' + fields + ' -skipfailures -nlt POINT -where "WKT LIKE \'%POINT%\'"'
        #os.system("rm %s_point.*" % title)
        os.system(cmd)
        # Write Lines
        cmd = 'ogr2ogr -a_srs "EPSG:4326" -f "ESRI Shapefile" ' + title + '_line.shp ' + vrt_filename + ' -select ' + fields + ' -skipfailures -nlt MULTILINESTRING -where "WKT LIKE \'%LINESTRING%\'"'
        #os.system("rm %s_line.*" % title)
        os.system(cmd)
        # Write Polygons
        cmd = 'ogr2ogr -a_srs "EPSG:4326" -f "ESRI Shapefile" ' + title + '_polygon.shp ' + vrt_filename + ' -select ' + fields + ' -skipfailures -nlt MULTIPOLYGON -where "WKT LIKE \'%POLYGON%\'"'
        #os.system("rm %s_polygon.*" % title)
        os.system(cmd)
        os.close(os_handle_temp)
        os.unlink(temp_filepath)
        os.close(os_handle_vrt)
        os.unlink(vrt_filename)
        # Zip up
        import zipfile
        request = current.request
        filename = "%s_%s.zip" % (request.env.server_name, title)
        fzip = zipfile.ZipFile(filename, "w")
        for item in ("point", "line", "polygon"):
            for exten in ("shp", "shx", "prj", "dbf"):
                tfilename = "%s_%s.%s" % (title, item, exten)
                fzip.write(tfilename)
                os.unlink(tfilename)
        fzip.close()
        # Restore path
        os.chdir(web2py_path)

        # Response headers
        disposition = "attachment; filename=\"%s\"" % filename
        response = current.response
        response.headers["Content-Type"] = contenttype(".zip")
        response.headers["Content-disposition"] = disposition

        stream = open(os.path.join(TEMP, filename), "rb")
        return response.stream(stream,
                               chunk_size=DEFAULT_CHUNK_SIZE,
                               request=request)
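A hedged usage sketch of this shapefile encoder from a controller, assuming codec is an instance of this class and resource is an S3Resource whose records link to gis_location:

# Streams <server>_<title>.zip containing the point, line and polygon shapefiles
return codec.encode(resource,
                    title="facilities",
                    list_fields=["name", "location_id$wkt"])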