Code example #1
def pvc1_show_imageh5(request):
    # Loads JPEG images from hdf5 file
    h5_image_file = 'pvc1/pvc1_movie_frames.h5'
    movie_id = int(request.matchdict['movie_id'])
    segment_id = int(request.matchdict['segment_id'])
    frame = int(request.matchdict['frame'])
    image_dir = 'movie%03u_%03u.images' % (movie_id, segment_id)
    image_name = 'movie%03u_%03u_%03u.jpeg' % (movie_id, segment_id, frame)
    path = image_dir + '/' + image_name
    response = Response(content_type='image/jpeg')
    h5f = h5py.File(h5_image_file, 'r')
    try:
        ds = h5f[path]
    except KeyError: 
        # missing file, generate an image to return
        img = Image.new("RGB", (320, 220,), "#cccccc"  )
        draw = ImageDraw.Draw(img)
        draw.text((15, 60), image_name + ' missing', fill='#000')
        f = cStringIO.StringIO()
        img.save(f, "jpeg")
        f.seek(0)
        response.app_iter = f
    else:
        dsv = ds.value
        response.app_iter = dsv
    h5f.close()
    return response
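This view relies on Python 2 idioms (cStringIO, the deprecated ds.value accessor). A minimal Python 3 sketch of the same idea, assuming the frames are stored as byte arrays under the same HDF5 paths, could look like this:

import io
import h5py
from PIL import Image, ImageDraw
from pyramid.response import Response

def pvc1_show_imageh5_py3(request):
    # Hypothetical Python 3 variant of the view above.
    movie_id = int(request.matchdict['movie_id'])
    segment_id = int(request.matchdict['segment_id'])
    frame = int(request.matchdict['frame'])
    path = 'movie%03u_%03u.images/movie%03u_%03u_%03u.jpeg' % (
        movie_id, segment_id, movie_id, segment_id, frame)
    response = Response(content_type='image/jpeg')
    with h5py.File('pvc1/pvc1_movie_frames.h5', 'r') as h5f:
        try:
            data = bytes(h5f[path][()])  # ds[()] replaces the deprecated ds.value
        except KeyError:
            # Missing frame: generate a placeholder image instead.
            img = Image.new("RGB", (320, 220), "#cccccc")
            ImageDraw.Draw(img).text((15, 60), 'frame missing', fill='#000')
            buf = io.BytesIO()
            img.save(buf, "JPEG")
            data = buf.getvalue()
    response.body = data  # equivalent to app_iter = [data]
    return response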
Code example #2
def get_file_response(filename, document_root=None, accel_header=None):
    """helper the get a file response"""
    if not os.path.isfile(filename):
        return HTTPNotFound()
    resp = Response(content_type=get_mimetype(filename),
                    conditional_response=True)
    resp.content_length = os.path.getsize(filename)
    resp.last_modified = os.path.getmtime(filename)
    resp.etag = '%s-%s-%s' % (os.path.getmtime(filename),
                              os.path.getsize(filename), hash(filename))
    if accel_header:
        if accel_header.lower() == "x-accel-redirect":
            # return full path
            filename = filename[len(os.path.dirname(document_root)):]
            filename = '/%s' % filename.strip('/')
            resp.headers[accel_header.title()] = filename
        elif accel_header.lower() == "x-sendfile":
            # return full path
            resp.headers[accel_header.title()] = filename
        else:
            raise RuntimeError(
                "Can't find a way to use your %s header" % accel_header)
        resp.app_iter = [b'']
    else:
        resp.app_iter = FileIterable(filename)
    return resp
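A short usage sketch for the helper above; the path and view name are made up, and the accel header must match the front-end server configuration (here nginx with X-Accel-Redirect):

def serve_report(request):
    # Hypothetical view: let nginx serve the bytes via X-Accel-Redirect,
    # while get_file_response() fills in type, length, ETag and Last-Modified.
    return get_file_response('/srv/app/files/reports/latest.pdf',
                             document_root='/srv/app/files',
                             accel_header='x-accel-redirect')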
Code example #3
File: views.py Project: sokky/gazoBBS
def show_img(request):
    '''Display an image
    '''
    file_name = request.matchdict['file_name']
    md = model(request.db)
    file = md.get_file(file_name)

    response = Response()
    if file is not None:
        response.content_type = file.content_type
        response.app_iter = file
    else:
        response.content_type = 'image/jpeg'
        response.app_iter = open('nopict.jpg', 'rb')
    return response
Code example #4
File: admin.py Project: sigsergv/pyrone
def download_backup(request):
    encoded_filename = request.matchdict['backup_id']

    headers = []

    try:
        filename = base64.b64decode(encoded_filename).decode('utf-8')
    except TypeError:
        return HTTPNotFound()

    backups_dir = get_backups_dir()
    all_backups = [x for x in os.listdir(backups_dir) if os.path.isfile(os.path.join(backups_dir, x))]
    if filename not in all_backups:
        return HTTPNotFound()

    full_path = os.path.join(backups_dir, filename)
    if not os.path.isfile(full_path):
        return HTTPNotFound()

    headers = []
    content_length = os.path.getsize(full_path)
    headers.append(('Content-Length', str(content_length)))
    headers.append(('Content-Disposition', str('attachment; filename={0}'.format(filename))))

    response = Response(content_type='application/octet-stream')
    try:
        response.app_iter = open(full_path, 'rb')
    except IOError:
        return HTTPNotFound()

    response.headerlist += headers

    return response
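One caveat about this pattern: a raw file object assigned to app_iter is iterated line by line, which gives arbitrary chunk sizes for binary data. Pyramid's FileIter reads fixed-size blocks instead. A minimal sketch of the same download pattern, not the project's actual code:

import os
from pyramid.httpexceptions import HTTPNotFound
from pyramid.response import FileIter, Response

def stream_download(full_path, filename):
    # Sketch: build an attachment response that streams full_path in blocks.
    if not os.path.isfile(full_path):
        return HTTPNotFound()
    response = Response(content_type='application/octet-stream')
    response.content_length = os.path.getsize(full_path)
    response.content_disposition = 'attachment; filename={0}'.format(filename)
    response.app_iter = FileIter(open(full_path, 'rb'))
    return response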
Code example #5
File: views.py Project: bireme/scielobooks
def cover(request):
    sbid = request.matchdict['sbid']

    response_headers = {'content_type': 'image/jpeg',}

    try:
        monograph = request.db.get(sbid)
        if 'thumbnail' in request.path:
            img = request.db.fetch_attachment(monograph,monograph['cover_thumbnail']['filename'], stream=True)
        else:
            img = request.db.fetch_attachment(monograph,monograph['cover']['filename'], stream=True)
        response_headers['expires'] = datetime_rfc822(365)

    except (couchdbkit.ResourceNotFound, KeyError):
        img = urllib2.urlopen(static_url('scielobooks:static/images/fakecover.jpg', request))

    response = Response(**response_headers)
    response.app_iter = img
    try:
        response.etag = str(hash(img))
    except TypeError:
        #cannot generate a hash for the object, return it without the ETag
        pass

    return response
Code example #6
File: views_dynamic.py Project: higherkuo/chez-betty
def dynamic_item_img(request):
	item = Item.from_id(request.matchdict['item_id'])
	response = Response(content_type='image/jpeg')
	class Hack():
		def __init__(self, img):
			self.img = img
			self.idx = 0
		def getattr(self, item):
			print("item {}".format(item))
			raise AttributeError
		def __getattr(self, item):
			print("item {}".format(item))
			raise AttributeError
		def read(self, block_size = None):
			if self.idx >= len(self.img):
				return ''
			if block_size is None:
				self.idx = len(self.img)
				return self.img
			if self.idx + block_size > len(self.img):
				r = self.img[self.idx:]
				self.idx = len(self.img)
				return r
			else:
				r = self.img[self.idx:self.idx + block_size]
				self.idx += block_size
				return r
		def close(self):
			pass
	h = Hack(item.img.img)
	response.app_iter = FileIter(h)
	return response
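The Hack class hand-rolls a file-like wrapper around the in-memory JPEG bytes. An io.BytesIO object already provides the read() and close() methods FileIter needs, so a simplified sketch of the same view (assuming the same Item model) would be:

import io
from pyramid.response import FileIter, Response

def dynamic_item_img_simple(request):
    # Sketch: BytesIO is file-like, so FileIter can stream the bytes directly.
    item = Item.from_id(request.matchdict['item_id'])
    response = Response(content_type='image/jpeg')
    response.app_iter = FileIter(io.BytesIO(item.img.img))
    return response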
Code example #7
File: airs.py Project: OpenCIOC/onlineresources
	def __call__(self):
		request = self.request
		user = request.user

		if not user:
			return make_401_error(u'Access Denied')

		if 'airsexport' not in user.cic.ExternalAPIs:
			return make_401_error(u'Insufficient Permissions')

		model_state = modelstate.ModelState(request)
		model_state.schema = AIRSExportOptionsSchema()
		model_state.form.method = None

		if not model_state.validate():
			if model_state.is_error('DST'):
				msg = u"Invalid Distribution"
			elif model_state.is_error("version"):
				msg = u"Invalid Version"
			else:
				msg = u"An unknown error occurred."

				log.error('AIRS Export Errors: %s: %s', msg, model_state.form.errors)
			return make_internal_server_error(msg)

		res = Response(content_type='application/zip', charset=None)
		res.app_iter, res.length = _zip_stream(request, model_state)

		res.headers['Content-Disposition'] = 'attachment;filename=Export.zip'
		return res
Code example #8
File: pull.py Project: OpenCIOC/onlineresources
	def __call__(self):
		data, record_data = self._really_do_it()

		data = json.dumps(data)

		file = tempfile.TemporaryFile()
		zip = zipfile.ZipFile(file, 'w', zipfile.ZIP_DEFLATED)
		zip.writestr('export.json', data)

		if record_data:
			try:
				zip.write(record_data, 'record_data.xml')
			finally:
				try:
					os.unlink(record_data)
				except:
					pass

		zip.close()
		length = file.tell()
		file.seek(0)

		res = Response(content_type='application/zip', charset=None)
		res.app_iter = FileIterator(file)
		res.content_length = length

		res.headers['Content-Disposition'] = 'attachment;filename=Export.zip'
		return res
Code example #9
File: views.py Project: AmadeusITGroup/oscad2
def translation_template(request):
    resp = Response()
    resp.content_disposition = 'attachment; filename=oscad.pot'
    resp.app_iter = resource_stream('oscad', 'locale/oscad.pot')
    # otherwise Firefox thinks its a PowerPoint
    resp.content_type = 'text/plain'
    return resp
Code example #10
File: views.py Project: krissik/c3sPartyTicketing
def get_ticket(request):
    """
    this view gives a user access to her ticket via URL with code
    the response is a PDF download
    """
    _code = request.matchdict['code']
    _email = request.matchdict['email']
    _ticket = PartyTicket.get_by_code(_code)
    if isinstance(_ticket, NoneType):
        return HTTPFound(location=request.route_url('party'))
    if not (_ticket.email == _email):
        #print("no match!")
        return HTTPFound(location=request.route_url('party'))

    # prepare ticket URL with email & code
    # 'https://events.c3s.cc/ci/p1402/' + _ticket.email + _ticket.email_confirm_code
    # 'https://192.168.2.128:6544/ci/p1402/' + _ticket.email + _ticket.email_confirm_code
    _url = request.registry.settings[
        'c3spartyticketing.url'] + '/ci/p1402/' + _ticket.email_confirm_code

    # return a pdf file
    pdf_file = make_qr_code_pdf(_ticket, _url)
    response = Response(content_type='application/pdf')
    pdf_file.seek(0)  # rewind to beginning
    response.app_iter = open(pdf_file.name, "rb")
    return response
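Since the generated ticket PDF already exists on disk, pyramid.response.FileResponse could serve it directly instead of an open file handle; a minimal sketch, assuming pdf_file.name is the path produced by make_qr_code_pdf():

from pyramid.response import FileResponse

def serve_ticket_pdf(request, pdf_path):
    # Sketch: FileResponse opens the file itself and sets Content-Length.
    return FileResponse(pdf_path, request=request,
                        content_type='application/pdf')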
Code example #11
def view_download(request):
    req = request
    url_dict = req.matchdict
    id1 = url_dict['id'].isdigit() and url_dict['id'] or 0
    dis = url_dict['disposisi_id'].isdigit() and url_dict['disposisi_id'] or 0
    
    si = DBSession.query(Disposisi.surat_id).\
                      filter(Disposisi.id==dis,
                      )
    row = DBSession.query(SuratDetail
                  ).filter(SuratDetail.id==id1
                  ).first()
    if not row:
        return {'success': False, "msg": "id not found"}

    b   = row.path
    c   = row.name
    d   = row.mime

    settings = get_settings()
    dir_path = os.path.realpath(settings['static_files'])
        
    filename = os.path.join(dir_path, b)
    headers  = [('Content-Disposition', 'attachment; filename=' + str(c))]
    response = Response(content_type=d, headerlist=headers)
    f = open(filename, 'rb')
    response.app_iter = FileIter(f)
    print "----------------path------------------",b
    print "--------------nama file---------------",c
    print "------------content type--------------",d
    return response   
Code example #12
File: views.py Project: dmdm/PySite
 def xhr_filemgr(self):
     cmd = ''
     cmd_args = dict()
     for k in self.request.params:
         if k == '_':
             continue
         if k == 'cmd':
             cmd = self.request.params[k]
         else:
             if k.endswith("[]"):
                 k2 = k.rstrip("[]")
                 cmd_args[k2] = self.request.params.getall(k)
             else:
                 cmd_args[k] = self.request.params[k]
     finder = create_finder(self.context, self.request)
     try:
         finder.run(cmd, cmd_args)
     except FinderError as e:
         L.exception(e)
         if e.status:
             self.request.response.status = e.status
     if 'file' in finder.response:
         resp = Response()
         resp.app_iter = finder.response['file']
         if finder.headers:
             for k, v in finder.headers.items():
                 resp.headers[k] = v
         return resp
     else:
         if finder.headers:
             for k, v in finder.headers.items():
                 self.request.response.headers[k] = v
         return finder.response
Code example #13
File: sponsors_views.py Project: AnneGilles/zabo
def sponsor_image_small(request):
    """
    return a smaller image depending on the amount given
    (see get_sponsorshipGrade)
    """
    #print "this is sponsor image"
    _code = request.matchdict['linkcode']
    _abo = Abo.get_by_linkcode(_code)
    if isinstance(_abo, NoneType):
        if request.locale_name == 'de':
            the_url = 'zabo:static/ungueltig_s.png'
        else:
            the_url = 'zabo:static/invalid_s.png'
        return HTTPFound(request.static_url(the_url))
    #the_url = 'zabo:static/badge' + _abo.get_sponsorshipGrade() + '.png'
    #return HTTPFound(request.static_url(the_url))
    # XXX TODO: spool the files, don't serve from static !!!
    # link must be unknown to outside!
    base_path = request.registry.settings['base_path'] or ''
    the_file = os.path.abspath(
        os.path.join(
            base_path,
            'zabo/static_offline/badge' + _abo.get_sponsorshipGrade() + '_s.png')
    )
    response = Response(content_type='image/png')
    response.app_iter = open(the_file, "rb")
    return response  # pragma: no cover
Code example #14
def pdf_file(request):
    sbid = request.matchdict['sbid']
    req_part = request.matchdict['part'].split('-')

    monograph = Monograph.get(request.db, sbid)
    if len(req_part) == 2 and req_part[1] == monograph.isbn:
        try:
            pdf_file = request.db.fetch_attachment(monograph._id, monograph.pdf_file['filename'], stream=True)
        except (couchdbkit.ResourceNotFound, AttributeError):
            raise exceptions.NotFound()
    else:
        parts = get_book_parts(monograph._id, request)
        try:
            selected_part = parts[int(req_part[2])]
        except (IndexError, ValueError):
            raise exceptions.NotFound()

        part = Part.get(request.db, selected_part['part_sbid'])
        try:
            pdf_file = request.db.fetch_attachment(part._id, part.pdf_file['filename'], stream=True)
        except (couchdbkit.ResourceNotFound, AttributeError):
            raise exceptions.NotFound()

    response = Response(content_type='application/pdf', expires=datetime_rfc822(365))
    response.app_iter = pdf_file
    try:
        response.etag = str(hash(pdf_file))
    except TypeError:
        #cannot generate a hash for the object, return it without the ETag
        pass

    return response
Code example #15
File: views.py Project: rondinelisaad/scielobooks
def swf_file(request):
    sbid = request.matchdict["sbid"]
    req_part = request.matchdict["part"]

    monograph = Monograph.get(request.db, sbid)
    if req_part == monograph.isbn:
        try:
            pdf_file = request.db.fetch_attachment(monograph._id, monograph.pdf_file["filename"])
        except (couchdbkit.ResourceNotFound, AttributeError):
            raise exceptions.NotFound()
    else:
        parts = get_book_parts(monograph._id, request)
        try:
            selected_part = parts[int(req_part)]
        except (IndexError, ValueError):
            raise exceptions.NotFound()

        part = Part.get(request.db, selected_part["part_sbid"])
        try:
            pdf_file = request.db.fetch_attachment(part._id, part.pdf_file["filename"])
        except (couchdbkit.ResourceNotFound, AttributeError):
            raise exceptions.NotFound()

    swf_file = functions.convert_pdf2swf(pdf_file)

    response = Response(content_type="application/x-shockwave-flash")
    response.app_iter = swf_file

    return response
Code example #16
def swf_file(request):
    sbid = request.matchdict['sbid']
    req_part = request.matchdict['part']

    monograph = Monograph.get(request.db, sbid)
    if req_part == monograph.isbn:
        try:
            pdf_file = request.db.fetch_attachment(monograph._id, monograph.pdf_file['filename'])
        except (couchdbkit.ResourceNotFound, AttributeError):
            raise exceptions.NotFound()
    else:
        parts = get_book_parts(monograph._id, request)
        try:
            selected_part = parts[int(req_part)]
        except (IndexError, ValueError):
            raise exceptions.NotFound()

        part = Part.get(request.db, selected_part['part_sbid'])
        try:
            pdf_file = request.db.fetch_attachment(part._id, part.pdf_file['filename'])
        except (couchdbkit.ResourceNotFound, AttributeError):
            raise exceptions.NotFound()

    swf_file = functions.convert_pdf2swf(pdf_file)

    response = Response(content_type='application/x-shockwave-flash', expires=datetime_rfc822(365))
    response.app_iter = swf_file
    try:
        response.etag = str(hash(swf_file))
    except TypeError:
        #cannot generate a hash for the object, return it without the ETag
        pass

    return response
Code example #17
File: views.py Project: adroullier/nive
 def SendFile(self, file):
     """
     Creates the response and sends the file back. Uses the FileIterator.
     
     #!date format
     """
     if not file:
         return HTTPNotFound()
     last_mod = file.mtime()
     if not last_mod:
         last_mod = self.context.meta.pool_change
     r = Response(content_type=str(GetMimeTypeExtension(file.extension)), conditional_response=True)
     iterator = file.iterator()
     if iterator:
         r.app_iter = iterator
     else:
         try:
             r.body = file.read()
         except FileNotFound:
             raise NotFound
     r.content_length = file.size
     r.last_modified = last_mod
     r.etag = '%s-%s' % (last_mod, hash(file.path))
     r.cache_expires(self.fileExpires)
     return r    
Code example #18
File: views.py Project: alvesjnr/scielobooks
def pdf_file(request):
    sbid = request.matchdict['sbid']
    req_part = request.matchdict['part']

    monograph = Monograph.get(request.db, sbid)
    if req_part == monograph.isbn:
        try:
            pdf_file = request.db.fetch_attachment(monograph._id, monograph.pdf_file['filename'], stream=True)
        except (couchdbkit.ResourceNotFound, AttributeError):
            raise exceptions.NotFound()
    else:
        parts = get_book_parts(monograph._id, request)
        try:
            selected_part = parts[int(req_part)]
        except (IndexError, ValueError):
            raise exceptions.NotFound()

        part = Part.get(request.db, selected_part['part_sbid'])
        try:
            pdf_file = request.db.fetch_attachment(part._id, part.pdf_file['filename'], stream=True)
        except (couchdbkit.ResourceNotFound, AttributeError):
            raise exceptions.NotFound()

    response = Response(content_type='application/pdf')
    response.app_iter = pdf_file

    return response
Code example #19
File: dues_2018.py Project: C3S/c3sMembership
def get_dues18_invoice(invoice, request):
    """
    Gets the invoice and returns a PDF response.

    Args:
        invoice: The invoice for which the PDF is requested.
        request: The pyramid.request.Request object.

    Returns:
        A PDF response in case the invoice exists. Otherwise a redirect to the
        error page.
    """
    if invoice is None:
        request.session.flash(
            u'No invoice found!',
            'danger'  # message queue for user
        )
        return HTTPFound(request.route_url('error'))

    if invoice.is_reversal:
        pdf_file = make_reversal_pdf_pdflatex(invoice)
    else:
        pdf_file = make_invoice_pdf_pdflatex(invoice)
    response = Response(content_type='application/pdf')
    pdf_file.seek(0)
    response.app_iter = open(pdf_file.name, "rb")
    return response
Code example #20
File: user.py Project: AnneGilles/c3sar
def generate_contract_de_fdf_pdf(user):
    """
    take user information and generate fdf
    """
    if DEBUG:  # pragma: no cover
        print "===== this is generate_fdf_pdf"
    from fdfgen import forge_fdf
    fields = [
        ('surname', user.surname),
        ('lastname', user.lastname),
        ('street', user.street),
        ('number', user.number),
        ('postcode', user.postcode),
        ('city', user.city),
        ('email', user.email),
        ('user_id', user.id),
        ('username', user.username),
        ('date_registered', user.date_registered),
        ('date_generated', datetime.now()),
        ]
    #generate fdf string
    fdf = forge_fdf("", fields, [], [], [])
    # write to file
    my_fdf_filename = "fdf" + str(user.id) + ".fdf"

    fdf_file = open(my_fdf_filename, "w")
    fdf_file.write(fdf)
    fdf_file.close()
    if DEBUG:  # pragma: no cover
        print "fdf file written."

    res = os.popen(
        'pdftk pdftk/berechtigungsvertrag-2.2.pdf fill_form %s output'
        ' formoutput.pdf flatten' % my_fdf_filename).read()

    if DEBUG:  # pragma: no cover
        print res
        print "done: put data into form and finalized it"

    # delete the fdf file
    res = os.popen('rm %s' % my_fdf_filename)
    if DEBUG:  # pragma: no cover
        print res
        print "combining with bank account form"
    # combine
    res = os.popen(
        'pdftk formoutput.pdf pdftk/bankaccount.pdf output output.pdf').read()
    if DEBUG:  # pragma: no cover
        print res
        print "combined personal form and bank form"

    # delete the fdf file
    os.popen('rm formoutput.pdf').read()

    # return a pdf file
    from pyramid.response import Response
    response = Response(content_type='application/pdf')
    response.app_iter = open("output.pdf", "r")
    return response
Code example #21
File: views.py Project: danjac/photoapp
def image(photo, request):
    """
    Show full-size image
    """

    response = Response(content_type=photo.content_type)
    response.app_iter = photo.get_image(request.fs).read()
    return response
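Note that read() returns the raw bytes, so using them as app_iter iterates one element at a time (and yields integers under Python 3, which WSGI servers reject). Wrapping the bytes in a one-element list, or assigning them to response.body, avoids this; a sketch:

from pyramid.response import Response

def image_buffered(photo, request):
    # Sketch: buffer the image bytes once and hand them to the response whole.
    data = photo.get_image(request.fs).read()
    response = Response(content_type=photo.content_type)
    response.app_iter = [data]  # or: response.body = data
    return response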
Code example #22
def qrcode(request):
    ''' streams a qrcode image for the param 'content' (defaults to http://trimet.org)
    '''
    response = Response(content_type='image/png')
    content = html_utils.get_first_param(request, 'content', 'http://trimet.org')
    img_io = qr_to_stream(content)
    response.app_iter = img_io
    return response
Code example #23
File: views.py Project: danjac/photoapp
def thumbnail(photo, request):
    """
    Renders the thumbnailed image of the photo.
    """

    response = Response(content_type="image/jpeg")
    response.app_iter = photo.get_thumbnail(request.fs).read()
    return response
Code example #24
File: views.py Project: amtrack/s3authbasic
def site(request):
    path = unquote(request.path)
    s3file = request.s3.get_file(path)
    if s3file is None:
        return HTTPNotFound()
    response = Response(content_type=s3file.content_type)
    response.app_iter = s3file
    return response
Code example #25
File: sse.py Project: cristinel-casu/sandbox
def stream(request):
    response = Response()
    response.headers.update({'Access-Control-Allow-Origin': '*'})
    response.content_type = 'text/event-stream'
    response.cache_expires(0)

    response.app_iter = produce()
    return response
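produce() is not shown in this snippet; for a text/event-stream response it would be a generator yielding complete 'data: ...' events as bytes. A purely hypothetical sketch:

import time

def produce():
    # Hypothetical SSE generator: each yielded chunk is one event, encoded as
    # bytes because WSGI iterables must yield bytestrings.
    for tick in range(5):
        yield 'data: tick {0}\n\n'.format(tick).encode('utf-8')
        time.sleep(1)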
Code example #26
    def toolbar_handler(request):
        root_path = request.route_path("debugtoolbar.root")
        request.exc_history = exc_history
        remote_addr = request.remote_addr

        if request.path.startswith(root_path) or (not remote_addr in hosts):
            return handler(request)

        toolbar = DebugToolbar(request, panel_classes)
        request.debug_toolbar = toolbar

        _handler = handler

        for panel in toolbar.panels:
            _handler = panel.wrap_handler(_handler)

        try:
            response = _handler(request)
        except Exception:
            info = sys.exc_info()
            if exc_history is not None:
                tb = get_traceback(info=info, skip=1, show_hidden_frames=False, ignore_system_exceptions=True)
                for frame in tb.frames:
                    exc_history.frames[frame.id] = frame

                exc_history.tracebacks[tb.id] = tb
                body = tb.render_full(request, evalex=True).encode("utf-8", "replace")
                response = Response(body, status=500)
                toolbar.process_response(response)
                return response

            raise

        else:
            if intercept_redirects:
                # Intercept http redirect codes and display an html page with a
                # link to the target.
                if response.status_int in redirect_codes:
                    redirect_to = response.location
                    redirect_code = response.status_int
                    if redirect_to:
                        content = render(
                            "pyramid_debugtoolbar:templates/redirect.jinja2",
                            {"redirect_to": redirect_to, "redirect_code": redirect_code},
                        )
                        content = content.encode(response.charset)
                        response.content_length = len(content)
                        response.location = None
                        response.app_iter = [content]
                        response.status_int = 200

            toolbar.process_response(response)
            return response

        finally:
            # break circref
            del request.debug_toolbar
Code example #27
File: views.py Project: llacroix/pyramid_xmms
def wait(request):
    obs = Observer(obss=observers)

    observers.append(obs)

    r = Response()
    r.content_type = 'application/json'
    r.app_iter = obs
    return r
Code example #28
def member_list_date_pdf_view(request):
    """
    The membership list *for a given date* for printout as PDF.
    The date is supplied in and parsed from the URL, e.g.
    http://0.0.0.0:6543/aml-2014-12-31.pdf

    The PDF is generated using pdflatex.

    If the date is not parseable, an error message is shown.
    """
    effective_date_string = ''
    try:
        effective_date_string = request.matchdict['date']
        effective_date = datetime.strptime(effective_date_string, '%Y-%m-%d') \
            .date()
    except (KeyError, ValueError):
        request.session.flash(
            "Invalid date! '{}' does not compute! "
            "try again, please! (YYYY-MM-DD)".format(
                effective_date_string),
            'danger'
        )
        return HTTPFound(request.route_url('error'))

    # TODO: repositories are data layer and must only be used by the business
    # layer. Introduce business layer logic which uses the repositories and can
    # be accessed by this view via the request.
    members = request.registry.member_information.get_accepted_members_sorted(
        effective_date)

    membership_list_entries = []
    for member in members:
        membership_list_entries.append({
            'lastname': member.lastname,
            'firstname': member.firstname,
            'membership_number': member.membership_number,
            'address1': member.address1,
            'address2': member.address2,
            'postcode': member.postcode,
            'city': member.city,
            'country': member.country,
            'membership_date': member.membership_date,
            'membership_loss_date': member.membership_loss_date,
            'membership_loss_type': member.membership_loss_type,
            'shares_count': request.registry.share_information \
                .get_member_share_count(
                        member.membership_number,
                        effective_date)
        })

    response = Response(content_type='application/pdf')
    response.app_iter = generate_membership_list_pdf(
        effective_date,
        membership_list_entries)
    return response
Code example #29
File: httpd.py Project: jamilatta/balaio
def get_file_from_attempt(request):
    """
    Get a portion of a package bound to an Attempt.

    Get a specific member, by name:
    `/api/:api_id/files/:attempt_id/:target.zip/?file=:member`

    Get more than one specific members, by name:
    `/api/:api_id/files/:attempt_id/:target.zip/?file=:member&file=:member2`

    Get the full package:
    `/api/:api_id/files/:attempt_id/:target.zip/?full=true`
    """
    has_body = False

    attempt_id = request.matchdict.get('attempt_id', None)
    try:
        attempt = request.db.query(models.Attempt).get(attempt_id)
    except DataError:
        return HTTPNotFound()

    if attempt is None:
        return HTTPNotFound()

    response = Response(content_type='application/zip')

    # Get the full package.
    if asbool(request.GET.get('full', False)):
        response.app_iter = open(attempt.filepath, 'rb')
        has_body = True

    else:
        # Get partial portions of the package.
        files = [member for attr, member in request.GET.items() if attr == 'file']

        try:
            if files:
                response.app_iter = attempt.analyzer.subzip(*files)
                has_body = True
        except ValueError:
            return HTTPBadRequest()

    return response if has_body else HTTPBadRequest()
Code example #30
def gb_all_nicks_view(request):
    tf = tempfile.NamedTemporaryFile(prefix='genbank_export_nick_%s' % request.job.key,
                                     suffix='.gb', delete=True)
    tf.write(genbank.all_nicks_to_GB(request.job.id))
    tf.seek(0)
    response = Response(content_type='text/plain')
    response.app_iter = tf
    response.headers['Content-Disposition'] = ("attachment; filename=all_nickases.gb")

    return response
Code example #31
File: views.py Project: scieloorg/scielobooks
def evaluation_attachments(request):
    sbid = request.matchdict['sbid']
    filename = request.matchdict['filename']

    monograph = Monograph.get(request.db, sbid)
    try:
        attachment = request.db.fetch_attachment(monograph._id,
                                                 filename,
                                                 stream=True)
    except couchdbkit.ResourceNotFound:
        raise exceptions.NotFound()

    response = Response(content_type='application/octet-stream')
    response.app_iter = attachment

    return response
Code example #32
def csv_all_guides_view(request):
    tf = tempfile.NamedTemporaryFile(prefix='csv_export_all_guides_%s' %
                                     request.job.key,
                                     suffix='.csv',
                                     delete=True)
    # this is where I usually put stuff in the file

    job = request.job

    tf.write(genbank.all_spacers_to_CSV(job))
    tf.seek(0)

    response = Response(content_type='application/csv')
    response.app_iter = tf
    response.headers['Content-Disposition'] = (
        "attachment; filename=offtargets.csv")
    return response
Code example #33
File: views.py Project: wildcardcorp/princexmlserver
def convert(req):
    """
    Post request variables:
        css: []  # json encoded
        xml: ""
        doctype: auto | xml | html | html5(default html)
    """
    css = json.loads(req.params['css'])
    xml = req.params['xml']
    doctype = req.params.get('doctype', 'html')
    if req.keep_stats:
        pdf = _stats(req, prince.create_pdf, xml, css, doctype)
    else:
        pdf = prince.create_pdf(xml, css, doctype)

    resp = Response(content_type='application/pdf')
    resp.app_iter = BytesIO(pdf)
    return resp
Code example #34
def csv_one_spacer_view(request):
    tf = tempfile.NamedTemporaryFile(prefix='csv_export_one_spacer_%s' %
                                     request.job.key,
                                     suffix='.csv',
                                     delete=True)
    # this is where I usually put stuff in the file

    job = request.job
    spacer = Session.query(Spacer).get(request.matchdict["spacerid"])

    tf.write(genbank.one_spacer_to_CSV(job, spacer))
    tf.seek(0)

    response = Response(content_type='application/csv')
    response.app_iter = tf
    response.headers['Content-Disposition'] = (
        "attachment; filename=offtargets.csv")
    return response
Code example #35
File: views.py Project: scieloorg/scielobooks
def epub_file(request):
    sbid = request.matchdict['sbid']

    monograph = Monograph.get(request.db, sbid)
    try:
        epub_file = request.db.fetch_attachment(
            monograph._id, monograph.epub_file['filename'], stream=True)
    except (couchdbkit.ResourceNotFound, AttributeError, KeyError):
        raise exceptions.NotFound()

    response = Response(content_type='application/epub',
                        expires=datetime_rfc822(365))
    response.app_iter = epub_file
    try:
        response.etag = str(hash(epub_file))
    except TypeError:
        #cannot generate a hash for the object, return it without the ETag
        pass

    return response
Code example #36
def gb_one_nick_view(request):
    #response = Response(content_type='application/csv')
    tf = tempfile.NamedTemporaryFile(prefix='genbank_export_one_nick_%s' %
                                     request.job.key,
                                     suffix='.gb',
                                     delete=True)
    # this is where I usually put stuff in the file

    job = request.job
    sf = Session.query(Spacer).get(request.matchdict["spacerfwdid"])
    sr = Session.query(Spacer).get(request.matchdict["spacerrevid"])

    tf.write(genbank.one_nick_to_GB(job, sf, sr))
    tf.seek(0)

    #response = Response(content_type='application/csv')
    response = Response(content_type='text/plain')
    response.app_iter = tf
    #response.headers['Content-Disposition'] = ("attachment; filename=nickase_export.gb")
    # a target=_blank
    return response
Code example #37
File: api.py Project: CLARIAH/docker-pyff
def search_handler(request):
    match = request.params.get('q', request.params.get('query', None))
    entity_filter = request.params.get('entity_filter', '{http://pyff.io/role}idp')
    log.debug("match={}".format(match))
    load_icon = request.params.get('load_icon', False)
    store = request.registry.md.store

    #import pdb; pdb.set_trace()
    def _response():
        yield b('[')
        in_loop = False
        for e in store.search(query=match.lower(), entity_filter=entity_filter):
            if in_loop:
                yield b(',')
            yield b(dumps(e))
            in_loop = True
        yield b(']')

    response = Response(content_type='application/json')
    response.app_iter = _response()
    return response
Code example #38
File: api.py Project: nextgis/nextgisweb
def cog(resource, request):
    request.resource_permission(PERM_READ)

    fn = env.raster_layer.workdir_filename(resource.fileobj)
    filesize = os.path.getsize(fn)

    if request.method == "HEAD":
        return Response(
            accept_ranges="bytes",
            content_length=filesize,
            content_type="image/geo+tiff"
        )

    if request.method == "GET":
        if not resource.cog:
            raise ValidationError(_("Requested raster is not COG."))

        range = request.range
        if range is None:
            raise ValidationError(_("Range header is missed or invalid."))

        content_range = range.content_range(filesize)
        if content_range is None:
            raise ValidationError(_("Range %s can not be read." % range))

        content_length = content_range.stop - content_range.start
        response = Response(
            status_code=206,
            content_range=content_range,
            content_type="image/geo+tiff"
        )

        response.app_iter = RangeFileWrapper(
            open(fn, "rb"),
            offset=content_range.start,
            length=content_length
        )
        response.content_length = content_length

        return response
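RangeFileWrapper is project code that is not included in this snippet. A hypothetical stand-in, assuming all it needs to do is yield the requested byte range in blocks and close the file afterwards:

class RangeFileWrapper(object):
    """Sketch: yield length bytes of fileobj starting at offset, in blocks."""

    def __init__(self, fileobj, offset=0, length=None, block_size=8192):
        self.fileobj = fileobj
        self.fileobj.seek(offset)
        self.remaining = length
        self.block_size = block_size

    def __iter__(self):
        while self.remaining is None or self.remaining > 0:
            size = self.block_size if self.remaining is None \
                else min(self.block_size, self.remaining)
            chunk = self.fileobj.read(size)
            if not chunk:
                break
            if self.remaining is not None:
                self.remaining -= len(chunk)
            yield chunk

    def close(self):
        self.fileobj.close()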
Code example #39
def echo(request):
    """
    Echo an uploaded file back to the client as an text/html document so it can
    be handled by Ext.

    The response is JSON-wrapped and base64-encoded to ensure that there are no
    special HTML characters or charset problems and so that braindead ext
    doesn't barf on it.

    See:
    http://docs.sencha.com/ext-js/3-4/#!/api/Ext.form.BasicForm-cfg-fileUpload
    """
    if request.method != 'POST':
        return HTTPBadRequest()
    try:
        file = request.POST['file']
    except KeyError:
        return HTTPBadRequest()
    response = Response()
    response.app_iter = json_base64_encode(file.filename, file.file)
    response.content_type = 'text/html'
    return response
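json_base64_encode is a project helper that is not shown here; judging from the docstring it streams a JSON document whose payload is the base64-encoded upload. A hypothetical sketch of such a generator:

import base64
import json

def json_base64_encode(filename, fileobj):
    # Hypothetical helper: yield one JSON document with the file content
    # base64-encoded, usable directly as a WSGI app_iter.
    payload = base64.b64encode(fileobj.read()).decode('ascii')
    yield json.dumps({'filename': filename,
                      'success': True,
                      'data': payload}).encode('utf-8')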
Code example #40
def dynamic_item_img(request):
    item = Item.from_id(request.matchdict['item_id'])
    response = Response(content_type='image/jpeg')

    class Hack():
        def __init__(self, img):
            self.img = img
            self.idx = 0

        def getattr(self, item):
            print("item {}".format(item))
            raise AttributeError

        def __getattr(self, item):
            print("item {}".format(item))
            raise AttributeError

        def read(self, block_size=None):
            if self.idx >= len(self.img):
                return ''
            if block_size is None:
                self.idx = len(self.img)
                return self.img
            if self.idx + block_size > len(self.img):
                r = self.img[self.idx:]
                self.idx = len(self.img)
                return r
            else:
                r = self.img[self.idx:self.idx + block_size]
                self.idx += block_size
                return r

        def close(self):
            pass

    h = Hack(item.img.img)
    response.app_iter = FileIter(h)
    return response
Code example #41
def echo(request):
    """
    Echo an uploaded file back to the client as an text/html document so it can
    be handled by Ext.

    The response is JSON-wrapped and base64-encoded to ensure that there are no
    special HTML characters or charset problems and so that braindead ext
    does not barf on it.

    See:
    http://docs.sencha.com/ext-js/3-4/#!/api/Ext.form.BasicForm-cfg-fileUpload
    """
    try:
        file_ = request.POST["file"]
    except KeyError:
        return HTTPBadRequest()
    response = Response()
    response.app_iter = json_base64_encode(file_.filename, file_.file)
    return set_common_headers(request,
                              "echo",
                              NO_CACHE,
                              response=response,
                              content_type="text/html")
Code example #42
    def __call__(self):
        request = self.request
        user = request.user

        if not user:
            return make_401_error("Access Denied", "Export")

        if "clbcexport" not in user.cic.ExternalAPIs:
            return make_401_error("Insufficient Permissions", "Export")

        with request.connmgr.get_connection("admin") as conn:
            cursor = conn.execute(
                "SELECT CAST(Vendor AS nvarchar(max)) AS Vendor FROM CLBC_VENDOR_EXPORT"
            )

            data = [x[0] for x in cursor.fetchall()]

            cursor.close()

        data.insert(0, '<?xml version="1.0" encoding="UTF-8"?>\r\n<Vendors>')
        data.append("</Vendors>")
        data = "\r\n".join(data).encode("utf8")

        file = tempfile.TemporaryFile()
        zip = zipfile.ZipFile(file, "w", zipfile.ZIP_DEFLATED)
        zip.writestr("export.xml", data)
        zip.close()
        length = file.tell()
        file.seek(0)

        res = Response(content_type="application/zip", charset=None)
        res.app_iter = FileIterator(file)
        res.content_length = length

        res.headers["Content-Disposition"] = "attachment;filename=Export.zip"
        return res
Code example #43
def phonology(request):
    """
    Computes phonology of a specified perspective.

    Perspective is specified by request parameters 'perspective_client_id' and 'perspective_object_id',
    example of a request: /phonology?perspective_client_id=345&perspective_object_id=2.
    """

    perspective_cid = request.params.get('perspective_client_id')
    perspective_oid = request.params.get('perspective_object_id')

    # Checking if we have limits on number of computed results.

    limit = (None if 'limit' not in request.params else
        int(request.params.get('limit')))

    limit_exception = (None if 'limit_exception' not in request.params else
        int(request.params.get('limit_exception')))

    limit_no_vowel = (None if 'limit_no_vowel' not in request.params else
        int(request.params.get('limit_no_vowel')))

    limit_result = (None if 'limit_result' not in request.params else
        int(request.params.get('limit_result')))

    # TODO: get perspective's translation and language it belongs to.

    # We get lexical entries of this perspective with markup'ed sounds.

    Sound = aliased(Entity, name = "Sound")
    PublishingSound = aliased(PublishingEntity, name = "PublishingSound")

    query = DBSession.query(LexicalEntry, Entity, Sound, PublishingEntity, PublishingSound).filter(and_(
        LexicalEntry.parent_client_id == perspective_cid,
        LexicalEntry.parent_object_id == perspective_oid,
        LexicalEntry.marked_for_deletion == False,
        Entity.parent_client_id == LexicalEntry.client_id,
        Entity.parent_object_id == LexicalEntry.object_id,
        Entity.marked_for_deletion == False,
        Entity.additional_metadata.contains({"data_type": "praat markup"}),
        PublishingEntity.client_id == Entity.client_id,
        PublishingEntity.object_id == Entity.object_id,
        PublishingEntity.published == True,
        PublishingEntity.accepted == True,
        Sound.client_id == Entity.self_client_id,
        Sound.object_id == Entity.self_object_id,
        Sound.marked_for_deletion == False,
        PublishingSound.client_id == Sound.client_id,
        PublishingSound.object_id == Sound.object_id,
        PublishingSound.published == True,
        PublishingSound.accepted == True))

    # We process these lexical entries in batches. Just in case, it seems that perspectives rarely have more
    # then several hundred such lexical entries.

    exception_counter = 0
    no_vowel_counter = 0
    result_list = list()

    for index, row in enumerate(query.yield_per(100)):

        markup_url = row.Entity.content
        sound_url = row.Sound.content

        cache_key = 'phonology:{0}:{1}:{2}:{3}'.format(
            row.Sound.client_id, row.Sound.object_id,
            row.Entity.client_id, row.Entity.object_id)

        # Checking if we have cached result for this pair of sound/markup.

        cache_result = CACHE.get(cache_key)

        if cache_result == 'no_vowel':

            log.debug(
                '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}) '
                '[CACHE {7}]: no vowels\n{8}\n{9}'.format(
                index,
                row.LexicalEntry.client_id, row.LexicalEntry.object_id,
                row.Sound.client_id, row.Sound.object_id,
                row.Entity.client_id, row.Entity.object_id,
                cache_key, markup_url, sound_url))

            no_vowel_counter += 1

            if (limit_no_vowel and no_vowel_counter >= limit_no_vowel or
                limit and index + 1 >= limit):
                break

            continue

        # If we have cached exception, we do the same as with absence of vowels, show its info and
        # continue.

        elif isinstance(cache_result, tuple) and cache_result[0] == 'exception':
            exception, traceback_string = cache_result[1:3]

            log.debug(
                '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}): '
                '[CACHE {7}]: exception\n{8}\n{9}'.format(
                index,
                row.LexicalEntry.client_id, row.LexicalEntry.object_id,
                row.Sound.client_id, row.Sound.object_id,
                row.Entity.client_id, row.Entity.object_id,
                cache_key, markup_url, sound_url))

            log.debug(traceback_string)

            exception_counter += 1

            if (limit_exception and exception_counter >= limit_exception or
                limit and index + 1 >= limit):
                break

            continue

        # If we actually have the result, we use it and continue.

        elif cache_result:

            result_string = '\n'.join(
                'tier {0} \'{1}\': {2}'.format(tier_number, tier_name,
                    
                    tier_result_seq_list if not isinstance(tier_result_seq_list, list) else
                    tier_result_seq_list[0] if len(tier_result_seq_list) <= 1 else
                    ''.join('\n  {0}'.format(tier_result) for tier_result in tier_result_seq_list))

                    for tier_number, tier_name, tier_result_seq_list in cache_result)

            log.debug(
                '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}) '
                '[CACHE {7}]:\n{8}\n{9}\n{10}'.format(
                index,
                row.LexicalEntry.client_id, row.LexicalEntry.object_id,
                row.Sound.client_id, row.Sound.object_id,
                row.Entity.client_id, row.Entity.object_id,
                cache_key, markup_url, sound_url, result_string))

            result_list.append(cache_result)

            if (limit_result and len(result_list) >= limit_result or
                limit and index + 1 >= limit):
                break

            continue

        try:
            # Getting markup, checking for each tier if it needs to be processed.

            markup_bytes = urllib.request.urlopen(urllib.parse.quote(markup_url, safe = '/:')).read()

            textgrid = pympi.Praat.TextGrid(xmax = 0)
            textgrid.from_file(
                io.BytesIO(markup_bytes),
                codec = chardet.detect(markup_bytes)['encoding'])

            tier_data_list = []
            vowel_flag = False

            for tier_number, tier_name in textgrid.get_tier_name_num():

                raw_interval_list = textgrid.get_tier(tier_number).get_all_intervals()
                raw_interval_seq_list = [[]]

                # Splitting interval sequence on empty intervals.

                for raw_index, interval in enumerate(raw_interval_list):

                    if len(interval[2].strip()) <= 0:
                        if len(raw_interval_seq_list[-1]) > 0:
                            raw_interval_seq_list.append([])

                    else:
                        raw_interval_seq_list[-1].append((raw_index, interval))

                if len(raw_interval_seq_list[-1]) <= 0:
                    del raw_interval_seq_list[-1]

                # Selecting interval sequences for analysis, checking if we have unusual markup.
                
                interval_seq_list = []
                interval_idx_to_raw_idx = dict()

                unusual_markup_flag = False
                unusual_markup_list = []

                for raw_interval_seq in raw_interval_seq_list:

                    interval_seq_list.append([])
                    interval_idx_to_raw_idx[len(interval_seq_list) - 1] = {}

                    for partial_raw_index, (raw_index, interval) in enumerate(raw_interval_seq):

                        interval_text = interval[2].strip()

                        # Accepting interval if its text contains at least one vowel, and is short enough or
                        # is a valid phonetic transcription.

                        transcription_check = re.fullmatch(transcription_re, interval_text)

                        if (len(interval_text) > 0 and
                                any(character in vowel_set for character in interval_text) and
                                (len(interval_text) <= 2 or transcription_check)):

                            interval_seq_list[-1].append(interval)

                            sequence_index = len(interval_seq_list) - 1
                            interval_index = len(interval_seq_list[-1]) - 1

                            interval_idx_to_raw_idx[(sequence_index, interval_index)] = raw_index
                            interval_idx_to_raw_idx[sequence_index][interval_index] = partial_raw_index

                        # Noting if the interval contains unusual (i.e. non-transcription) markup.

                        elif not transcription_check:

                            unusual_markup_flag = True
                            unusual_markup_list.append((raw_index, interval))

                transcription_list = [text for begin, end, text in raw_interval_list]
                transcription = ''.join(transcription_list)

                selected_list = [text
                    for interval_list in interval_seq_list
                        for begin, end, text in interval_list]

                selected = ''.join(selected_list)

                # If we have intervals with unusual markup, we report them.

                if unusual_markup_flag:
                    log.debug(
                        '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}): '
                        'tier {7} \'{8}\' has interval(s) with unusual transcription text: '
                        '{9} / {10}'.format(
                        index,
                        row.LexicalEntry.client_id, row.LexicalEntry.object_id,
                        row.Sound.client_id, row.Sound.object_id,
                        row.Entity.client_id, row.Entity.object_id,
                        tier_number, tier_name, transcription, dict(unusual_markup_list)))

                # If the markup does not have any vowels, we note it and also report it.

                if all(character not in vowel_set for character in transcription):

                    tier_data_list.append((tier_number, tier_name, 'no_vowel'))

                    log.debug(
                        '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}): '
                        'tier {7} \'{8}\' doesn\'t have any vowel markup: {9}'.format(
                        index,
                        row.LexicalEntry.client_id, row.LexicalEntry.object_id,
                        row.Sound.client_id, row.Sound.object_id,
                        row.Entity.client_id, row.Entity.object_id,
                        tier_number, tier_name, transcription_list))

                # It is also possible that while full transcription has vowels, intervals selected for
                # analysis do not. In that case we also note it and report it.

                elif not any(character in vowel_set for character in selected):

                    tier_data_list.append((tier_number, tier_name, 'no_vowel_selected'))

                    log.debug(
                        '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}): '
                        'tier {7} \'{8}\' intervals to be processed don\'t have any vowel markup: '
                        'markup {9}, selected {10}'.format(
                        index,
                        row.LexicalEntry.client_id, row.LexicalEntry.object_id,
                        row.Sound.client_id, row.Sound.object_id,
                        row.Entity.client_id, row.Entity.object_id,
                        tier_number, tier_name,
                        transcription_list, selected_list))

                # Otherwise we store tier data to be used during processing of the sound file.

                else:
                    tier_data_list.append((tier_number, tier_name,
                        (raw_interval_list, raw_interval_seq_list, interval_seq_list,
                            interval_idx_to_raw_idx, transcription)))

                    vowel_flag = True

            # If there are no tiers with vowel markup, we skip this sound-markup file altogether.

            if not vowel_flag:

                CACHE.set(cache_key, 'no_vowel')
                no_vowel_counter += 1

                if (limit_no_vowel and no_vowel_counter >= limit_no_vowel or
                    limit and index + 1 >= limit):
                    break

                continue

            # Otherwise we retrieve the sound file and analyse each vowel-containing markup.
            # Partially inspired by source code at scripts/convert_five_tiers.py:307.

            sound = None
            with tempfile.NamedTemporaryFile() as temp_file:

                sound_file = urllib.request.urlopen(urllib.parse.quote(sound_url, safe = '/:'))
                temp_file.write(sound_file.read())
                temp_file.flush()

                sound = AudioPraatLike(pydub.AudioSegment.from_wav(temp_file.name))

            tier_result_list = []

            for tier_number, tier_name, tier_data in tier_data_list:

                if tier_data == 'no_vowel' or tier_data == 'no_vowel_selected':
                    tier_result_list.append((tier_number, tier_name, tier_data))
                    continue

                # Analyzing vowel sounds of each interval sequence.

                (raw_interval_list, raw_interval_seq_list, interval_seq_list, interval_idx_to_raw_idx,
                    transcription) = tier_data

                tier_result_list.append((tier_number, tier_name, []))

                for seq_index, (raw_interval_list, interval_list) in enumerate(zip(
                    raw_interval_seq_list, interval_seq_list)):

                    if len(interval_list) <= 0:
                        continue

                    (max_intensity_index, max_intensity, max_length_index, max_length) = \
                        find_max_interval_praat(sound, interval_list)

                    max_intensity_interval = interval_list[max_intensity_index]
                    max_length_interval = interval_list[max_length_index]

                    max_intensity_f1_f2 = sound.get_interval_formants(*max_intensity_interval[:2])
                    max_length_f1_f2 = sound.get_interval_formants(*max_length_interval[:2])

                    # Compiling results.

                    max_length_str = '{0} {1:.3f} [{2}]'.format(
                        max_length_interval[2], max_length,
                        len(''.join(text for index, (begin, end, text) in
                            raw_interval_list[:interval_idx_to_raw_idx[seq_index][max_length_index]])))

                    max_intensity_str = '{0} {1:.3f} [{2}]'.format(
                        max_intensity_interval[2],
                        max_intensity,
                        len(''.join(text for index, (begin, end, text) in
                            raw_interval_list[:interval_idx_to_raw_idx[seq_index][max_intensity_index]])))

                    tier_result_list[-1][2].append([
                        ''.join(text for index, (begin, end, text) in raw_interval_list),
                        max_length_str,
                        '{0:.3f}'.format(max_length_f1_f2[0]),
                        '{0:.3f}'.format(max_length_f1_f2[1]),
                        max_intensity_str,
                        '{0:.3f}'.format(max_intensity_f1_f2[0]),
                        '{0:.3f}'.format(max_intensity_f1_f2[1]),
                        '+' if max_intensity_index == max_length_index else '-'])

            # Saving result.

            result_list.append(tier_result_list)
            CACHE.set(cache_key, tier_result_list)

            result_string = '\n'.join(
                'tier {0} \'{1}\': {2}'.format(tier_number, tier_name,
                    
                    tier_result_seq_list if not isinstance(tier_result_seq_list, list) else
                    tier_result_seq_list[0] if len(tier_result_seq_list) <= 1 else
                    ''.join('\n  {0}'.format(tier_result) for tier_result in tier_result_seq_list))

                    for tier_number, tier_name, tier_result_seq_list in tier_result_list)

            log.debug(
                '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}):'
                '\n{7}\n{8}\n{9}'.format(
                index,
                row.LexicalEntry.client_id, row.LexicalEntry.object_id,
                row.Sound.client_id, row.Sound.object_id,
                row.Entity.client_id, row.Entity.object_id,
                markup_url, sound_url, result_string))

            # Stopping earlier, if required.

            if (limit_result and len(result_list) >= limit_result or
                limit and index + 1 >= limit):
                break

        except Exception as exception:

            #
            # NOTE
            #
            # Exceptional situations encountered so far:
            #
            #   1. TextGrid file actually contains sound, and wav file actually contains textgrid markup.
            #
            #     Perspective 330/4, LexicalEntry 330/7, sound-Entity 330/2328, markup-Entity 330/6934
            #
            #   2. Markup for one of the intervals contains a newline "\n", and pympi fails to parse it.
            #     Praat parses such files without problems.
            #
            #     Perspective 330/4, LexicalEntry 330/20, sound-Entity 330/6297, markup-Entity 330/6967
            #

            log.debug(
                '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}): '
                'exception\n{7}\n{8}'.format(
                index,
                row.LexicalEntry.client_id, row.LexicalEntry.object_id,
                row.Sound.client_id, row.Sound.object_id,
                row.Entity.client_id, row.Entity.object_id,
                markup_url, sound_url))

            # if we encountered an exception, we show its info and remember not to try offending
            # sound/markup pair again.

            traceback_string = ''.join(traceback.format_exception(
                exception, exception, exception.__traceback__))[:-1]

            log.debug(traceback_string)

            CACHE.set(cache_key, ('exception', exception,
                traceback_string.replace('Traceback', 'CACHEd traceback')))

            exception_counter += 1

            if (limit_exception and exception_counter >= limit_exception or
                limit and index + 1 >= limit):
                break

    log.debug('phonology {0}/{1}: {2} result{3}, {4} no vowels, {5} exceptions'.format(
        perspective_cid, perspective_oid,
        len(result_list), '' if len(result_list) == 1 else 's',
        no_vowel_counter, exception_counter))

    # If we have no results, we indicate the situation and also show the number of failures and
    # the number of markups with no vowels.

    if not result_list:
        request.response.status = HTTPPreconditionFailed.code

        return {
            "error": "no markups for this query",
            "exception_counter": exception_counter,
            "no_vowel_counter": no_vowel_counter}

    # Otherwise we create and then serve Excel file.

    excel_book = xlwt.Workbook(encoding = "utf-8")
    sheet = excel_book.add_sheet("Sheet 1")

    sheet.write(0, 0, 'Transcription')
    sheet.write(0, 1, 'Longest (seconds) interval')
    sheet.write(0, 2, 'F1 (Hz)')
    sheet.write(0, 3, 'F2 (Hz)')
    sheet.write(0, 4, 'Highest intensity (dB) interval')
    sheet.write(0, 5, 'F1 (Hz)')
    sheet.write(0, 6, 'F2 (Hz)')
    sheet.write(0, 7, 'Coincidence')

    row_counter = 1

    for tier_result_list in result_list:
        for tier_number, tier_name, tier_result_seq_list in tier_result_list:

            if tier_result_seq_list == 'no_vowel':
                continue

            for tier_data in tier_result_seq_list:
                for index, tier_data_str in enumerate(tier_data):
                    sheet.write(row_counter, index, tier_data_str)

                row_counter += 1

    # Formatting column widths.

    sheet.col(0).width = 24 * 256
    sheet.col(1).width = 24 * 256
    sheet.col(2).width = 12 * 256
    sheet.col(3).width = 12 * 256
    sheet.col(4).width = 24 * 256
    sheet.col(5).width = 12 * 256
    sheet.col(6).width = 12 * 256
    sheet.col(7).width = 12 * 256

    excel_stream = io.BytesIO()
    excel_book.save(excel_stream)
    excel_stream.seek(0)

    # See http://stackoverflow.com/questions/2937465/what-is-correct-content-type-for-excel-files for Excel
    # content-type.

    response = Response(content_type = 'application/vnd.ms-excel')

    response.app_iter = FileIter(excel_stream)
    response.headers['Content-Disposition'] = "attachment; filename=phonology.xls"

    return response
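
The example above reduces to a small, reusable pattern: build the workbook in memory, rewind the BytesIO buffer, wrap it in FileIter and attach a Content-Disposition header. A minimal sketch of just that pattern, assuming only xlwt and pyramid.response (the view name and the "report.xls" filename are made up):

import io

import xlwt
from pyramid.response import FileIter, Response


def excel_report_view(request):
    # Build a one-cell workbook entirely in memory.
    book = xlwt.Workbook(encoding='utf-8')
    sheet = book.add_sheet('Sheet 1')
    sheet.write(0, 0, 'Transcription')

    stream = io.BytesIO()
    book.save(stream)
    stream.seek(0)  # rewind so FileIter starts reading from the beginning

    # Stream the in-memory file back as an attachment.
    response = Response(content_type='application/vnd.ms-excel')
    response.app_iter = FileIter(stream)
    response.headers['Content-Disposition'] = 'attachment; filename=report.xls'
    return response
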
コード例 #44
0
ファイル: toolbar.py プロジェクト: bennihepp/sandbox
    def toolbar_tween(request):
        root_path = request.route_path(ROOT_ROUTE_NAME)
        request.exc_history = exc_history
        remote_addr = request.remote_addr

        if (request.path.startswith(root_path) or (remote_addr not in hosts)):
            return handler(request)

        toolbar = DebugToolbar(request, panel_classes)
        request.debug_toolbar = toolbar

        _handler = handler

        for panel in toolbar.panels:
            _handler = panel.wrap_handler(_handler)

        try:
            response = _handler(request)
        except Exception:
            if exc_history is not None:
                tb = get_traceback(info=sys.exc_info(),
                                   skip=1,
                                   show_hidden_frames=False,
                                   ignore_system_exceptions=True)
                for frame in tb.frames:
                    exc_history.frames[frame.id] = frame

                exc_history.tracebacks[tb.id] = tb
                body = tb.render_full(request).encode('utf-8', 'replace')
                response = Response(body, status=500)
                toolbar.process_response(response)
                qs = {'token': exc_history.token, 'tb': str(tb.id)}
                msg = 'Exception at %s\ntraceback url: %s'
                exc_url = request.route_url(EXC_ROUTE_NAME, _query=qs)
                exc_msg = msg % (request.url, exc_url)
                logger.exception(exc_msg)
                return response
            else:
                logger.exception('Exception at %s' % request.url)
            raise

        else:
            if intercept_redirects:
                # Intercept http redirect codes and display an html page with a
                # link to the target.
                if response.status_int in redirect_codes:
                    redirect_to = response.location
                    redirect_code = response.status_int
                    if redirect_to:
                        content = render(
                            'pyramid_debugtoolbar:templates/redirect.mako', {
                                'redirect_to': redirect_to,
                                'redirect_code': redirect_code
                            },
                            request=request)
                        content = content.encode(response.charset)
                        response.content_length = len(content)
                        response.location = None
                        response.app_iter = [content]
                        response.status_int = 200

            toolbar.process_response(response)
            return response

        finally:
            # break circref
            del request.debug_toolbar
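
The redirect-interception branch above is the general recipe for swapping a response body after the handler has run: produce new content, assign a list of byte strings to app_iter, and keep content_length in sync. A stripped-down tween doing only that, with made-up names and no dependencies beyond Pyramid itself:

def body_stamp_tween_factory(handler, registry):
    # Hypothetical tween: appends a marker line to plain-text responses.
    def body_stamp_tween(request):
        response = handler(request)
        if response.content_type == 'text/plain':
            body = response.body + b'\n-- served via body_stamp_tween'
            response.app_iter = [body]             # replace the body ...
            response.content_length = len(body)    # ... and keep Content-Length in sync
        return response
    return body_stamp_tween

# registered as usual, e.g. config.add_tween('myapp.tweens.body_stamp_tween_factory')
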
コード例 #45
0
def events(request):
    headers = [('Content-Type', 'text/event-stream'),
               ('Cache-Control', 'no-cache')]
    response = Response(headerlist=headers)
    response.app_iter = message_generator()
    return response
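
The message_generator used here is not shown; for a 'text/event-stream' response the app_iter only needs to yield byte strings framed in the SSE wire format ("data: ...\n\n"). One possible, purely illustrative shape:

import time


def message_generator():
    # Illustrative stand-in: a real implementation would block on a queue,
    # a database notification, etc. instead of a counter with a delay.
    for number in range(10):
        yield 'data: message {0}\n\n'.format(number).encode('utf-8')
        time.sleep(1)
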
コード例 #46
0
def generate_pdf(appstruct):
    """
    this function receives an appstruct
    (a datastructure received via formsubmission)
    and prepares and returns a PDF using pdftk
    """
    DEBUG = False

    fdf_file = tempfile.NamedTemporaryFile()
    pdf_file = tempfile.NamedTemporaryFile()

    # import logging
    # log = logging.getLogger(__name__)
    # log.info("test ...! ")

    import os
    here = os.path.dirname(__file__)
    registration_form_template = os.path.join(here,
                                    "../customization/registrationForm/current-{}.pdf")

    try:
        pdf_to_be_used = registration_form_template.format(appstruct['locale'])
        open(pdf_to_be_used)
    except IOError:
        # deliberately error out if fallback does not exist
        pdf_to_be_used = registration_form_template.format(customization.default_language)
        open(pdf_to_be_used)


    # convert the date in date_of_birth
    # print(
    #    "generate_pdf: appstruct: date of birth: %s") % (
    #        appstruct['date_of_birth'])
    # print(
    #    "generate_pdf: type of appstruct: date of birth: %s") % type(
    #        appstruct['date_of_birth'])
    # print(
    #    "generate_pdf: str of appstruct: date of birth: %s") % str(
    #        appstruct['date_of_birth'])
    # print("appstruct: date of birth: %s") % appstruct['date_of_birth']
    # print("appstruct: date of submission: %s") % appstruct[
    #    'date_of_submission']
    dob_ = time.strptime(str(appstruct['date_of_birth']), '%Y-%m-%d')
    # print("generate_pdf: date of birth: %s") % dob_
    dob = time.strftime("%d.%m.%Y", dob_)
    # print("generate_pdf: date of birth: %s") % dob
    # dos_ = time.strptime(
    #    str(appstruct['date_of_submission']),
    #    '%Y-%m-%d %H:%M:%S'
    # )
    # print("generate_pdf: date of submission: %s") % dos_
    dos = str(appstruct['date_of_submission'])
    # print("generate_pdf: date of submission: %s") % dos
    # print("generate_pdf: type of date of birth: %s") % type(dob)
    # print("generate_pdf: date of birth: %s") % dob

    # membership_type
    # FieldType: Button
    # FieldName: MembershipType
    # FieldFlags: 49152
    # FieldValue: 2
    # FieldJustification: Left
    # FieldStateOption: 1
    # FieldStateOption: 2
    # FieldStateOption: Off
    # print("the membership type: %s" % appstruct['membership_type'])

    # calculate the amount to be transferred
    # print("the amount: %s" % (appstruct['num_shares'] * 50))
    amount = str(appstruct['num_shares'] * 50)

# here we gather all information from the supplied data to prepare pdf-filling

    from datetime import datetime

    fields = [
        ('firstname', appstruct['firstname']),
        ('lastname', appstruct['lastname']),
        ('streetNo', appstruct['address1']),
        ('address2', appstruct['address2']),
        ('postcode', appstruct['postcode']),
        ('town', appstruct['city']),
        ('email', appstruct['email']),
        ('country', appstruct['country']),
        ('MembershipType', '1' if appstruct[
            'membership_type'] == u'normal' else '2'),
        ('numshares', str(appstruct['num_shares'])),
        ('dateofbirth', dob),
        ('submitted', dos),
        ('generated', str(datetime.now())),
        ('code', appstruct['email_confirm_code']),
        ('code2', appstruct['email_confirm_code']),  # for page 2
        ('amount', amount),  # for page 2
    ]

# generate fdf string

    fdf = forge_fdf("", fields, [], [], [])

# write it to a file

    if DEBUG:  # pragma: no cover
        print("== prepare: write fdf")

    fdf_file.write(fdf)
    fdf_file.seek(0)  # rewind to beginning

# process the PDF, fill in prepared data

    if DEBUG:  # pragma: no cover
        print("== PDFTK: fill_form & flatten")

        print("running pdftk...")
    pdftk_output = subprocess.call(
        [
            'pdftk',
            pdf_to_be_used,  # input pdf with form fields
            'fill_form', fdf_file.name,  # fill in values
            'output', pdf_file.name,  # output file
            'flatten',  # make form read-only
            # 'verbose'  # be verbose?
        ]
    )

    if DEBUG:  # pragma: no cover
        print(pdf_file.name)
    pdf_file.seek(0)

    if DEBUG:  # pragma: no cover
        print("===== pdftk output ======")
        print(pdftk_output)

    # return a pdf file
    from pyramid.response import Response
    response = Response(content_type='application/pdf')
    pdf_file.seek(0)  # rewind to beginning
    response.app_iter = open(pdf_file.name, "rb")  # binary mode: PDF is binary data

    return response
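
The same fill-and-serve flow can be condensed into a small helper. The sketch below is hypothetical, not the project's API: forge_fdf stands for the same helper the example imports (the fdfgen package ships one with this signature), pdftk must be on the PATH, and FileIter comes from pyramid.response.

import subprocess
import tempfile

from fdfgen import forge_fdf
from pyramid.response import FileIter, Response


def fill_pdf_response(template_path, fields, filename='filled.pdf'):
    fdf_file = tempfile.NamedTemporaryFile()
    pdf_file = tempfile.NamedTemporaryFile()

    # Write the FDF data where pdftk can read it by name.
    fdf_file.write(forge_fdf("", fields, [], [], []))
    fdf_file.flush()

    # Fill the form template and flatten it into pdf_file.
    subprocess.call([
        'pdftk', template_path,
        'fill_form', fdf_file.name,
        'output', pdf_file.name,
        'flatten',
    ])

    response = Response(content_type='application/pdf')
    response.app_iter = FileIter(open(pdf_file.name, 'rb'))
    response.headers['Content-Disposition'] = 'attachment; filename=' + filename
    return response
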
コード例 #47
0
ファイル: utils.py プロジェクト: AnneGilles/c3sMembership
def generate_pdf(appstruct):
    """
    this function receives an appstruct
    (a datastructure received via formsubmission)
    and prepares and returns a PDF using pdftk
    """
    DEBUG = False

    fdf_file = tempfile.NamedTemporaryFile()
    pdf_file = tempfile.NamedTemporaryFile()

    #import logging
    #log = logging.getLogger(__name__)
    #log.info("test ...! ")

    import os
    here = os.path.dirname(__file__)
    declaration_pdf_de = os.path.join(
        here, "../pdftk/C3S-SCE-AFM-de-v01-20131029.pdf")
    declaration_pdf_en = os.path.join(
        here, "../pdftk/C3S-SCE-AFM-en-v01-20130909.pdf")

    # check for _LOCALE_, decide which language to use
    #print(appstruct['_LOCALE_'])
    if appstruct['_LOCALE_'] == "de":
        pdf_to_be_used = declaration_pdf_de
    elif appstruct['_LOCALE_'] == "en":
        pdf_to_be_used = declaration_pdf_en
    else:  # pragma: no cover
        # default fallback: english
        pdf_to_be_used = declaration_pdf_en

    # convert the date in date_of_birth
    #print(
    #    "generate_pdf: appstruct: date of birth: %s") % (
    #        appstruct['date_of_birth'])
    #print(
    #    "generate_pdf: type of appstruct: date of birth: %s") % type(
    #        appstruct['date_of_birth'])
    #print(
    #    "generate_pdf: str of appstruct: date of birth: %s") % str(
    #        appstruct['date_of_birth'])
    #print("appstruct: date of birth: %s") % appstruct['date_of_birth']
    #print("appstruct: date of submission: %s") % appstruct['date_of_submission']
    dob_ = time.strptime(str(appstruct['date_of_birth']), '%Y-%m-%d')
    #print("generate_pdf: date of birth: %s") % dob_
    dob = time.strftime("%d.%m.%Y", dob_)
    #print("generate_pdf: date of birth: %s") % dob
    #dos_ = time.strptime(
    #    str(appstruct['date_of_submission']),
    #    '%Y-%m-%d %H:%M:%S'
    #)
    #print("generate_pdf: date of submission: %s") % dos_
    dos = str(appstruct['date_of_submission'])
    #print("generate_pdf: date of submission: %s") % dos
    #print("generate_pdf: type of date of birth: %s") % type(dob)
    #print("generate_pdf: date of birth: %s") % dob

    # here we gather all information from the supplied data to prepare pdf-filling

    from datetime import datetime

    fields = [
        ('firstname', appstruct['firstname']),
        ('lastname', appstruct['lastname']),
        ('streetNo', appstruct['address1']),
        ('address2', appstruct['address2']),
        ('postcode', appstruct['postcode']),
        ('town', appstruct['city']),
        ('email', appstruct['email']),
        ('country', appstruct['country']),
        ('numshares', str(appstruct['num_shares'])),
        #        ('composer',
        #         'Yes' if appstruct['activity'].issuperset(['composer']) else 'Off'),
        #        ('lyricist',
        #         'Yes' if appstruct['activity'].issuperset(['lyricist']) else 'Off'),
        #        ('producer', 'Yes' if appstruct['activity'].issuperset(
        #            ['music producer']) else 'Off'),
        #        ('remixer',
        #         'Yes' if appstruct['activity'].issuperset(['remixer']) else 'Off'),
        #        ('dj',
        #         'Yes' if appstruct['activity'].issuperset(['dj']) else 'Off'),
        #('YesDataProtection',
        #'Yes' if appstruct[
        #        #'noticed_dataProtection'] == u"(u'yes',)" else 'Off'),
        #        ('inColSoc', '1' if appstruct['member_of_colsoc'] == u'yes' else '2'),
        #        ('inColSocName',
        #        appstruct['name_of_colsoc'] if appstruct['member_of_colsoc'] == u'yes' else ''),
        #        ('URL', appstruct['opt_URL']),
        #        ('bandPseudonym', appstruct['opt_band']),
        #        ('investMmbr', '1' if appstruct['invest_member'] == u'yes' else '2'),
        ('dateofbirth', dob),
        ('submitted', dos),
        ('generated', str(datetime.now()))
    ]

    # generate fdf string

    fdf = forge_fdf("", fields, [], [], [])

    # write it to a file

    if DEBUG:  # pragma: no cover
        print("== prepare: write fdf")

    fdf_file.write(fdf)
    fdf_file.seek(0)  # rewind to beginning

    # process the PDF, fill in prepared data

    if DEBUG:  # pragma: no cover
        print("== PDFTK: fill_form & flatten")

        print("running pdftk...")
    pdftk_output = subprocess.call([
        'pdftk',
        pdf_to_be_used,  # input pdf with form fields
        'fill_form',
        fdf_file.name,  # fill in values
        'output',
        pdf_file.name,  # output file
        'flatten',  # make form read-only
        #'verbose'  # be verbose?
    ])

    if DEBUG:  # pragma: no cover
        print(pdf_file.name)
    pdf_file.seek(0)

    if DEBUG:  # pragma: no cover
        print("===== pdftk output ======")
        print(pdftk_output)


# return a pdf file
    from pyramid.response import Response
    response = Response(content_type='application/pdf')
    pdf_file.seek(0)  # rewind to beginning
    response.app_iter = open(pdf_file.name, "rb")

    return response
コード例 #48
0
def join_membership(request):

    locale = get_localizer(request)

    print "-- locale: " + str(locale)
    #print "-- dir(locale): " + str(dir(locale))
    #print "-- help(locale): " + str(help(locale))
    print "-- locale.locale_name: " + locale.locale_name

    locale_name = get_locale_name(request)
    print "-- locale_name: " + str(locale_name)
    #print "-- dir(locale_name): " + str(dir(locale_name))
    #print "-- help(locale): " + str(help(locale))
    #print "-- locale_name.locale_name: " + locale.locale_name


    form = Form(request, schema = MembershipSchema)

    if form.validate():
        print "the form validated OK"

    if 'form.submitted' in request.POST and form.validate():
        print "form was submitted and validated OK."
        #print "membership_type: " + str(form.data['membership_type'])
        #if 'supporter' in form.data['membership_type']:
        #    print "found 'supporter'"
        #    membership_fee = 42
        #    FoerderMitglied = True
        #    OrdentlichesMitglied = False
        #else:
        #    print "'supporter' not found"
        #    membership_fee = 23
        #    FoerderMitglied = 'Off'
        #    OrdentlichesMitglied = 'On'

        #print "request.POST: " + str(request.POST)

        fields = [
            ('Name', form.data['lastname']),
            ('Surname', form.data['surname']),
            ('Street', form.data['address1']),
            ('PostCodeCity', form.data['address2']),
            ('Telephone', form.data['phone']),
            ('Email', form.data['email']),
            ('OrdentlichesMitglied', 'OrdentlichesMitglied'), # not working
            ('FoerderMitglied', 'FoerderMitglied'), # not working. < ToDo ^
            ]
        #generate fdf string
        fdf = forge_fdf("", fields, [], [], [])
        # write to file
        my_fdf_filename = "fdf.fdf"
        import os
        fdf_file = open(my_fdf_filename, "w")
        # fdf_file.write(fdf.encode('utf8'))
        fdf_file.write(fdf)
        fdf_file.close()

        print os.popen('pdftk pdftk/beitrittserklaerung.pdf fill_form %s output formoutput.pdf flatten'% (my_fdf_filename)).read()

        #print os.popen('pwd').read()
        #print os.popen('ls').read()

        # combine
        print "combining with bank account form"
        print os.popen('pdftk formoutput.pdf pdftk/bankaccount.pdf output combined.pdf').read()
        print "combined personal form and bank form"


        # return a pdf file
        from pyramid.response import Response
        response = Response(content_type='application/pdf')
        response.app_iter = open("combined.pdf", "rb")
        return response




    return {
        'form': FormRenderer(form)
        }
コード例 #49
0
    def stdout(self):
        """Returns file object of stdout.txt file of job"""
        response = Response(content_type='text/plain')
        response.app_iter = self.job.stdout()
        return response
コード例 #50
0
ファイル: httpd.py プロジェクト: gustavofonseca/balaio
def get_file_from_attempt(request):
    """
    Get a portion of a package bound to an Attempt.

    Get a specific member, by name (raw):
    `/api/:api_id/files/:attempt_id/:target?file=:member&raw=true`

    Get a specific member, by name:
    `/api/:api_id/files/:attempt_id/:target.zip?file=:member`

    Get more than one specific members, by name:
    `/api/:api_id/files/:attempt_id/:target.zip?file=:member&file=:member2`

    Get the full package:
    `/api/:api_id/files/:attempt_id/:target.zip?full=true`
    """
    has_body = False

    attempt_id = request.matchdict.get('attempt_id', None)
    target = request.matchdict.get('target', None)

    try:
        attempt = request.db.query(models.Attempt).get(attempt_id)
    except DataError:
        return HTTPNotFound()

    if attempt is None:
        return HTTPNotFound()

    is_full = asbool(request.GET.get('full', False))
    is_raw = asbool(request.GET.get('raw', False))

    if is_full and is_raw:
        return HTTPBadRequest()

    response = Response(status_code=200)

    # Get the full package.
    if is_full:
        response.content_type = 'application/zip'
        response.app_iter = open(attempt.filepath, 'rb')
        has_body = True

    elif is_raw:
        member_name = request.GET.get('file')
        response.content_type = 'text/xml'
        response.app_iter = attempt.analyzer.get_fp(member_name)
        has_body = True

    else:
        response.content_type = 'application/zip'

        # Get partial portions of the package.
        files = [member for attr, member in request.GET.items() if attr == 'file']

        try:
            if files:
                response.app_iter = attempt.analyzer.subzip(*files)
                has_body = True
        except ValueError:
            return HTTPBadRequest()

    return response if has_body else HTTPBadRequest()
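
The docstring above doubles as the client contract. A few illustrative calls against it (host, api_id, attempt_id and member names are placeholders, and the requests library is just a convenient client here):

import requests

BASE = 'http://localhost:6543/api/v1/files/42'

# one member by name, raw (served as text/xml)
raw = requests.get(BASE + '/0042-aop', params={'file': 'article.xml', 'raw': 'true'})

# several members by name, repacked into a zip
partial = requests.get(BASE + '/0042-aop.zip',
                       params=[('file', 'article.xml'), ('file', 'figure1.tif')])

# the full package
full = requests.get(BASE + '/0042-aop.zip', params={'full': 'true'})
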
コード例 #51
0
ファイル: toolbar.py プロジェクト: kevinruizmayoral/zodiac2
    def toolbar_tween(request):
        root_path = request.route_path(ROOT_ROUTE_NAME)
        exclude = [root_path] + exclude_prefixes
        request.exc_history = exc_history
        last_proxy_addr = None

        try:
            p = request.path
        except UnicodeDecodeError as e:
            raise URLDecodeError(e.encoding, e.object, e.start, e.end,
                                 e.reason)

        starts_with_excluded = list(filter(None, map(p.startswith, exclude)))

        if request.remote_addr:
            last_proxy_addr = last_proxy(request.remote_addr)

        if last_proxy_addr is None \
            or starts_with_excluded \
            or not addr_in(last_proxy_addr, hosts) \
            or auth_check and not auth_check(request):
            return handler(request)

        toolbar = DebugToolbar(request, panel_classes)
        request.debug_toolbar = toolbar

        _handler = handler

        for panel in toolbar.panels:
            _handler = panel.wrap_handler(_handler)

        try:
            response = _handler(request)
        except Exception:
            if exc_history is not None:
                tb = get_traceback(info=sys.exc_info(),
                                   skip=1,
                                   show_hidden_frames=False,
                                   ignore_system_exceptions=True)
                for frame in tb.frames:
                    exc_history.frames[frame.id] = frame

                exc_history.tracebacks[tb.id] = tb
                body = tb.render_full(request).encode('utf-8', 'replace')
                response = Response(body, status=500)
                toolbar.process_response(response)
                qs = {'token': exc_history.token, 'tb': str(tb.id)}
                msg = 'Exception at %s\ntraceback url: %s'
                exc_url = request.route_url(EXC_ROUTE_NAME, _query=qs)
                exc_msg = msg % (request.url, exc_url)
                _logger.exception(exc_msg)
                return response
            else:
                _logger.exception('Exception at %s' % request.url)
            raise

        else:
            if intercept_redirects:
                # Intercept http redirect codes and display an html page with a
                # link to the target.
                if response.status_int in redirect_codes:
                    redirect_to = response.location
                    redirect_code = response.status_int
                    if redirect_to:
                        content = render(
                            'pyramid_debugtoolbar:templates/redirect.dbtmako',
                            {
                                'redirect_to': redirect_to,
                                'redirect_code': redirect_code
                            },
                            request=request)
                        content = content.encode(response.charset)
                        response.content_length = len(content)
                        response.location = None
                        response.app_iter = [content]
                        response.status_int = 200

            if not show_on_exc_only:
                toolbar.process_response(response)
            return response

        finally:
            # break circref
            del request.debug_toolbar
コード例 #52
0
ファイル: views.py プロジェクト: scieloorg/scielobooks
def pdf_file(request):
    sbid = request.matchdict['sbid']
    req_part = request.matchdict['part'].split('-')

    monograph = Monograph.get(request.db, sbid)
    if len(req_part) == 2:

        if req_part[1] != getattr(monograph, 'isbn',
                                  None) and req_part[1] != getattr(
                                      monograph, 'eisbn', None):
            raise exceptions.NotFound()

        try:
            url = static_url(
                'scielobooks:fileserver/{0}/pdf/{1}.pdf'.format(
                    sbid, request.matchdict['part']), request)
            u = urllib2.urlopen(url)
            return HTTPFound(location=url)
        except (urllib2.HTTPError, urllib2.URLError):
            #cannot find in static file server, fetch from db
            try:
                pdf_file = request.db.fetch_attachment(
                    monograph._id, monograph.pdf_file['filename'], stream=True)
            except (couchdbkit.ResourceNotFound, AttributeError):
                raise exceptions.NotFound()
            else:
                if asbool(
                        request.registry.settings.get('fileserver_sync_enable',
                                                      False)):
                    if req_part[1] == getattr(monograph, 'eisbn',
                                              None) and getattr(
                                                  monograph, 'isbn', None):
                        #when the eisbn is registered at an already published book. The eisbn takes
                        #precedence when generating the shortname.
                        source_filename = '-'.join([
                            monograph.shortname.split('-')[0], monograph.isbn
                        ])

                        try:
                            url = static_url(
                                'scielobooks:fileserver/{0}/pdf/{1}.pdf'.
                                format(sbid, source_filename), request)
                            u = urllib2.urlopen(url)
                        except (urllib2.HTTPError, urllib2.URLError):
                            # there are no static files available for this book.
                            fresh_pdf_file = request.db.fetch_attachment(
                                monograph._id,
                                monograph.pdf_file['filename'],
                                stream=True)
                            functions.transfer_static_file(
                                request, fresh_pdf_file, monograph._id,
                                monograph.shortname, 'pdf', request.registry.
                                settings['fileserver_remotebase'])
                        else:
                            dest_filename = monograph.shortname
                            functions.symlink_static_file(
                                request, monograph._id, source_filename,
                                dest_filename, 'pdf', request.registry.
                                settings['fileserver_remotebase'])
                    else:
                        fresh_pdf_file = request.db.fetch_attachment(
                            monograph._id,
                            monograph.pdf_file['filename'],
                            stream=True)
                        functions.transfer_static_file(
                            request, fresh_pdf_file, monograph._id,
                            monograph.shortname, 'pdf',
                            request.registry.settings['fileserver_remotebase'])
    else:
        parts = get_book_parts(monograph._id, request)
        try:
            selected_part = parts[int(req_part[2])]
        except (IndexError, ValueError):
            raise exceptions.NotFound()

        part = Part.get(request.db, selected_part['part_sbid'])
        try:
            pdf_file = request.db.fetch_attachment(part._id,
                                                   part.pdf_file['filename'],
                                                   stream=True)
        except (couchdbkit.ResourceNotFound, AttributeError):
            raise exceptions.NotFound()

    response = Response(content_type='application/pdf',
                        expires=datetime_rfc822(365))
    response.app_iter = pdf_file
    try:
        response.etag = str(hash(pdf_file))
    except TypeError:
        #cannot generate a hash for the object, return it without the ETag
        pass

    return response
コード例 #53
0
def gen_cert(member):
    '''
    Utility function: create a membership certificate PDF file using pdflatex
    '''
    here = os.path.dirname(__file__)

    if 'de' in member.locale:
        latex_background_image = os.path.abspath(
            os.path.join(here, '../certificate/Urkunde_Hintergrund_blank.pdf'))
        # latex header and footer
        latex_header_tex = os.path.abspath(
            os.path.join(here, '../certificate/urkunde_header_de.tex'))
        latex_footer_tex = os.path.abspath(
            os.path.join(here, '../certificate/urkunde_footer_de.tex'))
    else:
        latex_background_image = os.path.abspath(
            os.path.join(here, '../certificate/Urkunde_Hintergrund_blank.pdf'))
        # latex header and footer
        latex_header_tex = os.path.abspath(
            os.path.join(here, '../certificate/urkunde_header_en.tex'))
        latex_footer_tex = os.path.abspath(
            os.path.join(here, '../certificate/urkunde_footer_en.tex'))

    sign_meik = os.path.abspath(
        os.path.join(here, '../certificate/sign_meik.png'))
    sign_julian = os.path.abspath(
        os.path.join(here, '../certificate/sign_julian.png'))

    # a temporary directory for the latex run
    tempdir = tempfile.mkdtemp()

    latex_file = tempfile.NamedTemporaryFile(
        suffix='.tex',
        dir=tempdir,
        delete=False,  # directory will be deleted anyways
    )

    # using tempfile
    pdf_file = tempfile.NamedTemporaryFile(
        dir=tempdir,
        delete=False,  # directory will be deleted anyways
    )
    pdf_file.name = latex_file.name.replace('.tex', '.pdf')

    is_founder = True if 'dungHH_' in member.email_confirm_code else False
    # prepare the certificate text
    if member.locale == 'de':  # german
        hereby_confirmed = u'Hiermit wird bestätigt, dass'
        is_member = u'Mitglied der Cultural Commons Collecting Society SCE ' \
                    u'mit beschränkter Haftung (C3S SCE) ist'
        one_more_share = u' und einen weiteren Geschäftsanteil übernommen hat'
        several_shares = u' weitere Geschäftsanteile übernommen hat'
        and_block = u' und '
        if is_founder:
            confirm_date = (
                u'Der Beitritt erfolgte im Rahmen der Gründung am 25.09.2013')
        else:
            confirm_date = u'Der Beitritt wurde am {} zugelassen'.format(
                datetime.strftime(member.membership_date, '%d.%m.%Y'))
        mship_num = u'Die Mitgliedsnummer lautet {}.'.format(
            member.membership_number)
        mship_num_text = u'Mitgliedsnummer {}'.format(member.membership_number)
        exec_dir = u'Geschäftsführender Direktor'

    else:  # default fallback is english
        hereby_confirmed = u'This is to certify that'
        is_member = u'is a member of the >>Cultural Commons Collecting ' \
                    u'Society SCE mit beschränkter Haftung (C3S SCE)<<'
        one_more_share = u' and has subscribed to one additional share'
        several_shares = u'additional shares'
        and_block = u' and has subscribed to'
        if is_founder:
            confirm_date = (u'Membership was acquired as a founding member '
                            'on the 25th of September 2013')
        else:
            confirm_date = u'Registered on the {}'.format(
                datetime.strftime(member.membership_date, '%Y-%m-%d'))
        mship_num = u'The membership number is {}.'.format(
            member.membership_number)
        mship_num_text = u'membership number {}'.format(
            member.membership_number)
        exec_dir = 'Executive Director'

    # construct latex_file
    latex_data = '''
\\input{%s}
\\def\\backgroundImage{%s}
\\def\\txtBlkHerebyConfirmed{%s}
\\def\\firstName{%s}
\\def\\lastName{%s}
\\def\\addressOne{%s}
\\def\\postCode{%s}
\\def\\city{%s}
\\def\\numShares{%s}
\\def\\numAddShares{%s}
\\def\\txtBlkIsMember{%s}
\\def\\txtBlkMembershipNumber{%s}
\\def\\txtBlkConfirmDate{%s}
\\def\\signDate{%s}
\\def\\signMeik{%s}
\\def\\signJulian{%s}
\\def\\txtBlkCEO{%s}
\\def\\txtBlkMembershipNum{%s}
    ''' % (latex_header_tex, latex_background_image, hereby_confirmed,
           TexTools.escape(member.firstname), TexTools.escape(member.lastname),
           TexTools.escape(member.address1), TexTools.escape(member.postcode),
           TexTools.escape(member.city), member.num_shares, member.num_shares -
           1, is_member, TexTools.escape(mship_num), confirm_date,
           (datetime.strftime(date.today(), "%d.%m.%Y")
            if member.locale == 'de' else date.today()), sign_meik,
           sign_julian, exec_dir, mship_num_text)
    if DEBUG:  # pragma: no cover
        print('#' * 60)
        print(member.is_legalentity)
        print(member.lastname)
        print('#' * 60)
    if member.is_legalentity:
        latex_data += '\n\\def\\company{%s}' % TexTools.escape(member.lastname)
    if member.address2 != u'':  # add address part 2 iff exists
        latex_data += '\n\\def\\addressTwo{%s}' % TexTools.escape(
            member.address2)
    if member.num_shares > 1:  # how many shares?
        if member.num_shares == 2:  # iff member has exactly two shares...
            latex_data += '\n\\def\\txtBlkAddShares{%s.}' % one_more_share
        if member.num_shares > 2:  # iff more than two
            latex_data += '\n\\def\\txtBlkAddShares{%s %s %s.}' % (
                and_block, member.num_shares - 1, several_shares)
    else:  # iff member has exactly one share...
        latex_data += '\n\\def\\txtBlkAddShares{.}'

    # finish the latex document
    latex_data += '\n\\input{%s}' % latex_footer_tex

    if DEBUG:  # pragma: no cover
        print '*' * 70
        print('*' * 30, 'latex data: ', '*' * 30)
        print '*' * 70
        print latex_data
        print '*' * 70
    latex_file.write(latex_data.encode('utf-8'))
    latex_file.seek(0)  # rewind

    # pdflatex latex_file to pdf_file
    # pdflatex_output =
    subprocess.call(
        ['pdflatex',
         '-output-directory=%s' % tempdir, latex_file.name],
        stdout=open(os.devnull, 'w'),
        stderr=subprocess.STDOUT  # hide output
    )

    # return a pdf file
    response = Response(content_type='application/pdf')
    response.app_iter = open(pdf_file.name, "rb")
    shutil.rmtree(tempdir, ignore_errors=True)  # delete temporary directory
    return response
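
One detail worth noting: the view opens the freshly built PDF and then immediately removes the temporary directory. On POSIX the open file descriptor keeps the data readable, but buffering the bytes first makes the response independent of the cleanup order. A minimal, hypothetical variant of the tail of gen_cert:

import io
import shutil

from pyramid.response import FileIter, Response


def pdf_response_from_tempdir(pdf_path, tempdir):
    # Read the pdflatex output into memory, then the directory can go away.
    with open(pdf_path, 'rb') as source:
        stream = io.BytesIO(source.read())
    shutil.rmtree(tempdir, ignore_errors=True)

    response = Response(content_type='application/pdf')
    response.app_iter = FileIter(stream)
    return response
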
コード例 #54
0
    def __call__(self):
        request = self.request
        user = request.user

        if not user:
            return make_401_error("Access Denied")

        if "o211export" not in user.cic.ExternalAPIs:
            return make_401_error("Insufficient Permissions")

        model_state = modelstate.ModelState(request)
        model_state.schema = O211ExportOptionsSchema()
        model_state.form.method = None

        if not model_state.validate():
            if model_state.is_error("date"):
                msg = "Invalid date"
            elif model_state.is_error("feed"):
                msg = "Invalid feed."
            else:
                msg = "An unknown error occurred."

            return make_internal_server_error(msg)

        feed = model_state.value("feed")
        date = model_state.value("date")

        args = []
        if not feed:
            sql = [
                "SELECT CAST(record AS nvarchar(max)) AS record FROM O211SC_RECORD_EXPORT btd"
            ]

            if request.viewdata.cic.PB_ID:
                args.append(request.viewdata.cic.PB_ID)
                sql.append(
                    " INNER JOIN CIC_BT_PB pb ON btd.NUM=pb.NUM AND pb.PB_ID=?"
                )

            if date:
                args.append(date)
                sql.append("""
                        WHERE EXISTS (SELECT * FROM GBL_BaseTable_History h
                            INNER JOIN GBL_FieldOption fo
                                    ON h.FieldID=fo.FieldID
                                WHERE h.NUM=btd.NUM AND h.LangID=btd.LangID
                                    AND h.MODIFIED_DATE >= ?
                                    AND fo.FieldName IN ('ORG_LEVEL_1','ORG_LEVEL_2','ORG_LEVEL_3','ORG_LEVEL_4','ORG_LEVEL_5',
                                    'ACCESSIBILITY','AFTER_HRS_PHONE','ALT_ORG','APPLICATION','AREAS_SERVED',
                                    'CONTACT_1','CONTACT_2','EXEC_1','EXEC_2','VOLCONTACT',
                                    'CRISIS_PHONE','ELIGIBILITY','E_MAIL','FAX','FORMER_ORG','HOURS','INTERSECTION',
                                    'LANGUAGES','LOCATED_IN_CM','MAIL_ADDRESS','PUBLIC_COMMENTS',
                                    'OFFICE_PHONE','SERVICE_LEVEL','RECORD_OWNER','DESCRIPTION','SITE_ADDRESS','SUBJECTS',
                                    'TDD_PHONE','TOLL_FREE_PHONE','WWW_ADDRESS', 'UPDATE_DATE', 'NUM', 'SUBMIT_CHANGES_TO', 'SOURCE_DB')
                            )""")

            sql = " ".join(sql)

        elif feed == "recordids":
            sql = [
                "SELECT CAST((SELECT id=btd.NUM, language=btd.Culture FROM O211SC_RECORD_EXPORT btd"
            ]
            if request.viewdata.cic.PB_ID:
                args.append(request.viewdata.cic.PB_ID)
                sql.append(
                    " INNER JOIN CIC_BT_PB pb ON btd.NUM=pb.NUM AND pb.PB_ID=?"
                )

            sql.append(
                "FOR XML PATH('record'), TYPE) AS nvarchar(max)) AS data ")

            sql = " ".join(sql)

        elif feed == "taxonomy":
            sql = "SELECT CAST(record AS nvarchar(max)) AS record from O211SC_TAXONOMY_EXPORT"

        elif feed == "community":
            sql = "SELECT CAST(record AS nvarchar(max)) AS record from O211SC_COMMUNITY_EXPORT"

        else:
            # XXX we should never get here
            return make_internal_server_error("Invalid feed.")

        log.debug("sql: %s", sql)
        with request.connmgr.get_connection("admin") as conn:
            cursor = conn.execute(sql, *args)

            data = [x[0] for x in cursor.fetchall()]

            cursor.close()

        data.insert(0, '<?xml version="1.0" encoding="UTF-8"?>\r\n<records>')
        data.append("</records>")
        data = "\r\n".join(data).encode("utf8")

        file = tempfile.TemporaryFile()
        zip = zipfile.ZipFile(file, "w", zipfile.ZIP_DEFLATED)
        zip.writestr("export.xml", data)
        zip.close()
        length = file.tell()
        file.seek(0)

        res = Response(content_type="application/zip", charset=None)
        res.app_iter = FileIterator(file)
        res.content_length = length

        res.headers["Content-Disposition"] = "attachment;filename=Export.zip"
        return res
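
FileIterator here is defined elsewhere in the project; its essential job is just to hand the zip back in fixed-size chunks. A minimal stand-in could look like this (the class name and block size are arbitrary):

class SimpleFileIterator(object):
    """Yield fixed-size chunks from an open file object, closing it at EOF."""

    def __init__(self, fileobj, block_size=64 * 1024):
        self.fileobj = fileobj
        self.block_size = block_size

    def __iter__(self):
        return self

    def __next__(self):
        chunk = self.fileobj.read(self.block_size)
        if not chunk:
            self.fileobj.close()
            raise StopIteration
        return chunk

    next = __next__  # Python 2 spelling, matching the vintage of these examples
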
コード例 #55
0
ファイル: export.py プロジェクト: OpenCIOC/onlineresources
    def __call__(self):
        request = self.request
        user = request.user

        if not user.vol.SuperUser:
            self._security_failure()

        sql = (
            "SELECT * FROM VOL_SHARE_VIEW_EN vo WHERE "
            + request.viewdata.WhereClauseVOL.replace("NON_PUBLIC", "XNP")
            .replace("DELETION_DATE", "XDEL")
            .replace("UPDATE_DATE", "XUPD")
            .replace("vod.", "vo.")
            .replace("vo.MemberID=1", "1=1")
        )

        log.debug("SQL: %s", sql)

        log.debug("sql: %s", sql)
        data = [
            b'<?xml version="1.0" encoding="UTF-8"?>\r\n<ROOT xmlns="urn:ciocshare-schema-vol">'
        ]
        with request.connmgr.get_connection("admin") as conn:
            cursor = conn.execute(sql)

            data.extend(
                "".join(
                    [
                        '<RECORD VNUM="',
                        str(x.VNUM),
                        '" RECORD_OWNER="',
                        str(x.RECORD_OWNER),
                        '" HAS_ENGLISH="',
                        str(x.HAS_ENGLISH),
                        '" HAS_FRENCH="',
                        str(x.HAS_FRENCH),
                        '">',
                    ]
                    + list(map(str, x[7:]))
                    + ["</RECORD>"]
                ).encode("utf8")
                for x in cursor.fetchall()
            )

            cursor.close()

        data.append(b"</ROOT>")
        data = b"\r\n".join(data)

        file = tempfile.TemporaryFile()
        zip = zipfile.ZipFile(file, "w", zipfile.ZIP_DEFLATED)
        zip.writestr("export.xml", data)
        zip.close()
        length = file.tell()
        file.seek(0)

        res = Response(content_type="application/zip", charset=None)
        res.app_iter = FileIterator(file)
        res.content_length = length

        res.headers["Content-Disposition"] = "attachment;filename=Export.zip"
        return res
コード例 #56
0
def generate_pdf(appstruct):
    """
    this function receives an appstruct
    (a datastructure received via formsubmission)
    and prepares and returns a PDF using pdftk
    """
    DEBUG = False

    fdf_file = tempfile.NamedTemporaryFile()
    pdf_file = tempfile.NamedTemporaryFile()

    declaration_pdf_de = "pdftk/absichtserklaerung.pdf"
    declaration_pdf_en = "pdftk/declaration-of-intent.pdf"

    # check for _LOCALE_, decide which language to use
    #print(appstruct['_LOCALE_'])
    if appstruct['_LOCALE_'] == "de":
        pdf_to_be_used = declaration_pdf_de
    elif appstruct['_LOCALE_'] == "en":
        pdf_to_be_used = declaration_pdf_en
    else:  # pragma: no cover
        # default fallback: english
        pdf_to_be_used = declaration_pdf_en

# here we gather all information from the supplied data to prepare pdf-filling

    fields = [
        ('FirstName', appstruct['firstname']),
        ('LastName', appstruct['lastname']),
        ('streetNo', appstruct['address1']),
        ('address2', appstruct['address2']),
        ('postCode', appstruct['postCode']),
        ('city', appstruct['city']),
        ('email', appstruct['email']),
        ('country', appstruct['country']),
        ('region', appstruct['region']),
        ('composer',
         'Yes' if appstruct['activity'].issuperset(['composer']) else 'Off'),
        ('lyricist',
         'Yes' if appstruct['activity'].issuperset(['lyricist']) else 'Off'),
        ('musician',
         'Yes' if appstruct['activity'].issuperset(['musician']) else 'Off'),
        ('producer', 'Yes'
         if appstruct['activity'].issuperset(['music producer']) else 'Off'),
        ('remixer',
         'Yes' if appstruct['activity'].issuperset(['remixer']) else 'Off'),
        ('dj', 'Yes' if appstruct['activity'].issuperset(['dj']) else 'Off'),
        ('YesDataProtection', 'Yes'
         if appstruct['noticed_dataProtection'] == u"(u'yes',)" else 'Off'),
        ('YesDeclaration', 'Yes'
         if appstruct['understood_declaration'] == u"(u'yes',)" else 'Off'),
        ('YesNotification',
         'Yes' if appstruct['consider_joining'] == u"(u'yes',)" else 'Off'),
        ('created3', 1 if appstruct['at_least_three_works'] == u'yes' else 2),
        ('inColSoc', 1 if appstruct['member_of_colsoc'] == u'yes' else 2),
    ]

    # generate fdf string

    fdf = forge_fdf("", fields, [], [], [])

    # write it to a file

    if DEBUG:  # pragma: no cover
        print("== prepare: write fdf")

    fdf_file.write(fdf)
    fdf_file.seek(0)  # rewind to beginning

    # process the PDF, fill in prepared data

    if DEBUG:  # pragma: no cover
        print("== PDFTK: fill_form & flatten")

        print("running pdftk...")
    pdftk_output = subprocess.call([
        'pdftk',
        pdf_to_be_used,  # input pdf with form fields
        'fill_form',
        fdf_file.name,  # fill in values
        'output',
        pdf_file.name,  # output file
        'flatten',  # make form read-only
        #            'verbose'  # be verbose?
    ])

    if DEBUG:  # pragma: no cover
        print(pdf_file.name)
    pdf_file.seek(0)

    if DEBUG:  # pragma: no cover
        print("===== pdftk output ======")
        print(pdftk_output)


# return a pdf file
    from pyramid.response import Response
    response = Response(content_type='application/pdf')
    pdf_file.seek(0)  # rewind to beginning
    response.app_iter = open(pdf_file.name, "rb")

    return response
コード例 #57
0
def member_list_date_pdf_view(request):
    """
    The membership list *for a given date* for printout as PDF.
    The date is supplied in and parsed from the URL, e.g.
    http://0.0.0.0:6543/aml-2014-12-31.pdf

    The PDF is generated using pdflatex.

    If the date is not parseable, an error message is shown.
    """
    effective_date_string = ''
    try:
        effective_date_string = request.matchdict['date']
        effective_date = datetime.strptime(effective_date_string, '%Y-%m-%d') \
            .date()
    except (KeyError, ValueError):
        request.session.flash(
            "Invalid date! '{}' does not compute! "
            "try again, please! (YYYY-MM-DD)".format(
                effective_date_string),
            'message_to_user'
        )
        return HTTPFound(request.route_url('error_page'))

    shares_count_printed = 0

    # TODO: repositories are data layer and must only be used by the business
    # layer. Introduce business layer logic which uses the repositories and can
    # be accessed by this view via the request.
    shares_count = request.registry.share_information.get_share_count(
        effective_date)

    member_information = request.registry.member_information
    members_count = member_information.get_accepted_members_count(
        effective_date)
    members = member_information.get_accepted_members_sorted(
        effective_date)

    """
    Then a LaTeX file is constructed...
    """
    here = os.path.dirname(__file__)
    latex_header_tex = os.path.abspath(
        os.path.join(here, '../membership_list_pdflatex/header'))
    latex_footer_tex = os.path.abspath(
        os.path.join(here, '../membership_list_pdflatex/footer'))

    # a temporary directory for the latex run
    tempdir = tempfile.mkdtemp()
    # now we prepare a .tex file to be pdflatex'ed
    latex_file = tempfile.NamedTemporaryFile(
        suffix='.tex',
        dir=tempdir,
        delete=False,  # directory will be deleted anyways
    )
    # and where to store the output
    pdf_file = tempfile.NamedTemporaryFile(
        dir=tempdir,
        delete=False,  # directory will be deleted anyways
    )
    pdf_file.name = latex_file.name.replace('.tex', '.pdf')

    # construct latex data: header + variables
    latex_data = '''
\\input{%s}
\\def\\numMembers{%s}
\\def\\numShares{%s}
\\def\\sumShares{%s}
\\def\\today{%s}
    ''' % (
        latex_header_tex,
        members_count,
        shares_count,
        shares_count * 50,
        effective_date.strftime('%d.%m.%Y'),
    )

    # add to the latex document
    latex_data += '''
\\input{%s}''' % latex_footer_tex

    # print '*' * 70
    # print latex_data
    # print '*' * 70
    latex_file.write(latex_data.encode('utf-8'))

    # make table rows per member
    for member in members:
        address = '''\\scriptsize{}'''
        address += '''{}'''.format(
            unicode(TexTools.escape(member.address1)).encode('utf-8'))

        # check for contents of address2:
        if len(member.address2) > 0:
            address += '''\\linebreak {}'''.format(
                unicode(TexTools.escape(member.address2)).encode('utf-8'))
        # add more...
        address += ''' \\linebreak {} '''.format(
            unicode(TexTools.escape(member.postcode)).encode('utf-8'))
        address += '''{}'''.format(
            unicode(TexTools.escape(member.city)).encode('utf-8'))
        address += ''' ({})'''.format(
            unicode(TexTools.escape(member.country)).encode('utf-8'))

        member_share_count = \
            request.registry.share_information.get_member_share_count(
                member.membership_number,
                effective_date)
        shares_count_printed += member_share_count

        membership_loss = u''
        if member.membership_loss_date is not None:
            membership_loss += \
                member.membership_loss_date.strftime('%d.%m.%Y') + \
                '\\linebreak '
        if member.membership_loss_type is not None:
            membership_loss += unicode(TexTools.escape(
                member.membership_loss_type)).encode('utf-8')
        latex_file.write(
            ''' {0} & {1} & {2} & {3} & {4} & {5} & {6}  \\\\\\hline %
            '''.format(
                TexTools.escape(member.lastname).encode('utf-8'),  # 0
                ' \\footnotesize ' + TexTools.escape(
                    member.firstname).encode('utf-8'),  # 1
                ' \\footnotesize ' + TexTools.escape(
                    str(member.membership_number)),  # 2
                address,  # 3
                ' \\footnotesize ' + member.membership_date.strftime(
                    '%d.%m.%Y'),  # 4
                ' \\footnotesize ' + membership_loss + ' ',  # 5
                ' \\footnotesize ' + str(member_share_count)  # 6
            ))

    latex_file.write('''
%\\end{tabular}%
\\end{longtable}%
\\label{LastPage}
\\end{document}
''')
    latex_file.seek(0)  # rewind

    # pdflatex latex_file to pdf_file
    fnull = open(os.devnull, 'w')  # hide output
    pdflatex_output = subprocess.call(
        [
            'pdflatex',
            '-output-directory=%s' % tempdir,
            latex_file.name
        ],
        stdout=fnull, stderr=subprocess.STDOUT  # hide output
    )
    if DEBUG:  # pragma: no cover
        print("the output of pdflatex run: %s" % pdflatex_output)

    # if run was a success, run X times more...
    if pdflatex_output == 0:
        for i in range(2):
            pdflatex_output = subprocess.call(
                [
                    'pdflatex',
                    '-output-directory=%s' % tempdir,
                    latex_file.name
                ],
                stdout=fnull, stderr=subprocess.STDOUT  # hide output
            )
            if DEBUG:  # pragma: no cover
                print("run #{} finished.".format(i+1))

    # sanity check: did we print exactly as many shares as calculated?
    assert(shares_count == shares_count_printed)

    # return a pdf file
    response = Response(content_type='application/pdf')
    response.app_iter = open(pdf_file.name, "rb")
    shutil.rmtree(tempdir, ignore_errors=True)  # delete temporary directory
    return response
コード例 #58
0
def make_dues17_invoice_no_pdf(request):
    """
    Create invoice PDFs on-the-fly.

    This view checks supplied information (in URL) against info in database
    and returns
    - an error message OR
    - a PDF as receipt

    === ===========================================================
    URL http://app:port/dues_invoice_no/EMAIL/CAQJGCGUFW/C3S-dues17-0001.pdf
    === ===========================================================

    """
    token = request.matchdict['code']
    invoice_number = request.matchdict['i']

    try:
        member = C3sMember.get_by_dues17_token(token)
        assert member is not None
        assert member.dues17_token == token
    except AssertionError:
        request.session.flash(
            u"This member and token did not match!",
            'message_to_user'  # message queue for user
        )
        return HTTPFound(request.route_url('error_page'))

    try:
        invoice = Dues17Invoice.get_by_invoice_no(invoice_number.lstrip('0'))
        assert invoice is not None
    except AssertionError:
        request.session.flash(
            u"No invoice found!",
            'message_to_user'  # message queue for user
        )
        return HTTPFound(request.route_url('error_page'))

    # sanity check: invoice token must match with token
    try:
        assert (invoice.token == token)
    except AssertionError:
        request.session.flash(
            u"Token did not match!",
            'message_to_user'  # message queue for user
        )
        return HTTPFound(request.route_url('error_page'))

    # sanity check: invoice must not be reversal
    try:
        assert (not invoice.is_reversal)
    except AssertionError:
        request.session.flash(
            u"Token did not match!",
            'message_to_user'  # message queue for user
        )
        return HTTPFound(request.route_url('error_page'))

    # return a pdf file
    pdf_file = make_invoice_pdf_pdflatex(member, invoice)
    response = Response(content_type='application/pdf')
    pdf_file.seek(0)  # rewind to beginning
    response.app_iter = open(pdf_file.name, "rb")
    return response
コード例 #59
0
ファイル: download.py プロジェクト: OpenCIOC/onlineresources
    def __call__(self):
        make_zip = False

        request = self.request
        user = request.user
        filename = request.context.filename

        download_dir = os.path.join(const._app_path, "download")
        fnamelower = filename.lower()

        need_super = False
        user_dom = None
        if fnamelower.endswith("cic.zip"):
            need_super = True
            user_dom = user.cic
        elif fnamelower.endswith("vol.zip"):
            need_super = True
            user_dom = user.vol

        if need_super:
            if not user_dom.SuperUser:
                self._security_failure()

        else:
            username = filename.rsplit("_", 1)
            if len(username) != 2 or username[0] != user.Login.replace(
                    " ", "_"):
                self._security_failure()

        if "/" in filename or "\\" in filename or ".." in filename or ":" in filename:
            self._security_failure()

        root, ext = os.path.splitext(filename)
        root2, ext2 = os.path.splitext(root)
        if ext.lower() == ".zip" and ext2:
            make_zip = True
            filename = root

        fullpath = None
        if fnamelower.endswith("cic.zip") or fnamelower.endswith("vol.zip"):
            fullpath = os.path.join(
                download_dir,
                str(request.dboptions.MemberID).join(
                    os.path.splitext(filename)),
            )
        else:
            fullpath = os.path.join(download_dir, filename)

        relativepath = os.path.relpath(fullpath, download_dir)

        if (".." in relativepath or "/" in relativepath or "\\" in relativepath
                or ":" in relativepath):
            self._security_failure()

        if not os.path.exists(fullpath):
            raise NotFound(_("File not found", request))

        if make_zip:
            file = tempfile.TemporaryFile()
            zip = zipfile.ZipFile(file, "w", zipfile.ZIP_DEFLATED)
            zip.write(fullpath, strip_accents(filename))
            zip.close()
            length = file.tell()
            file.seek(0)

            res = Response(content_type="application/zip", charset=None)
            res.app_iter = FileIterator(file)
            res.content_length = length
            res.last_modified = os.path.getmtime(fullpath)

        else:
            res = Response(content_type=get_mimetype(ext),
                           conditional_response=True)
            res.app_iter = FileIterable(fullpath)
            res.content_length = os.path.getsize(fullpath)
            res.last_modified = os.path.getmtime(fullpath)
            res.etag = "{}-{}-{}".format(
                os.path.getmtime(fullpath),
                os.path.getsize(fullpath),
                hash(fullpath),
            )

        res.headers[
            "Content-Disposition"] = "attachment;filename=" + strip_accents(
                request.context.filename)
        return res
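
FileIterable here is again a project class. With conditional_response=True, WebOb looks for an app_iter_range method on the app_iter so that HTTP Range requests can be served without reading the whole file; a minimal sketch in the spirit of the classic WebOb file-serving recipe (the class name and block size are made up):

class SimpleFileIterable(object):
    """Minimal file iterable with optional byte-range support."""

    def __init__(self, filename, block_size=64 * 1024):
        self.filename = filename
        self.block_size = block_size

    def __iter__(self):
        return self._reader()

    def app_iter_range(self, start=None, stop=None):
        # Used by WebOb's conditional-response machinery for Range requests.
        return self._reader(start, stop)

    def _reader(self, start=None, stop=None):
        with open(self.filename, 'rb') as fileobj:
            if start:
                fileobj.seek(start)
            remaining = None if stop is None else stop - (start or 0)
            while remaining is None or remaining > 0:
                size = self.block_size if remaining is None else min(self.block_size, remaining)
                chunk = fileobj.read(size)
                if not chunk:
                    break
                yield chunk
                if remaining is not None:
                    remaining -= len(chunk)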