def run_json2csv(request):
    """JSON-to-CSV conversion service: parse the request payload, convert it,
    and serve the result as a zip archive.

    On a conversion error, a 400 response is returned whose zip contains only
    a JSON error-report text file; failures while building that report (or
    the CSV zip itself) are routed through raise500Error.
    """
    log.debug('service : json2csv')
    response = request.response
    try:
        csvWriter = Json2Csv(request)
        csvWriter.parse(request)
        csvWriter.convert(request)
    except Exception as exc:
        try:
            log.error('@run_json2csv:1, ' + str(exc))
            # zipfile will contain only a json error report txt file
            response.status_int = 400
            # NOTE(review): if Json2Csv(request) itself raised, csvWriter is
            # unbound here and this line raises NameError (handled by the
            # inner except as a 500) -- confirm whether the constructor can
            # fail.
            zfh = csvWriter.get400ErrZipFile(request, str(exc))
        except Exception as exc:
            log.error('@run_json2csv:2\n')
            raise500Error(request, str(exc))
    else:
        try:
            zfh = csvWriter.getCsvZipFile()
        except Exception as exc:
            raise500Error(request, str(exc))
    # rewind zipfile back to start of the file
    zfh.seek(0)
    # let the factory response set the content_length
    # because a length compare mismatch will cause an exception
    response.content_type = 'application/zip'
    response.app_iter = FileIter(zfh)
    return response
def update(self):
    """Render one PDF letter per recipient (handlebars template -> markdown
    -> PDF page) and store the download response on ``self.result``."""
    output = PdfFileWriter()
    # In preview mode (or when there is no concrete recipient) keep the
    # recipient list as-is; otherwise only recipients *without* an email
    # address get a printed letter -- presumably the rest are mailed
    # electronically elsewhere (TODO confirm).
    if self.recipients == [None] or self.preview:
        recipients = self.recipients
    else:
        recipients = [r for r in self.recipients if r and not r.email]
    membership_fee_data = get_membership_fees(
        currency_formatter=format_eur)
    subject = self.context.subject or ''
    compiler = pybars.Compiler()
    # Compile the handlebars template once, outside the recipient loop.
    compiled_body = compiler.compile(self.context.body)
    for recipient in recipients:
        address = recipient.address if recipient else None
        data = self.recipients_data(recipient)
        if recipient in membership_fee_data:
            data.update(membership_fee_data[recipient])
        if subject in self.additional.keys():
            # Subject-specific hook: may expand one recipient into several
            # pages, each with its own subject and extra template data.
            for nsubject, additional_data in self.additional[subject](
                    recipient, self.context.accounting_year):
                additional_data.update(data)
                body = format_markdown(compiled_body(additional_data))
                output = self.add_page(address, nsubject, body, output)
        else:
            body = format_markdown(compiled_body(data))
            output = self.add_page(address, subject, body, output)
    result = BytesIO()
    output.write(result)
    result.seek(0)
    response = self.request.response
    response.set_cookie('fileDownload', value='true')
    response.content_type = 'application/pdf'
    response.content_disposition = 'attachment; filename=Brief.pdf'
    response.app_iter = FileIter(result)
    self.result = response
def __call__(self):
    """Serve ``self.data`` as a UTF-8 encoded XML attachment named after
    ``self.filename``."""
    payload = BytesIO(self.data.encode('utf-8'))
    disposition = 'attachment; filename={}.xml'.format(self.filename)
    response = self.request.response
    response.content_type = 'application/xml'
    response.content_disposition = disposition
    response.app_iter = FileIter(payload)
    return response
def process_csv(request, conditions):
    """Query all of the reports matching ``conditions``, store them in a
    temporary CSV file and serve it as an 'export.csv' attachment."""
    reports = generate_reports(conditions)  # report all in search query
    # delete=True: the temp file is reclaimed when the response iterator
    # closes its handle.
    # NOTE(review): the file is opened in binary mode but csv.writer writes
    # str -- this works on Python 2 only; under Python 3 it raises TypeError
    # (would need a TextIOWrapper with newline=''). Confirm target runtime.
    f = NamedTemporaryFile('w+b', prefix='CSV_Export_1', suffix='.csv',
                           delete=True)
    fcsv = csv.writer(f)
    fcsv.writerow([
        'RCID', 'Draw Date', 'Result Date', 'NAT', 'DHIV', 'DHCV', 'DHBV',
        'Location'
    ])
    for report in reports:
        fcsv.writerow([
            str(report.site_code) + str(report.reference_number),
            report.draw_date, report.test_date, report.nat, report.dhiv,
            report.dhcv, report.dhbv, report.location
        ])
    # Rewind so the response streams from the start of the file.
    f.seek(0)
    response = request.response
    # NOTE(review): 'application/csv' is not a registered media type; the
    # standard one is 'text/csv' -- confirm no client depends on this value.
    response.content_type = 'application/csv'
    response.content_disposition = 'attachment; filename=%s' % 'export.csv'
    response.app_iter = FileIter(f)
    return response
def download(self):
    """Export the community map for the current external system as a zipped
    XML document produced by the sp_External_Community_l_xml stored
    procedure.

    The XML root element carries the system's identifying attributes plus a
    'date' attribute; the zip is streamed with an explicit content length.
    """
    request = self.request
    external_system = request.context.external_system
    names = ['SystemCode', 'SystemName', 'CopyrightHolder1',
             'CopyrightHolder2', 'ContactEmail']
    root_parameters = [getattr(external_system, x) for x in names]
    isodate = datetime.now().replace(microsecond=0).isoformat()
    names.append('date')
    root_parameters.append(isodate)
    # ':' is not filename-safe (e.g. on Windows), so swap it for '-'.
    isodate = isodate.replace(':', '-')
    # Build the root element's attribute string: name="value" pairs with
    # values XML-escaped and None rendered as the empty string.
    values = [quoteattr(str(x) if x is not None else '')
              for x in root_parameters]
    root_parameters = ' '.join('='.join(x) for x in zip(names, values))
    fname = "CommunityMap-%s-%s.xml" % (external_system.SystemCode, isodate)
    file = tempfile.TemporaryFile()
    with zipfile.ZipFile(file, 'w', zipfile.ZIP_DEFLATED) as zf:
        with request.connmgr.get_connection() as conn:
            cursor = conn.execute('EXEC sp_External_Community_l_xml ?',
                                  external_system.SystemCode)
            _write_xml_data(root_parameters, cursor, zf, fname)
    # After the ZipFile is closed, the position equals the archive size.
    length = file.tell()
    file.seek(0)
    res = request.response
    res.content_type = 'application/zip'
    # Clear the charset so no "; charset=..." parameter is emitted for the
    # binary zip content type.
    res.charset = None
    res.app_iter = FileIter(file)
    res.content_length = length
    # fname ends with '.xml'; strip those 4 chars so the download is
    # '<base>.zip'.
    res.headers['Content-Disposition'] = 'attachment;filename=%s.zip' % fname[:-4]
    return res
def get_file_response(self, file_response: HapicFile, http_code: int):
    """Build a Pyramid response for a HapicFile.

    Uses a FileResponse when the file lives on disk (file_path set),
    otherwise streams the in-memory/opened file_object through FileIter.

    :param file_response: descriptor holding either file_path or file_object
        plus optional mimetype / content_length / last_modified metadata.
    :param http_code: HTTP status code applied to the response.
    """
    if file_response.file_path:
        from pyramid.response import FileResponse
        # TODO - G.M - 2019-03-27 - add support for overriding parameters of
        # file_response like content_length
        # Extended support for file response:
        # https://github.com/algoo/hapic/issues/171
        response = FileResponse(
            path=file_response.file_path,
            # INFO - G.M - 2018-09-13 - If content_type is None, mimetype
            # is automatically guessed
            content_type=file_response.mimetype or None,
        )
    else:
        from pyramid.response import FileIter
        from pyramid.response import Response
        response = Response(status=http_code)
        response.content_type = file_response.mimetype
        response.app_iter = FileIter(file_response.file_object)
        # Metadata is optional on the descriptor; only set what is present.
        if file_response.content_length:
            response.content_length = file_response.content_length
        if file_response.last_modified:
            response.last_modified = file_response.last_modified
    response.status_code = http_code
    response.content_disposition = file_response.get_content_disposition_header_value()
    return response
def download(request):
    """
    Packages up the provided... well, package.

    Streams a gzipped tarball of ``<repo.path>/<package>`` with an
    X-Checksum header.  Raises HTTPForbidden for paths that resolve
    outside the repository.
    """
    package = request.matchdict['package']
    repo_path = os.path.abspath(request.registry.settings['repo.path'])
    file_path = os.path.abspath(os.path.join(repo_path, package))
    # Path-traversal guard.  A bare startswith(repo_path) would also accept
    # sibling directories (e.g. "/repo-evil" when repo_path is "/repo"), so
    # require either the repo root itself or something under "<repo>/".
    if file_path != repo_path and not file_path.startswith(repo_path + os.sep):
        raise HTTPForbidden()
    # delete=True: the temp file disappears once the response iterator
    # closes its handle.
    fp = tempfile.NamedTemporaryFile('w+b', dir='/tmp', delete=True)
    with tarfile.open(fileobj=fp, mode='w:gz') as tar:
        tar.add(file_path, arcname='')
    fp.seek(0)
    checksum = hash_file(fp)
    fp.seek(0)
    response = request.response
    response.content_type = 'application/x-gzip'
    response.content_disposition = 'attachment; filename={}.tar.gz'.format(
        package)
    response.app_iter = FileIter(fp)
    response.headers['X-Checksum'] = checksum
    return response
def specimen_labels(context, request):
    """Print queued specimen labels as a PDF, or clear the label queue.

    GET renders the print form with the queue count.  POST with 'print'
    generates a label sheet (one label per tube) for every queued specimen,
    empties the queue and streams the PDF; POST with 'clear' just empties
    the queue and redirects.
    """
    db_session = request.db_session
    # The queue of specimen ids lives in the session.
    label_queue = request.session.setdefault(SPECIMEN_LABEL_QUEUE, set())

    class PrintForm(wtforms.Form):
        # Where on the label sheet to start printing (partially used sheets).
        startcol = wtforms.IntegerField(
            u'Starting Column Position',
            default=1,
            validators=[wtforms.validators.InputRequired()])
        startrow = wtforms.IntegerField(
            u'String Row Position',
            default=1,
            validators=[wtforms.validators.InputRequired()])

    form = PrintForm(request.POST)
    if request.method == 'POST' and check_csrf_token(request):
        if not request.has_permission('process'):
            raise HTTPForbidden()
        if 'print' in request.POST and form.validate():
            if label_queue:
                query = (db_session.query(models.Specimen).filter(
                    models.Specimen.id.in_(label_queue)).order_by(
                        models.Specimen.patient_id,
                        models.Specimen.specimen_type_id,
                        models.Specimen.id))
                # One label per tube of each specimen (skip tube-less rows).
                printables = iter(
                    make_specimen_label(s)
                    for s in query
                    for i in six.moves.range(s.tubes)
                    if s.tubes)
                stream = six.StringIO()
                printLabelSheet(stream, u'{} labels'.format(context.title),
                                printables, SPECIMEN_LABEL_SETTINGS,
                                form.startcol.data, form.startrow.data)
                stream.flush()
                stream.seek(0)
                # Printing consumes the queue.
                request.session[SPECIMEN_LABEL_QUEUE] = set()
                request.session.changed()
                response = request.response
                response.content_type = 'application/pdf'
                response.content_disposition = 'attachment;filename=labels.pdf'
                response.app_iter = FileIter(stream)
                return response
        elif 'clear' in request.POST:
            request.session[SPECIMEN_LABEL_QUEUE] = set()
            request.session.changed()
            request.session.flash(u'Your Queue has been cleared', 'info')
        # Non-print POST outcomes redirect back to the specimen listing.
        next = request.current_route_path(_route_name='lims.specimen')
        if request.is_xhr:
            return HTTPOk(json={'__next__': next})
        else:
            return HTTPFound(location=next)
    # GET (or failed CSRF): render the form with the current queue size.
    return {'form': form, 'count': len(label_queue)}
def __call__(self):
    """Serve the export produced by ``self.export()`` as an .xlsx
    attachment named after ``self.filename``."""
    result = self.export()
    response = self.request.response
    # Use the registered OOXML spreadsheet media type; "application/xlsx"
    # is not a real MIME type and some clients will not map it to Excel.
    response.content_type = (
        'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
    response.content_disposition = ('attachment; filename={}.xlsx'.format(
        self.filename))
    response.app_iter = FileIter(result)
    return response
def __init__(self, f, request, disposition='attachment',
             cache_max_age=604800, content_type=None, content_encoding=None):
    """
    :param f: the ``UploadedFile`` file field value.
    :type f: :class:`depot.io.interfaces.StoredFile`

    :param request: Current request.
    :type request: :class:`pyramid.request.Request`

    :param disposition: Content-Disposition type
                        ('attachment' or 'inline').
    :type disposition: str

    :param cache_max_age: The number of seconds that should be used to HTTP
                          cache this response.

    :param content_type: The content_type of the response.

    :param content_encoding: The content_encoding of the response.
                             It's generally safe to leave this set to
                             ``None`` if you're serving a binary file.
                             This argument will be ignored if you also
                             leave ``content-type`` as ``None``.
    """
    # Files with a public URL are served by the storage backend directly.
    if f.public_url:
        raise HTTPMovedPermanently(f.public_url)
    content_encoding, content_type = self._get_type_and_encoding(
        content_encoding, content_type, f)
    super(StoredFileResponse, self).__init__(
        conditional_response=True,
        content_type=content_type,
        content_encoding=content_encoding)
    app_iter = None
    # Prefer the WSGI server's optimized file wrapper (e.g. sendfile)
    # unless the depot setting asks to replace it with Pyramid's FileIter.
    if request is not None and \
            not get_settings()['kotti.depot_replace_wsgi_file_wrapper']:
        environ = request.environ
        if 'wsgi.file_wrapper' in environ:
            app_iter = environ['wsgi.file_wrapper'](f, _BLOCK_SIZE)
    if app_iter is None:
        app_iter = FileIter(f)
    self.app_iter = app_iter
    # assignment of content_length must come after assignment of app_iter
    self.content_length = f.content_length
    self.last_modified = f.last_modified
    if cache_max_age is not None:
        self.cache_expires = cache_max_age
        self.cache_control.public = True
    self.etag = self.generate_etag(f)
    # RFC 6266 header with an ASCII fallback filename for old clients.
    self.content_disposition = rfc6266.build_header(
        f.filename, disposition=disposition,
        filename_compat=unidecode(f.filename))
def downloadversion(self):
    """Serve the stored mod jar for this version, or redirect to its
    external URL when no file is stored locally."""
    version = self.get_db_object(ModVersion, perm=False)
    if not version.mod_file:
        return HTTPFound(version.mod_file_url)
    disposition = 'attachment; filename="{0}-{1}.jar"'.format(
        version.mod.name, version.version)
    return Response(app_iter=FileIter(version.mod_file),
                    content_type='application/zip',
                    content_disposition=disposition)
def __call__(self):
    """Serve the context's raw data as a named file attachment with an
    explicit length."""
    attachment = self.context
    body = BytesIO(attachment.data)
    response = self.request.response
    response.set_cookie('fileDownload', value='true')
    response.content_type = attachment.mimetype
    response.content_length = int(attachment.size)
    response.content_disposition = 'attachment; filename=%s' % attachment.name
    response.app_iter = FileIter(body)
    return response
def __call__(self):
    """Serve the context's map blob as an attachment; the filename
    extension is the mimetype subtype (e.g. image/png -> .png)."""
    ctx = self.context
    extension = ctx.map_mimetype.split('/')[1]
    response = self.request.response
    response.set_cookie('fileDownload', value='true')
    response.content_type = ctx.map_mimetype
    response.content_length = int(ctx.map_size)
    response.content_disposition = 'attachment; filename=%s_map.%s' % (
        ctx.number, extension)
    response.app_iter = FileIter(BytesIO(ctx.map_data))
    return response
def __call__(self):
    """Stream the generated PDF as an attachment named after
    ``self.filename``."""
    document = self.get_pdf()
    response = self.request.response
    response.set_cookie('fileDownload', value='true')
    response.content_type = 'application/pdf'
    response.content_disposition = 'attachment; filename=%s.pdf' % self.filename
    response.app_iter = FileIter(document)
    return response
def redirect(self, context, request, **kw):
    """Serve the generated ODT extraction inline (an empty document when
    the 'odtfile' keyword was not supplied)."""
    document = kw.get('odtfile', io.BytesIO())
    file_name = 'Extraction_cinema'
    disposition = 'inline; filename="{file_name}.odt"'.format(
        file_name=file_name)
    response = request.response
    response.content_type = 'application/vnd.oasis.opendocument.text'
    response.content_disposition = disposition
    response.app_iter = FileIter(document)
    return response
def get_doc(request):
    """Fetch the stored file referenced by the 'file_id' query parameter
    and serve it as an ODT attachment (RFC 5987 percent-encoded
    filename)."""
    doc = request.fs.get(ObjectId(request.GET['file_id']))
    quoted_name = urllib.parse.quote(doc.name.encode('utf8'))
    response = request.response
    response.content_type = "application/vnd.oasis.opendocument.text"
    response.content_disposition = "attachment; filename*=UTF-8''%s" % quoted_name
    response.app_iter = FileIter(doc)
    return response
def __call__(self):
    """Serve the pre-rendered CSV payload in ``self.data`` as a .csv
    attachment with an explicit length."""
    payload = self.data
    response = self.request.response
    response.set_cookie('fileDownload', value='true')
    response.content_type = 'text/comma-separated-values'
    response.content_length = len(payload)
    response.content_disposition = 'attachment; filename=%s.csv' % self.filename
    response.app_iter = FileIter(BytesIO(payload))
    return response
def download(request):
    """Zip up the images belonging to the list named by the 'listname'
    request parameter and stream the archive back to the client.

    Looks up the list id, its catalog items and their image URLs, copies
    the matching image files from src_dir into dst_dir (after clearing
    the staging area), and serves a zip of dst_dir built in a
    self-deleting temporary file.
    """
    listname = request.params['listname']
    file_names = []
    images = []
    ids = []
    s = conn.connect()
    connection = s["connection"]
    # Bind the response up front so the except/finally/return path cannot
    # fail with an UnboundLocalError when an error occurs early.
    response = request.response
    try:
        with connection.cursor() as cursor:
            # Parameterized queries throughout -- never interpolate request
            # or database values into SQL text (SQL injection).
            cursor.execute("SELECT idList from List where ListName=%s",
                           (listname,))
            res = cursor.fetchall()
            listid = res[0]['idList']
            cursor.execute("SELECT idCatalog from ListItems where idList=%s",
                           (listid,))
            for row in cursor.fetchall():
                ids.append(row['idCatalog'])
            for catalog_id in ids:
                cursor.execute("SELECT URL from Images where idCatalog=%s",
                               (catalog_id,))
                for row in cursor.fetchall():
                    images.append(row['URL'])
            # The image file name is the last URL path segment.
            for url in images:
                file_names.append(url.split("/")[-1])
            # Empty the staging directory, then copy the wanted images in.
            for leftover in glob.glob(dst_dir1):
                os.remove(leftover)
            for name in file_names:
                for path in glob.iglob(os.path.join(src_dir, name)):
                    shutil.copy(path, dst_dir)
            connection.commit()
            # delete=True: the temp file is reclaimed when the response
            # iterator closes its handle.
            fp = tempfile.NamedTemporaryFile('w+b', dir=src_dir, delete=True)
            with zipfile.ZipFile(fp, mode='w') as zf:
                for folder, subfolder, files in os.walk(dst_dir):
                    for fname in files:
                        zf.write(os.path.join(folder, fname), fname,
                                 compress_type=zipfile.ZIP_DEFLATED)
            fp.seek(0)
            response.content_type = 'application/zip'
            response.app_iter = FileIter(fp)
    except Exception as e:
        # Best-effort endpoint: log and return the (default) response
        # rather than crashing the request.
        print(e)
    finally:
        connection.close()
    return response
def download_json(context, request):
    """Serve the context serialized as pretty-printed JSON, named after the
    context and its publish date (or 'draft' when unpublished)."""
    buf = six.moves.cStringIO()
    json.dump(context.to_json(deep=True), buf, indent=2)
    buf.seek(0)
    if context.publish_date:
        date_label = context.publish_date.isoformat()
    else:
        date_label = 'draft'
    response = request.response
    response.content_type = 'application/json'
    response.content_disposition = 'attachment; filename="%s-%s.json"' % (
        context.name, date_label)
    response.app_iter = FileIter(buf)
    return response
def blob(context, request):
    """Serve the context's raw binary data unrendered.

    The Content-Type is guessed from the context's name, defaulting to
    "application/download" to force a download for unknown types.
    """
    output = io.BytesIO(context.data)
    output.seek(0)
    response = request.response
    response.app_iter = FileIter(output)
    headers = response.headers
    mime_type, _ = mimetypes.guess_type(context.__name__)
    if mime_type is None:
        mime_type = "application/download"
    headers["Content-Type"] = mime_type
    # Was "bite" -- not a valid token.  This view implements no Range
    # handling, so advertise that ranges are unsupported (RFC 7233).
    headers["Accept-Ranges"] = "none"
    return response
def download_access_control(self):
    """ Download the ACL data as a gzipped-json file """
    acl = self.request.access.dump()
    buf = six.BytesIO()
    # Compact JSON separators, gzipped into the in-memory buffer.
    # (Local renamed from 'zipfile', which shadowed the stdlib module.)
    with gzip.GzipFile(mode='wb', fileobj=buf) as gz:
        gz.write(json.dumps(acl, separators=(',', ':')).encode('utf8'))
    buf.seek(0)
    disp = CONTENT_DISPOSITION.tuples(filename='acl.json.gz')
    response = self.request.response
    response.headers.update(disp)
    response.app_iter = FileIter(buf)
    return response
def redirect(self, context, request, **kw):
    """Serve the alerts-extraction CSV inline, named after the user, the
    localized current time and the application title.

    NOTE(review): getattr's default (user.name) is evaluated eagerly, so a
    missing 'user' keyword raises AttributeError on None -- confirm callers
    always pass a user.
    """
    root = getSite()
    user = kw.get('user', None)
    user_title = getattr(user, 'title', user.name)
    stamp = to_localized_time(
        datetime.datetime.now(), request=request, translate=True)
    file_name = 'Alerts_Extraction_{user}_{date}_{app}'.format(
        date=stamp, user=user_title, app=root.title)
    file_name = name_normalizer(file_name.replace(' ', '-'))
    csv_file = kw.get('file', '')
    response = request.response
    response.content_type = 'application/vnd.ms-excel;charset=windows-1252'
    response.content_disposition = 'inline; filename="{file_name}.csv"'.format(
        file_name=file_name)
    response.app_iter = FileIter(csv_file)
    return response
def download_file(self, context, request: TracimRequest, hapic_data=None):
    """
    Download raw file of last revision of content.
    """
    app_config = request.registry.settings['CFG']
    api = ContentApi(
        current_user=request.current_user,
        session=request.dbsession,
        config=app_config,
    )
    # ContentType.Any: the content id alone identifies the target content.
    content = api.get_one(hapic_data.path.content_id,
                          content_type=ContentType.Any)
    # Fetch the stored file from the depot backend and stream it out with
    # its recorded content type.
    file = DepotManager.get().get(content.depot_file)
    response = request.response
    response.content_type = file.content_type
    response.app_iter = FileIter(file)
    return response
def download_wpsoutputs(request):
    """Proxy a file from the configured WPS output service back to the
    client, stripping headers that no longer apply to the re-streamed
    body."""
    # TODO: streaming html files does not work ...
    path = '/'.join(request.matchdict['subpath'])
    url = request.registry.settings.get('wps.output.url')
    url += '/' + path
    LOGGER.debug("delegate to wpsoutputs: %s", url)
    # forward request to target (without Host Header)
    # h = dict(request.headers)
    # h.pop("Host", h)
    # NOTE(review): verify=False disables TLS certificate checking for the
    # upstream request -- confirm this only ever targets trusted internal
    # hosts.
    response = requests.get(url, stream=True, verify=False)
    response.raise_for_status()

    def remove_header(key, headers):
        # Delete a header regardless of capitalization; the default header
        # key should be the standard capitalized version e.g. 'Content-Length'.
        # TODO: move code to twitcher owsproxy
        try:
            del headers[key]
        except KeyError:
            try:
                del headers[key.lower()]
            except KeyError:
                try:
                    del headers[key.upper()]
                except KeyError:
                    pass

    # clean up headers: length/encoding change when the body is re-streamed,
    # and Connection/Keep-Alive are hop-by-hop.
    headers = dict(response.headers)
    keys = [k.lower() for k in headers.keys()]
    if 'content-length' in keys:
        remove_header('Content-Length', headers)
    if 'transfer-encoding' in keys:
        remove_header('Transfer-Encoding', headers)
    if 'content-encoding' in keys:
        remove_header('Content-Encoding', headers)
    if 'connection' in keys:
        remove_header('Connection', headers)
    if 'keep-alive' in keys:
        remove_header('Keep-Alive', headers)
    # Stream the raw upstream socket through to the client.
    proxy_response = Response(app_iter=FileIter(response.raw),
                              status=response.status_code)
    proxy_response.headers.update(headers)
    return proxy_response
def get_file(request):
    """Serve the stored File for the current context as an attachment.

    Emits both the quoted-string and the RFC 6266 filename* forms of
    Content-Disposition so old and new browsers pick a usable filename.
    When 'handoff_to_nginx' is enabled the body is left empty and nginx
    serves the bytes via X-Accel-Redirect (and handles Range requests).
    """
    # TODO: Add a route that enables the call to have the filename
    # appended to the end. This is so that gmail can read the services
    # Read more here:
    # http://stackoverflow.com/questions/20903967/gmails-new-image-caching-is-breaking-image-links-in-newsletter
    ctx = request.context
    document = ctx._instance
    f = File.get(document.id)
    if f.infected:
        raise HTTPNotAcceptable("Infected with a virus")
    # Latin-1 fallback filename for the quoted-string form; unmappable
    # characters are replaced.
    escaped_double_quotes_filename = (f.title
                                      .replace(u'"', u'\\"')
                                      .encode('iso-8859-1', 'replace'))
    url_quoted_utf8_filename = url_quote(f.title.encode('utf-8'))
    handoff_to_nginx = asbool(config.get('handoff_to_nginx', False))
    if handoff_to_nginx:
        kwargs = dict(body='')
    else:
        # Range requests are only honored when nginx serves the file.
        if 'Range' in request.headers:
            raise HTTPRequestRangeNotSatisfiable()
        fs = open(f.path, 'rb')
        app_iter = None
        environ = request.environ
        # Prefer the WSGI server's file wrapper (e.g. sendfile) if present.
        if 'wsgi.file_wrapper' in environ:
            app_iter = environ['wsgi.file_wrapper'](fs, _BLOCK_SIZE)
        if app_iter is None:
            app_iter = FileIter(fs, _BLOCK_SIZE)
        kwargs = dict(app_iter=app_iter)
    r = Response(
        content_length=f.size,
        content_type=str(f.mime_type),
        last_modified=f.creation_date,
        expires=datetime.now() + timedelta(days=365),
        accept_ranges="bytes" if handoff_to_nginx else "none",
        content_disposition=
        'attachment; filename="%s"; filename*=utf-8\'\'%s'  # RFC 6266
        % (escaped_double_quotes_filename, url_quoted_utf8_filename),
        **kwargs
    )
    if handoff_to_nginx:
        # NOTE(review): bytes header key -- looks Python 2 specific; confirm.
        r.headers[b'X-Accel-Redirect'] = f.handoff_url
    return r
def client_download(request):
    """
    Tars up the bigpkg client and serves the file for download, adding an
    X-Checksum header computed over the archive.
    """
    archive = tempfile.NamedTemporaryFile('w+b', dir='/tmp', delete=True)
    with tarfile.open(fileobj=archive, mode='w:gz') as bundle:
        bundle.add(request.registry.settings['client.path'], arcname='bigpkg')
    archive.seek(0)
    checksum = hash_file(archive)
    archive.seek(0)
    response = request.response
    response.content_type = 'application/x-gzip'
    response.content_disposition = 'attachment; filename=bigpkg.tar.gz'
    response.headers['X-Checksum'] = checksum
    response.app_iter = FileIter(archive)
    return response
def lfs_objects_oid_download(request):
    """Stream a stored LFS object identified by repo/oid, or return a 404
    error body when the oid is absent from the store."""
    repo = request.matchdict.get('repo')
    oid = request.matchdict.get('oid')
    store = LFSOidStore(
        oid, repo, store_location=request.registry.git_lfs_store_path)
    if not store.has_oid():
        log.debug('LFS: oid %s does not exists in store', oid)
        return write_response_error(
            HTTPNotFound,
            'requested file with oid `%s` not found in store' % oid)
    # TODO(marcink): support range header ?
    # Range: bytes=0-, `bytes=(\d+)\-.*`
    stream = open(store.oid_path, 'rb')
    response = Response(
        content_type='application/octet-stream', app_iter=FileIter(stream))
    response.headers.add('X-RC-LFS-Response-Oid', str(oid))
    return response
def __init__(self, fileobj, request=None, cache_max_age=None,
             content_type=None, content_encoding=None):
    """Response that streams ``fileobj``, using the WSGI server's file
    wrapper when one is available.

    NOTE(review): the content_type argument is accepted but immediately
    overwritten with 'application/octet-stream', and cache_max_age is
    never used -- confirm whether that is intentional.
    """
    content_type = 'application/octet-stream'
    super(FileObjResponse, self).__init__(conditional_response=True,
                                          content_type=content_type,
                                          content_encoding=content_encoding)
    app_iter = None
    if request is not None:
        environ = request.environ
        # Prefer the server-provided wrapper (e.g. sendfile) over FileIter.
        if 'wsgi.file_wrapper' in environ:
            app_iter = environ['wsgi.file_wrapper'](fileobj, _BLOCK_SIZE)
    if app_iter is None:
        app_iter = FileIter(fileobj, _BLOCK_SIZE)
    self.app_iter = app_iter
def __init__(self, data, request=None, disposition='attachment',
             cache_max_age=None, content_type=None, content_encoding=None):
    """Response streaming an uploaded file, or a permanent redirect when
    the storage backend exposes a public URL.

    :param data: uploaded-file wrapper exposing ``file``, ``filename`` and
        optionally ``_public_url`` / ``content_type``.
    :param request: current request (accepted for signature compatibility;
        not read here).
    :param disposition: Content-Disposition type, e.g. 'attachment'.
    :param cache_max_age: seconds for HTTP caching, or None to skip.
    :param content_type: explicit content type; guessed from the filename
        when omitted.
    :param content_encoding: explicit content encoding; only derived from
        the filename when the content type is also guessed.
    """
    if data._public_url:
        raise HTTPMovedPermanently(data._public_url)
    filename = data.filename
    content_type = content_type or getattr(data, 'content_type', None)
    if content_type is None:
        content_type, content_encoding = \
            mimetypes.guess_type(filename, strict=False)
    if content_type is None:
        content_type = 'application/octet-stream'
    # str-ifying content_type is a workaround for a bug in Python 2.7.7
    # on Windows where mimetypes.guess_type returns unicode for the
    # content_type.
    content_type = str(content_type)
    super(UploadedFileResponse, self).__init__(conditional_response=True,
                                               content_type=content_type,
                                               content_encoding=content_encoding)
    self.app_iter = FileIter(data.file, _BLOCK_SIZE)
    # assignment of content_length must come after assignment of app_iter
    self.content_length = data.file.content_length
    self.last_modified = data.file.last_modified
    if cache_max_age is not None:
        self.cache_expires = cache_max_age
    # NOTE(review): encode('ascii', 'ignore') yields bytes on Python 3,
    # which would render as b'...' inside the header value -- looks
    # Python 2 specific; confirm target runtime.
    disp = '{0};filename="{1}"'.format(
        disposition, data.filename.encode('ascii', 'ignore'))
    self.headerlist.append(('Content-Disposition', disp))
def get_file(request):
    """Serve the stored File for the current context (GET); HEAD requests
    are delegated to get_file_header.

    When 'handoff_to_nginx' is enabled the body is left empty and nginx
    serves the bytes via X-Accel-Redirect (and handles Range requests);
    otherwise the file stream is wrapped for WSGI delivery.
    """
    # TODO: Add a route that enables the call to have the filename
    # appended to the end. This is so that gmail can read the services
    # Read more here:
    # http://stackoverflow.com/questions/20903967/gmails-new-image-caching-is-breaking-image-links-in-newsletter
    if request.method == 'HEAD':
        # GET view_config captures HEAD...
        return get_file_header(request)
    ctx = request.context
    document = ctx._instance
    f = File.get(document.id)
    if f.infected:
        raise HTTPNotAcceptable("Infected with a virus")
    handoff_to_nginx = asbool(config.get('handoff_to_nginx', False))
    if handoff_to_nginx:
        kwargs = dict(body='')
    else:
        # Range requests are only honored on the nginx path.
        if 'Range' in request.headers:
            raise HTTPRequestRangeNotSatisfiable()
        fs = f.file_stream
        app_iter = None
        environ = request.environ
        # Use the server's file wrapper only when backed by a real disk
        # file (f.path).
        if 'wsgi.file_wrapper' in environ and f.path:
            app_iter = environ['wsgi.file_wrapper'](fs, _BLOCK_SIZE)
        if app_iter is None:
            app_iter = FileIter(fs, _BLOCK_SIZE)
        kwargs = dict(app_iter=app_iter)
    r = Response(
        content_length=f.file_size,
        content_type=str(f.mime_type),
        last_modified=f.creation_date,
        expires=datetime.now() + timedelta(days=365),
        accept_ranges="bytes" if handoff_to_nginx else "none",
        content_disposition=disposition(f.title),  # RFC 6266
        **kwargs
    )
    if handoff_to_nginx:
        # NOTE(review): bytes header key -- looks Python 2 specific; confirm.
        r.headers[b'X-Accel-Redirect'] = f.handoff_url
    return r