def extract(request, calc_id, what):
    """
    Wrapper over the `oq extract` command. If `settings.LOCKDOWN` is true
    only calculations owned by the current user can be retrieved.

    :param request: `django.http.HttpRequest` object
    :param calc_id: numeric id of the calculation to extract from
    :param what: what to extract (forwarded to the `_extract` API)
    :returns: a `FileResponse` streaming a temporary .npz file, or a
        404/403 response, or a plain-text 500 carrying the traceback
    """
    job = logs.dbcmd('get_job', int(calc_id))
    if job is None:
        return HttpResponseNotFound()
    if not utils.user_has_permission(request, job.user_name):
        return HttpResponseForbidden()
    try:
        # read the data and save them on a temporary .npz file
        with datastore.read(job.ds_calc_dir + '.hdf5') as ds:
            fd, fname = tempfile.mkstemp(
                prefix=what.replace('/', '-'), suffix='.npz')
            os.close(fd)
            # everything after the view path is the (decoded) query string;
            # it is appended verbatim to the extraction specifier
            n = len(request.path_info)
            query_string = unquote_plus(request.get_full_path()[n:])
            aw = _extract(ds, what + query_string)
            # convert the extracted object's attributes into values that
            # numpy.savez_compressed can serialize
            a = {}
            for key, val in vars(aw).items():
                key = str(key)  # can be a numpy.bytes_
                if isinstance(val, str):
                    # without this oq extract would fail
                    a[key] = numpy.array(val.encode('utf-8'))
                elif isinstance(val, dict):
                    # this is a hack: only the keys survive, the dict
                    # values are lost in the .npz payload
                    a[key] = list(val)
                else:
                    a[key] = val
            numpy.savez_compressed(fname, **a)
    except Exception as exc:
        # report any failure to the client as a plain-text 500 with the
        # full traceback, so remote users can debug extraction errors
        tb = ''.join(traceback.format_tb(exc.__traceback__))
        return HttpResponse(
            content='%s: %s\n%s' % (exc.__class__.__name__, exc, tb),
            content_type='text/plain', status=500)
    # stream the data back; close() is patched so the temporary file is
    # deleted only after the response has been fully sent
    stream = FileWrapper(open(fname, 'rb'))
    stream.close = lambda: (FileWrapper.close(stream), os.remove(fname))
    response = FileResponse(stream, content_type='application/octet-stream')
    response['Content-Disposition'] = (
        'attachment; filename=%s' % os.path.basename(fname))
    response['Content-Length'] = str(os.path.getsize(fname))
    return response
def serve_file(self, filepath, headers):
    """
    Serve a static file, honoring If-Modified-Since (304) and HTTP Range
    (206 partial content) requests.

    :param filepath: path of the file to serve
    :param headers: mapping of (normalized) request headers; may contain
        'if_modified_since' and 'range'
    Side effects: sets self.size, self.status, self.content_type,
    self.headers and self.data.
    """
    filestat = os.stat(filepath)
    self.size = filestat.st_size
    modtime = time.strftime("%a, %d %b %Y %H:%M:%S GMT",
                            time.gmtime(filestat.st_mtime))
    self.headers.add_header('Last-Modified', modtime)
    if headers.get('if_modified_since') == modtime:
        # The browser cache is up-to-date, send a 304 with an empty body.
        self.status = "304 Not Modified"
        self.data = []
        return
    ct = mimetypes.guess_type(filepath)[0]
    self.content_type = ct if ct else 'text/plain'
    try:
        f = open(filepath, 'rb')
        self.headers['Pragma'] = 'cache'
        self.headers['Cache-Control'] = 'private'
        self.headers['Content-Length'] = str(self.size)
        if self.etag:
            self.headers.add_header('Etag', self.etag)
        if self.expires:
            self.headers.add_header('Expires', self.expires)
        try:
            # Implement 206 partial file support.
            start, end = headers['range'].split('-')
            start = 0 if not start.isdigit() else int(start)
            end = self.size if not end.isdigit() else int(end)
            if self.size < end or start < 0:
                # BUG FIX: "214" is not a valid HTTP status code; RFC 7233
                # defines 416 for an unsatisfiable byte range.
                self.status = "416 Requested Range Not Satisfiable"
                self.data = FileWrapper(f, CHUNK_SIZE)
            else:
                f.seek(start)
                self.data = LimitingFileWrapper(f, CHUNK_SIZE, limit=end)
                self.status = "206 Partial Content"
        except (KeyError, ValueError):
            # BUG FIX: narrowed from a bare except; missing or malformed
            # Range header means we serve the whole file.
            self.data = FileWrapper(f, CHUNK_SIZE)
    except IOError:
        self.status = "403 Forbidden"
def exportPDF(callingModelAdmin, request, whereToCreateFrom, whatToCreate, redirectTo):
    """This method exports PDFs provided by different Models in the crm application

    Args:
      callingModelAdmin (ModelAdmin): The calling ModelAdmin must be provided for error message response.
      request: The request User is to know where to save the error message
      whereToCreateFrom (Model): The model from which a PDF should be exported
      whatToCreate (str): What document Type that has to be created
      redirectTo (str): String that describes to where the method should redirect in case of an error

    Returns:
      HttpResponse with a PDF when successful
      HttpResponseRedirect when not successful
    """
    try:
        pdf = whereToCreateFrom.createPDF(whatToCreate)
        response = HttpResponse(FileWrapper(open(pdf, 'rb')),
                                content_type='application/pdf')
        response['Content-Length'] = path.getsize(pdf)
    except (TemplateSetMissing, UserExtensionMissing, CalledProcessError) as e:
        # All three error cases redirect; only the user message differs.
        response = HttpResponseRedirect(redirectTo)
        # IDIOM FIX: isinstance() instead of type(e) == ...; the old
        # trailing `else: raise Http404` was unreachable because the
        # except clause already restricts the exception types.
        if isinstance(e, UserExtensionMissing):
            callingModelAdmin.message_user(request, _("User Extension Missing"))
        elif isinstance(e, TemplateSetMissing):
            callingModelAdmin.message_user(request, _("Templateset Missing"))
        else:  # CalledProcessError
            callingModelAdmin.message_user(request, e.output)
    return response
def download_file(request, pk):
    """
    Serve one media field of a RepositoryFiles row as an attachment,
    selected by the ?file_type= query parameter (image/audio/video/document).
    Redirects back to the referring page on any lookup failure.
    """
    file_type = request.GET.get('file_type')
    field_by_type = {
        'image': 'repo_image',
        'audio': 'repo_audio',
        'video': 'repo_video',
        'document': 'repo_document',
    }
    try:
        obj = RepositoryFiles.objects.get(id=pk)
        # BUG FIX: with the old if/elif chain an unknown file_type left
        # `download_file` unbound (a NameError masked by a bare except);
        # the dict lookup makes that case an explicit KeyError. The bare
        # except is also narrowed to the failures actually expected here.
        download_file = getattr(obj, field_by_type[file_type])
    except (RepositoryFiles.DoesNotExist, KeyError, ValueError):
        return HttpResponseRedirect(request.META['HTTP_REFERER'])
    file_name = str(download_file.name)
    file_path = settings.MEDIA_ROOT + file_name
    # IDIOM FIX: os.path.basename instead of split('/')[::-1][0]
    base_name = os.path.basename(file_name)
    content_type = mimetypes.guess_type(base_name)[0]
    wrapper = FileWrapper(open(file_path, 'rb'))
    response = HttpResponse(wrapper, content_type=content_type)
    response['Content-Length'] = os.path.getsize(file_path)
    response['Content-Disposition'] = 'attachment; filename=%s' % base_name
    return response
def download_video(request, video_id):
    """
    Render the given RawVideo for the requesting user, persist the result,
    and send the composite file back as an mp4 attachment named
    "<business_name>-<Month-day-Year>.mp4".

    :param request: must be a GET; any other method gets a 403
    :param video_id: primary key of the RawVideo to render
    :returns: HttpResponse with the rendered video, or 403/404
    """
    if request.method != 'GET':
        return HttpResponseForbidden("Gotta Get Geet!")
    video = get_object_or_404(RawVideo, pk=video_id)
    user = request.user
    if not user.eligible():
        return HttpResponseForbidden(
            "Error: user %s isn't eligible to render videos" % user)
    # render first, then record the result for the user
    video_url, composite_name = render_video_for_user(user, video)
    save_video_for_user(user, video, video_url)
    print("Done rendering video - " + composite_name)
    with open(composite_name, 'rb') as file:
        print("Packing video for download")
        # NOTE(review): this relies on HttpResponse consuming the wrapper
        # eagerly while the file is still open inside the `with` block; if
        # this were a StreamingHttpResponse the file would already be
        # closed when streaming starts — confirm before changing.
        file_wrapper = FileWrapper(file)
        response = HttpResponse(file_wrapper, content_type='video/mp4')
        today = datetime.date.today().strftime("%B-%d-%Y")
        filename = "%s-%s.mp4" % (user.business_name, today)
        response[
            'Content-Disposition'] = 'attachment; filename="%s"' % filename
        print("Responding")
        return response
def send_file(request, filepath, force_download=False):
    """
    Send a file through Django without loading the whole file
    into memory at once. The FileWrapper will turn the file
    object into an iterator for chunks of 8KB.

    :param request: the incoming HttpRequest (unused; kept for URLconf)
    :param filepath: absolute path of the file to serve
    :param force_download: when True always send as an attachment,
        regardless of extension
    :returns: HttpResponse streaming the file
    """
    filename = os.path.basename(filepath)
    extension = os.path.splitext(filepath)[1].lower()
    # BUG FIX: open in binary mode; text mode corrupts (or raises decode
    # errors on) binary payloads such as the .zip/.7z archives handled
    # below, and makes Content-Length unreliable.
    wrapper = FileWrapper(open(filepath, 'rb'))
    response = HttpResponse(wrapper)
    # force download for certain filetypes
    extensions_to_download = [".7z", ".zip"]
    if force_download or (extension in extensions_to_download):
        response["Content-Type"] = "application/force-download"
        response[
            "Content-Disposition"] = 'attachment; filename="' + filename + '"'
    else:
        response["Content-type"] = mimetypes.guess_type(filename)[0]
    response["Content-Length"] = os.path.getsize(filepath)
    return response
def download(request): # given the nodes, download the associated data odk = OdkForms() try: data = json.loads(request.body) filename = odk.fetch_data(data['form_id'], data['nodes[]'], data['format']) except KeyError: return HttpResponse(traceback.format_exc()) except Exception as e: print str(e) logging.error(traceback.format_exc()) wrapper = FileWrapper(file(filename)) response = HttpResponse( wrapper, content_type= 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet') response[ 'Content-Disposition'] = 'attachment; filename=%s' % os.path.basename( filename) response['Content-Length'] = os.path.getsize(filename) return response
def download_order(request, order_id=None, *args, **kwargs):
    '''
    Download our product media, if it exists.

    Redirects to /orders unless the order belongs to the requesting user,
    is paid, and its product has media; raises Http404 if the media file
    is missing on disk.
    '''
    if order_id is None:
        return redirect("/orders")
    qs = Order.objects.filter(id=order_id,
                              user=request.user,
                              status='paid',
                              product__media__isnull=False)
    if not qs.exists():
        return redirect("/orders")
    # BUG FIX: a stray `qs = Product.objects.filter(media__isnull=False)`
    # used to clobber the Order queryset here, so `order_obj` was actually
    # an arbitrary Product and `.product` was resolved on the wrong model.
    order_obj = qs.first()
    product_obj = order_obj.product
    if not product_obj.media:
        return redirect("/orders")
    media = product_obj.media
    product_path = media.path
    path = pathlib.Path(product_path)
    pk = product_obj.pk
    ext = path.suffix  # csv, png, jpg
    fname = f"my-cool-product-{order_id}-{pk}{ext}"
    if not path.exists():
        raise Http404
    with open(path, 'rb') as f:
        wrapper = FileWrapper(f)
        content_type = 'application/force-download'
        guessed_ = guess_type(path)[0]
        if guessed_:
            content_type = guessed_
        response = HttpResponse(wrapper, content_type=content_type)
        response['Content-Disposition'] = f"attachment;filename={fname}"
        response['X-SendFile'] = f"{fname}"
        return response
def get_log(request):
    """Return a response with the content of the file mentioned in ?path=fname

    Parameters
    ----------
    request: http request
    """
    # Got the idea from here:
    # https://stackoverflow.com/questions/8600843
    pathToLog = request.GET.get("path")
    # BUG FIX: a missing ?path= parameter used to crash with
    # AttributeError on None; reject it explicitly instead.
    if not pathToLog:
        return HttpResponseForbidden(
            'Forbidden: Sorry, invalid path requested.')
    # First some simple security: only allow to serve log files
    if not any(pathToLog.endswith(x) for x in ['.log', '.stdout', '.stderr']):
        return HttpResponseForbidden(
            'Forbidden: Sorry, invalid path requested.')
    if not os.path.exists(pathToLog):
        return HttpResponseNotFound('Path not found: %s' % pathToLog)
    # NOTE(review): any host path with a log-like suffix is accepted; this
    # allows reading arbitrary *.log files — consider confining to a base
    # directory.
    # BUG FIX: open in binary mode so the bytes sent match Content-Length
    # and no text decoding can fail.
    response = HttpResponse(FileWrapper(open(pathToLog, 'rb')),
                            content_type=mimetypes.guess_type(pathToLog)[0])
    response['Content-Length'] = os.path.getsize(pathToLog)
    # BUG FIX: use only the basename in the header; a full path is not a
    # valid filename parameter and leaked server directory layout.
    response['Content-Disposition'] = ('attachment; filename=%s'
                                       % os.path.basename(pathToLog))
    return response
def send_file(file):
    """
    Send a file through Django without loading the whole file into memory
    at once. The FileWrapper will turn the file object into an iterator for
    chunks of 8KB.

    :param file: an open file object backed by a real filesystem path
        (``file.name`` is used for mime guessing and Content-Length)
    :returns: HttpResponse streaming the file
    """
    # NOTE(review): the parameter shadows the builtin `file` (Python 2
    # module); renaming it would break keyword callers, so it stays.
    print file  # debug trace of the object being served
    filename = file.name
    # if settings.PRIVATE_MEDIA_USE_XSENDFILE:
    #     # X-sendfile
    #     response = StreamingHttpResponse()
    #     response['X-Accel-Redirect'] = filename # Nginx
    #     response['X-Sendfile'] = filename # Apache 2, mod-xsendfile
    #     # Nginx doesn't overwrite headers, but does add the missing headers.
    #     del response['Content-Type']
    #else:
    # Can use django.views.static.serve() method (which supports if-modified-since),
    # but this also does the job well, as it's mainly for debugging.
    mimetype, encoding = mimetypes.guess_type(filename)
    response = HttpResponse(FileWrapper(file), content_type=mimetype)
    response['Content-Length'] = os.path.getsize(filename)
    return response
def export_manual_lineup(request):
    """
    Export the session-stored lineups selected via ?lidx=... as a CSV
    attachment for the daily-fantasy site in session['ds'].
    """
    ds = request.session.get('ds')
    lidx = request.GET.getlist('lidx')
    # SECURITY FIX: the old fixed, predictable /tmp path was vulnerable to
    # symlink attacks and collided between concurrent users; write to a
    # private temporary file instead.
    fd, path = tempfile.mkstemp(suffix='.csv')
    csv_fields = CSV_FIELDS[ds]
    with os.fdopen(fd, 'w') as f:
        f.write(','.join(csv_fields) + '\n')
        for idx in lidx:
            key = '{}_lineup_{}'.format(ds, idx)
            lineup = request.session.get(key)
            players = [Player.objects.get(id=ii['player']) for ii in lineup]
            f.write(','.join([_get_export_cell(ii, ds) for ii in players])
                    + '\n')
    wrapper = FileWrapper(open(path, "r"))
    content_type = mimetypes.guess_type(path)[0]
    response = HttpResponse(wrapper, content_type=content_type)
    response['Content-Length'] = os.path.getsize(path)
    # Keep a stable, human-friendly download name regardless of the
    # randomized temp-file name (the old name also started with a dot,
    # producing a hidden file on unix).
    response['Content-Disposition'] = 'attachment; filename=%s' % smart_str(
        "fantasy_nba_{}.csv".format(ds.lower()))
    return response
def post(self, request):
    """
    Plot thresholds vs. threshold scores from the posted payload and
    return the rendered PNG as an attachment (201), or an empty 400 on
    invalid input or failure.
    """
    try:
        serializer = ThresholdsScoresImageSerializer(data=request.data)
        # guard clause instead of nesting the whole happy path in an if
        if not serializer.is_valid():
            return Response({}, status=status.HTTP_400_BAD_REQUEST)
        plot_data = serializer.validated_data['plot_data']
        thresholds = np.array(plot_data['thresholds'])
        scores = np.array(plot_data['threshold_scores'])
        # the temp file lives as long as the NamedTemporaryFile object;
        # HttpResponse consumes the wrapper before it is collected
        temp_image = NamedTemporaryFile(suffix='.png')
        plot_thresholds_scores(temp_image.name, thresholds, scores)
        file_size = os.path.getsize(temp_image.name)
        response = HttpResponse(FileWrapper(temp_image),
                                content_type='image/png',
                                status=status.HTTP_201_CREATED)
        response['Content-Disposition'] = \
            'attachment; filename="%s"' % "threshold_scores.png"
        response['Content-Length'] = file_size
        return response
    except Exception as e:
        # BUG FIX: logging.error(e) dropped the traceback; keep it.
        logging.exception(e)
        return Response({}, status=status.HTTP_400_BAD_REQUEST)
def get(self, request, *args, **kwargs):
    """Stream one archived output file of the experiment as an attachment."""
    requested_path = request.query_params.get('path')
    if not requested_path:
        raise ValidationError('Files view expect a path to the file.')

    experiment = self.get_experiment()
    outputs_root = get_experiment_outputs_path(
        persistence_outputs=experiment.persistence_outputs,
        experiment_name=experiment.unique_name,
        original_name=experiment.original_unique_name,
        cloning_strategy=experiment.cloning_strategy)
    archived_path = archive_outputs_file(
        persistence_outputs=experiment.persistence_outputs,
        outputs_path=outputs_root,
        namepath=experiment.unique_name,
        filepath=requested_path)

    try:
        # stream in 8KB chunks rather than loading the file into memory
        handle = open(archived_path, 'rb')
        response = StreamingHttpResponse(
            FileWrapper(handle, 8192),
            content_type=mimetypes.guess_type(archived_path)[0])
        response['Content-Length'] = os.path.getsize(archived_path)
        response['Content-Disposition'] = "attachment; filename={}".format(
            os.path.basename(archived_path))
        return response
    except FileNotFoundError:
        _logger.warning('Log file not found: log_path=%s', archived_path)
        return Response(status=status.HTTP_404_NOT_FOUND,
                        data='Log file not found: log_path={}'.format(
                            archived_path))
def generate_report_view(request):
    """Show the report-generation form; on valid input, build and return
    report.zip as an attachment."""
    template_name = 'report_generation.html'
    deal_form = ReportModelForm(request.POST or None)
    report_date_form = ReportDateForm(request.POST or None)

    if deal_form.is_valid() and report_date_form.is_valid():
        partner_data = deal_form.cleaned_data
        date_data = report_date_form.cleaned_data
        deals_list = Deal.objects.filter(
            partner_id=partner_data['partner_id'],
            deal_type=DealType.objects.get(value='Продажа'),
            date__range=[date_data['start_date'], date_data['finish_date']])
        report_info = collect_data_for_report(
            deals_list, partner_data['partner_id'], date_data)
        generate_report(report_info)
        path_to_zip = "media/report.zip"
        response = HttpResponse(FileWrapper(open(path_to_zip, 'rb')),
                                content_type='application/zip')
        response['Content-Disposition'] = 'attachment; filename="report.zip"'
        return response

    context = {
        'deal_form': deal_form,
        'date_form': report_date_form,
    }
    return render(request, template_name, context)
def stream(request, video_id):
    """
    Stream a video to a sufficiently-rated user, honoring HTTP Range
    requests (206 partial content) when present.
    """
    # BUG FIX: objects.get() raises DoesNotExist instead of returning
    # None, so the old `if video is None` check was dead code; the old
    # code also re-fetched a QuerySet and later called `.path` on it,
    # which would raise AttributeError.
    try:
        video = Video.objects.get(video_id=video_id)
    except Video.DoesNotExist:
        return render(request, '404.html')
    rating = Video.G
    if isinstance(request.user, CustomUser):
        rating = request.user.rating
    if rating < video.rating:
        return HttpResponse('You are not permitted to access this content',
                            status=403)
    path = video.path
    range_header = request.META.get('HTTP_RANGE', '').strip()
    range_match = range_re.match(range_header)
    size = os.path.getsize(path)
    content_type, encoding = mimetypes.guess_type(path)
    content_type = content_type or 'application/octet-stream'
    if range_match:
        first_byte, last_byte = range_match.groups()
        first_byte = int(first_byte) if first_byte else 0
        last_byte = int(last_byte) if last_byte else size - 1
        if last_byte >= size:
            last_byte = size - 1
        length = last_byte - first_byte + 1
        resp = StreamingHttpResponse(
            RangeFileWrapper(open(path, 'rb'), offset=first_byte,
                             length=length),
            status=206, content_type=content_type)
        resp['Content-Length'] = str(length)
        resp['Content-Range'] = 'bytes %s-%s/%s' % (first_byte, last_byte,
                                                    size)
    else:
        resp = StreamingHttpResponse(FileWrapper(open(path, 'rb')),
                                     content_type=content_type)
        resp['Content-Length'] = str(size)
        resp['Accept-Ranges'] = 'bytes'
    return resp
def get(self, request, *args, **kwargs):
    """
    Stream the post's attachment as a download. Superusers, users with
    the archives.change_post permission, and the author always pass;
    otherwise 'private' posts and unsold 'sell' posts 404.
    """
    post = get_object_or_404(self.get_queryset(), pk=self.kwargs['pk'])
    if request.user.is_superuser or request.user.has_perm(
            'archives.change_post') or post.author_id == request.user.id:
        pass
    elif post.visible == 'private' or post.visible == 'sell' and not post.buyers.filter(
            id=request.user.id).exists():
        raise Http404
    chunk_size = 8192
    response = StreamingHttpResponse(
        FileWrapper(open(post.attachment.path, 'rb'), chunk_size),
        content_type='application/octet-stream')
    response['Content-Length'] = post.attachment.size
    filename = post.attachment_filename if post.attachment_filename else 'attachment'
    # BUG FIX: the ASCII fallback parameter was misspelled "filenane", so
    # browsers that ignore the RFC 5987 filename* parameter received no
    # filename at all.
    response["Content-Disposition"] = \
        "attachment; " \
        "filename={ascii_filename};" \
        "filename*=UTF-8''{utf_filename}".format(
            ascii_filename=quote(filename),
            utf_filename=quote(filename)
        )
    return response
def download(self, path, file_or_dir):
    """
    Serve `path` (relative to self.basepath) either as a single file or
    as an on-the-fly .tar.gz of a directory.
    """
    # SECURITY FIX: the old pattern r'[\w\d_ -/]*' contained the
    # character RANGE ' ' through '/' (which includes '.', '+', ',' ...),
    # so '../' passed validation and allowed path traversal. Escape the
    # hyphen and reject parent references explicitly.
    if re.match(r'[\w\d_ \-/]*', path).group(0) != path or '..' in path:
        return HttpResponse('Invalid path')
    if file_or_dir == 'file':
        filepath = self.basepath + '/' + path
        wrapper = FileWrapper(open(filepath, 'rb'))
        response = HttpResponse(
            wrapper,
            content_type=mimetypes.guess_type(filepath)[0],
        )
        response['Content-Length'] = os.path.getsize(filepath)
        response['Content-Disposition'] = ('attachment; filename=' +
                                           path.split('/')[-1])
        return response
    elif file_or_dir == 'dir':
        dirpath = self.basepath + '/' + path
        dirname = dirpath.split('/')[-2]
        response = HttpResponse(content_type='application/x-gzip')
        response['Content-Disposition'] = (
            'attachment; filename=%s.tar.gz' % dirname)
        # write the gzipped tar directly into the response body
        tarred = tarfile.open(fileobj=response, mode='w:gz')
        tarred.add(dirpath, arcname=dirname)
        tarred.close()
        return response
def download_log_view(request):
    """
    Zip up the log buffer and then return as a file attachment.
    """
    l = logging.getLogger()
    for h in l.handlers:
        if isinstance(h, desktop.log.log_buffer.FixedBufferHandler):
            try:
                # We want to avoid doing a '\n'.join of the entire log in memory
                # in case it is rather big. So we write it to a file line by line
                # and pass that file to zipfile, which might follow a more efficient path.
                tmp = tempfile.NamedTemporaryFile()
                # text-mode temp file; Python 3 needs an explicit encoding
                log_tmp = tempfile.NamedTemporaryFile("w+t") if sys.version_info[0] == 2 else tempfile.NamedTemporaryFile("w+t", encoding='utf-8')
                # NOTE(review): the loop variable shadows the logger `l`
                # above; harmless here since the logger is not used again.
                for l in h.buf:
                    log_tmp.write(smart_str(l, errors='replace') + '\n')
                # This is not just for show - w/out flush, we often get truncated logs
                log_tmp.flush()
                t = time.time()
                zip = zipfile.ZipFile(tmp, "w", zipfile.ZIP_DEFLATED)
                zip.write(log_tmp.name, "hue-logs/hue-%s.log" % t)
                zip.close()
                # tell() after close gives the finished archive size
                length = tmp.tell()
                # if we don't seek to start of file, no bytes will be written
                tmp.seek(0)
                wrapper = FileWrapper(tmp)
                response = HttpResponse(wrapper, content_type="application/zip")
                response['Content-Disposition'] = 'attachment; filename=hue-logs-%s.zip' % t
                response['Content-Length'] = length
                return response
            except Exception as e:
                LOG.exception("Couldn't construct zip file to write logs")
                return log_view(request)
    # no buffering handler installed: nothing to download
    return django_render(request, "logs.mako", dict(log=[_("No logs found.")], is_embeddable=request.GET.get('is_embeddable', False)))
def download(request): """ Download from MobSF Route """ try: if request.method == 'GET': allowed_exts = settings.ALLOWED_EXTENSIONS filename = request.path.replace("/download/", "", 1) # Security Checks if "../" in filename: print "\n[ATTACK] Path Traversal Attack detected" return HttpResponseRedirect('/error/') ext = os.path.splitext(filename)[1] if ext in allowed_exts: dwd_file = os.path.join(settings.DWD_DIR, filename) if os.path.isfile(dwd_file): wrapper = FileWrapper(file(dwd_file)) response = HttpResponse( wrapper, content_type=allowed_exts[ext]) response['Content-Length'] = os.path.getsize(dwd_file) return response except: PrintException("Error Downloading File") return HttpResponseRedirect('/error/')
def download(request):
    """Download from MobSF Route."""
    msg = 'Error Downloading File '
    if request.method == 'GET':
        allowed_exts = settings.ALLOWED_EXTENSIONS
        filename = request.path.replace('/download/', '', 1)
        # Reject any attempt to escape the download directory.
        if '../' in filename:
            return print_n_send_error_response(
                request, 'Path Traversal Attack Detected')
        ext = os.path.splitext(filename)[1]
        if ext in allowed_exts:
            dwd_file = os.path.join(settings.DWD_DIR, filename)
            if os.path.isfile(dwd_file):
                response = HttpResponse(
                    FileWrapper(open(dwd_file, 'rb')),
                    content_type=allowed_exts[ext])
                response['Content-Length'] = os.path.getsize(dwd_file)
                return response
        # Screenshots and icons may legitimately be absent; anything else
        # that reaches this point is reported as an error.
        if ('screen/screen.png' not in filename
                and '-icon.png' not in filename):
            return print_n_send_error_response(request, msg + filename)
    return HttpResponse('')
def get_warc_stream(link):
    """Build a streaming WARC download for `link`, prefixing a freshly
    generated warcinfo record to the stored archive."""
    filename = "%s.warc.gz" % link.guid
    timestamp = link.creation_timestamp.strftime('%Y%m%d%H%M%S')
    page_record = {
        'title': link.submitted_title,
        'url': link.submitted_url,
        'timestamp': timestamp,
    }
    warcinfo = make_detailed_warcinfo(
        filename=filename,
        guid=link.guid,
        coll_title='Perma Archive, %s' % link.submitted_title,
        coll_desc=link.submitted_description,
        rec_title='Perma Archive of %s' % link.submitted_title,
        pages=[page_record])

    # chain the generated warcinfo record in front of the stored warc
    stored_warc = FileWrapper(default_storage.open(link.warc_storage_file()))
    body = itertools.chain([warcinfo], stored_warc)

    response = StreamingHttpResponse(body, content_type="application/gzip")
    response['Content-Disposition'] = 'attachment; filename="%s"' % filename
    return response
def send_zipfile(request, task_id, file_path): """ 创建磁盘上的文件传输块8KB,不将整个文件加载到内存中。 类似的方法可以用于大型动态PDF文件。 """ #################### 文件打包开始 #################### zip_file_name = "task_id%s_files" % task_id # 创建一个ZipFile对象,表示一个zip文件。 # zip_file_name 表示文件名 # w 指示打开zip文件的模式 (默认值为’r’,表示读已经存在的zip文件,也可以为’w’或’a’,’w’表示新建一个zip文档或覆盖一个已经存在的zip文档,’a’表示将数据附加到一个现存的zip文档中。) # ZIP_DEFLATED 表示在写zip文档时使用的压缩方法,它的值可以是zipfile. ZIP_STORED 或zipfile. ZIP_DEFLATED。如果要操作的zip文件大小超过2G,应该将allowZip64设置为True。 archive = zipfile.ZipFile(zip_file_name, 'w', zipfile.ZIP_DEFLATED) file_list = os.listdir(file_path) for filename in file_list: # 将指定文件添加到zip文档中。第一个参数为文件路径,第二个参数arcname为添加到zip文档之后保存的名称, 第三个参数compress_type表示压缩方法,它的值可以是zipfile. ZIP_STORED 或zipfile. ZIP_DEFLATED。 archive.write('%s/%s' % (file_path, filename), arcname=filename) # 写入的任何文件在关闭之前都不会真正写入磁盘。 archive.close() #################### 文件打包完毕 #################### # 需要先将文件读入内存,再进行传输。 wrapper = FileWrapper(open(zip_file_name, 'rb')) # 更改 Headers 头部信息 (浏览器想怎么处理返回的数据都是根据content_type 返回的格式来进行处理的. response = HttpResponse(wrapper, content_type="application/zip") # 告诉浏览器 这个文件是以附件的形式下载 response[ 'Content-Disposition'] = "attachment; filename=%s.zip" % zip_file_name # 文件大小 response['Content-Length'] = os.path.getsize(zip_file_name) return response
def stream_video(request, choreography_file):
    """Serve the choreography video, honoring HTTP Range requests."""
    byte_range_pattern = re.compile(r'bytes\s*=\s*(\d+)\s*-\s*(\d*)', re.I)
    requested_range = byte_range_pattern.match(
        request.META.get('HTTP_RANGE', '').strip())
    total = os.path.getsize(choreography_file)
    mime = mimetypes.guess_type(choreography_file)[0]
    mime = mime or 'application/octet-stream'

    if not requested_range:
        # No (or unparseable) Range header: send the whole file.
        resp = StreamingHttpResponse(
            FileWrapper(open(choreography_file, 'rb')), content_type=mime)
        resp['Content-Length'] = str(total)
        resp['Accept-Ranges'] = 'bytes'
        return resp

    start_str, end_str = requested_range.groups()
    start = int(start_str) if start_str else 0
    end = int(end_str) if end_str else total - 1
    end = min(end, total - 1)
    span = end - start + 1
    resp = StreamingHttpResponse(
        RangeFileWrapper(open(choreography_file, 'rb'),
                         offset=start, length=span),
        status=206, content_type=mime)
    resp['Content-Length'] = str(span)
    resp['Content-Range'] = 'bytes %s-%s/%s' % (start, end, total)
    return resp
def get_datastore(request, job_id):
    """
    Download a full datastore file.

    :param request: `django.http.HttpRequest` object.
    :param job_id: The id of the requested datastore
    :returns:
        A `django.http.HttpResponse` containing the content
        of the requested artifact, if present, else throws a 404
    """
    job = logs.dbcmd('get_job', int(job_id))
    if job is None:
        return HttpResponseNotFound()
    if not utils.user_has_permission(request, job.user_name):
        return HttpResponseForbidden()

    ds_path = job.ds_calc_dir + '.hdf5'
    response = FileResponse(FileWrapper(open(ds_path, 'rb')),
                            content_type=HDF5)
    response['Content-Disposition'] = ('attachment; filename=%s'
                                       % os.path.basename(ds_path))
    return response
def download_course(self, request, **kwargs):
    """
    Tastypie endpoint: serve the zipped course package as a download,
    optionally appending the user's tracker XML, and record a 'download'
    Tracker entry. (Python 2 module.)
    """
    self.is_authenticated(request)
    self.throttle_check(request)
    pk = kwargs.pop('pk', None)
    # staff may download archived-but-draft courses; others may not
    try:
        if request.user.is_staff:
            course = self._meta.queryset.get(pk=pk, is_archived=False)
        else:
            course = self._meta.queryset.get(pk=pk, is_archived=False, is_draft=False)
    except Course.DoesNotExist:
        raise Http404(_(u"Course not found"))
    except ValueError:
        # pk was not numeric: fall back to a shortname lookup
        try:
            if request.user.is_staff:
                course = self._meta.queryset.get(shortname=pk, is_archived=False)
            else:
                course = self._meta.queryset.get(shortname=pk, is_archived=False, is_draft=False)
        except Course.DoesNotExist:
            raise Http404(_(u"Course not found"))
    file_to_download = course.getAbsPath()
    has_completed_trackers = Tracker.has_completed_trackers(course, request.user)
    try:
        # add scheduling XML file
        if has_completed_trackers:
            # copy the package to a per-user temp file so the tracker XML
            # can be appended without mutating the original course zip
            file_to_download = settings.COURSE_UPLOAD_DIR + "temp/" + str(request.user.id) + "-" + course.filename
            shutil.copy2(course.getAbsPath(), file_to_download)
        # NOTE(review): the zip is opened in append mode even when no
        # tracker is added; harmless, but then nothing is written.
        zip = zipfile.ZipFile(file_to_download, 'a')
        if has_completed_trackers:
            zip.writestr(course.shortname + "/tracker.xml", Tracker.to_xml_string(course, request.user))
        zip.close()
        wrapper = FileWrapper(file(file_to_download))
        response = HttpResponse(wrapper, content_type='application/zip')
        response['Content-Length'] = os.path.getsize(file_to_download)
        response['Content-Disposition'] = 'attachment; filename="%s"' % (course.filename)
    except IOError:
        raise Http404(_(u"Course not found"))
    # Add to tracker
    tracker = Tracker()
    tracker.user = request.user
    tracker.course = course
    tracker.type = 'download'
    tracker.data = json.dumps({'version': course.version})
    tracker.ip = request.META.get('REMOTE_ADDR', oppia.api.DEFAULT_IP_ADDRESS)
    tracker.agent = request.META.get('HTTP_USER_AGENT', 'unknown')
    tracker.save()
    course_downloaded.send(sender=self, course=course, user=request.user)
    return response
def whitepaper_access(request, ratelimited=False):
    """
    Validate an access code and email, watermark the whitepaper PDF for
    this requester (bottom attribution line + diagonal serial) and serve
    it as a download; on any validation failure re-render the access-code
    form with an error message.
    """
    context = {
        'active': 'whitepaper',
        'title': _('Whitepaper'),
        'minihero': _('Whitepaper'),
        'suppress_logo': True,
    }
    # plain GET (no submit): just show the access-code form
    if not request.POST.get('submit', False):
        return TemplateResponse(request, 'whitepaper_accesscode.html', context)
    if ratelimited:
        context['msg'] = _(
            "You're ratelimited. Please contact [email protected]")
        return TemplateResponse(request, 'whitepaper_accesscode.html', context)
    context['accesskey'] = request.POST.get('accesskey')
    context['email'] = request.POST.get('email')
    access_codes = AccessCodes.objects.filter(
        invitecode=request.POST.get('accesskey'))
    valid_access_code = access_codes.exists()
    if not valid_access_code:
        context['msg'] = _(
            "Invalid Access Code. Please contact [email protected]")
        return TemplateResponse(request, 'whitepaper_accesscode.html', context)
    ac = access_codes.first()
    if ac.uses >= ac.maxuses:
        context['msg'] = _(
            "You have exceeded your maximum number of uses for this access code. Please contact [email protected]"
        )
        return TemplateResponse(request, 'whitepaper_accesscode.html', context)
    # validate the submitted email address
    valid_email = True
    try:
        validate_email(request.POST.get('email', False))
    except Exception as e:
        valid_email = False
    if not request.POST.get('email', False) or not valid_email:
        context['msg'] = _("Invalid Email. Please contact [email protected]")
        return TemplateResponse(request, 'whitepaper_accesscode.html', context)
    ip = get_ip(request)
    # record the access and notify the admins by email
    wa = WhitepaperAccess.objects.create(
        invitecode=request.POST.get('accesskey', False),
        email=request.POST.get('email', False),
        ip=ip,
    )
    send_mail(
        settings.CONTACT_EMAIL,
        settings.CONTACT_EMAIL,
        _("New Whitepaper Generated"),
        str(wa),
        categories=['admin', 'whitepaper_gen'],
    )
    # bottom watermark: a small, mostly-transparent attribution line
    packet1 = BytesIO()
    can = canvas.Canvas(packet1, pagesize=letter)
    grey = Color(22 / 255, 6 / 255, 62 / 255, alpha=0.3)
    can.setFillColor(grey)
    can.setFontSize(8)
    lim = 30  # truncate long emails in the watermark text
    email__etc = wa.email if len(wa.email) < lim else wa.email[0:lim] + "..."
    msg = gettext(
        "Generated for access code {} by email {} at {} via ip: {}. https://gitcoin.co/whitepaper"
    ).format(wa.invitecode, email__etc, wa.created_on.strftime("%Y-%m-%d %H:%M"), wa.ip)
    # rough horizontal centering: ~3.5pt per character at font size 8
    charlength = 3.5
    width = len(msg) * charlength
    left = (600 - width) / 2
    can.drawString(left, 7, msg)
    can.save()
    # middle watermark: a huge, faint, rotated serial number (WP00001...)
    packet2 = BytesIO()
    can = canvas.Canvas(packet2, pagesize=letter)
    grey = Color(22 / 255, 6 / 255, 62 / 255, alpha=0.02)
    can.setFillColor(grey)
    can.setFontSize(100)
    msg = "WP{}".format(str(wa.pk).zfill(5))
    charlength = 55
    width = len(msg) * charlength
    left = (600 - width) / 2
    can.rotate(45)
    can.drawString(320, 50, msg)
    can.save()
    # move to the beginning of the StringIO buffer
    path_to_file = 'assets/other/wp.pdf'
    new_pdf1 = PdfFileReader(packet1)
    new_pdf2 = PdfFileReader(packet2)
    # read your existing PDF
    existing_pdf = PdfFileReader(open(path_to_file, "rb"))
    output = PdfFileWriter()
    # add the "watermark" (which is the new pdf) on the existing pages;
    # the serial watermark is skipped on the cover page (i == 0)
    try:
        for i in range(0, 50):
            page = existing_pdf.getPage(i)
            page.mergePage(new_pdf1.getPage(0))
            if i != 0:
                page.mergePage(new_pdf2.getPage(0))
            output.addPage(page)
    except Exception as e:
        # NOTE(review): the loop relies on running past the last page and
        # swallowing the resulting exception to stop — confirm page count
        print(e)
    # finally, write "output" to a real file
    outputfile = "output/whitepaper_{}.pdf".format(wa.pk)
    outputStream = open(outputfile, "wb")
    output.write(outputStream)
    outputStream.close()
    filename = outputfile
    wrapper = FileWrapper(open(filename, 'rb'))
    response = HttpResponse(wrapper, content_type='application/pdf')
    response[
        'Content-Disposition'] = 'attachment; filename="GitcoinWhitepaper.pdf"'
    response['Content-Length'] = os.path.getsize(filename)
    return response
def wink_video(request):
    """Annotate an uploaded video with blink/wink marker overlays.

    Expects the video in ``request.FILES['video']`` and three RGBA overlay
    image paths in ``request.POST``: ``blink_image``, ``left_wink_image``
    and ``right_wink_image``.  Every frame is scanned with dlib's
    68-landmark predictor; when the eye aspect ratio (EAR) drops below the
    threshold for both eyes (blink) or one eye (wink), the matching marker
    is alpha-blended onto the next few frames.  The annotated video is
    written to ``outpy.avi`` and returned as an attachment; both temp
    files are removed before returning.

    Fixes vs. the previous revision: the writer is released (flushing
    codec buffers) BEFORE the output file is read back, the no-frames case
    no longer crashes on ``writer.release()``, and Content-Length is set.
    """
    EYE_AR_THRESH = 0.21
    EYE_AR_CONSEC_FRAMES = 1

    # Frame counters for each gesture and "show overlay for N frames"
    # countdowns.
    COUNTER = 0
    LEFT_COUNTER = 0
    RIGHT_COUNTER = 0
    TOTAL = 0
    BLINK_SHOW = 0
    LEFTWINK_SHOW = 0
    RIGHTWINK_SHOW = 0

    # HOG face detector plus the 68-point landmark model shipped as a
    # static asset.
    detector = dlib.get_frontal_face_detector()
    cur_path = str(Path.cwd())
    shape_predictor_path = "face_detection_project/static/shape_predictor_68_face_landmarks.dat"
    predictor_path = os.path.join(cur_path, shape_predictor_path)
    predictor = dlib.shape_predictor(predictor_path)

    # Landmark index ranges covering each eye.
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

    # Persist the upload so FileVideoStream can open it by path.
    data = request.FILES['video']
    path = default_storage.save('tmp/somename.mp3', ContentFile(data.read()))
    tmp_file = os.path.join(settings.MEDIA_ROOT, path)
    vs = FileVideoStream(tmp_file).start()
    fileStream = True
    time.sleep(1.0)  # let the decode thread fill its buffer

    fourcc = cv2.VideoWriter_fourcc("M", "J", "P", "G")
    writer = None
    right_mark = None
    left_mark = None
    middle_mark = None

    while True:
        # For a file stream, stop when the decode buffer is exhausted.
        if fileStream and not vs.more():
            break
        frame = vs.read()
        if frame is None:
            break

        frame = imutils.resize(frame, width=450)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        rects = detector(gray, 0)

        for rect in rects:
            # Landmarks -> numpy array -> per-eye EAR.
            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)
            leftEye = shape[lStart:lEnd]
            rightEye = shape[rStart:rEnd]
            leftEAR = eye_aspect_ratio(leftEye)
            rightEAR = eye_aspect_ratio(rightEye)

            # Both eyes below threshold -> potential blink.
            if leftEAR < EYE_AR_THRESH and rightEAR < EYE_AR_THRESH:
                COUNTER += 1
            elif leftEAR >= EYE_AR_THRESH or rightEAR >= EYE_AR_THRESH:
                # Eyes reopened after enough closed frames: count a blink,
                # unless a wink overlay is already active.
                if LEFTWINK_SHOW == 0 and RIGHTWINK_SHOW == 0 and COUNTER >= EYE_AR_CONSEC_FRAMES:
                    TOTAL += 1
                    BLINK_SHOW = 5  # show the blink marker for 5 frames
                COUNTER = 0

            # Left eye only closed -> left wink.
            if leftEAR < EYE_AR_THRESH and rightEAR >= EYE_AR_THRESH:
                LEFT_COUNTER += 1
            elif leftEAR >= EYE_AR_THRESH or rightEAR < EYE_AR_THRESH:
                if LEFT_COUNTER >= EYE_AR_CONSEC_FRAMES:
                    LEFTWINK_SHOW = 5
                LEFT_COUNTER = 0

            # Right eye only closed -> right wink.
            if rightEAR < EYE_AR_THRESH and leftEAR >= EYE_AR_THRESH:
                RIGHT_COUNTER += 1
            elif rightEAR >= EYE_AR_THRESH or leftEAR < EYE_AR_THRESH:
                if RIGHT_COUNTER >= EYE_AR_CONSEC_FRAMES:
                    RIGHTWINK_SHOW = 5
                RIGHT_COUNTER = 0

            # Alpha-blend the active marker(s) onto the frame.  The mark
            # images are loaded on the first frame (see writer init below),
            # and the SHOW counters only become nonzero after that.
            if BLINK_SHOW > 0:
                y1, y2 = 20, 20 + middle_mark.shape[0]
                x1, x2 = 190, 190 + middle_mark.shape[1]
                for c in range(0, 3):
                    frame[y1:y2, x1:x2, c] = (middle_mark_alpha_s * middle_mark[:, :, c] +
                                              middle_mark_alpha_l * frame[y1:y2, x1:x2, c])
                BLINK_SHOW -= 1
            if LEFTWINK_SHOW > 0:
                y1, y2 = 100, 100 + left_mark.shape[0]
                x1, x2 = 350, 350 + left_mark.shape[1]
                for c in range(0, 3):
                    frame[y1:y2, x1:x2, c] = (left_mark_alpha_s * left_mark[:, :, c] +
                                              left_mark_alpha_l * frame[y1:y2, x1:x2, c])
                LEFTWINK_SHOW -= 1
            if RIGHTWINK_SHOW > 0:
                y1, y2 = 100, 100 + right_mark.shape[0]
                x1, x2 = 80, 80 + right_mark.shape[1]
                for c in range(0, 3):
                    frame[y1:y2, x1:x2, c] = (right_mark_alpha_s * right_mark[:, :, c] +
                                              right_mark_alpha_l * frame[y1:y2, x1:x2, c])
                RIGHTWINK_SHOW -= 1

        if writer is None:
            # First frame: create the writer and load the overlay images
            # (RGBA; channel 3 is the alpha mask used for blending).
            (h, w) = frame.shape[:2]
            writer = cv2.VideoWriter("outpy.avi", fourcc, 30, (w, h), True)
            # NOTE(review): these paths come straight from request.POST --
            # untrusted input used as filesystem paths.  Validate or
            # whitelist them before shipping this.
            right_mark = cv2.imread(request.POST['right_wink_image'], -1)
            right_mark_alpha_s = right_mark[:, :, 3] / 255.0
            right_mark_alpha_l = 1.0 - right_mark_alpha_s
            left_mark = cv2.imread(request.POST['left_wink_image'], -1)
            left_mark_alpha_s = left_mark[:, :, 3] / 255.0
            left_mark_alpha_l = 1.0 - left_mark_alpha_s
            middle_mark = cv2.imread(request.POST['blink_image'], -1)
            middle_mark_alpha_s = middle_mark[:, :, 3] / 255.0
            middle_mark_alpha_l = 1.0 - middle_mark_alpha_s

        # Write the annotated frame (copied into a fresh buffer).
        output = np.zeros((h, w, 3), dtype="uint8")
        output[0:h, 0:w] = frame
        writer.write(output)

    # Clean up and flush BEFORE reading the output file back: previously
    # the avi was opened while the writer still held buffered frames.
    cv2.destroyAllWindows()
    vs.stop()
    if writer is not None:
        writer.release()

    with open('outpy.avi', 'rb') as fh:
        response = HttpResponse(fh.read(), content_type='video/avi')
    response['Content-Disposition'] = 'attachment; filename=my_video.avi'
    response['Content-Length'] = str(os.path.getsize('outpy.avi'))

    os.remove('outpy.avi')
    os.remove(tmp_file)
    return response
def retrieve(self, request, *args, **kwargs):
    """Return a job's stdout in the format implied by the renderer.

    Supported renderer formats: ``html``/``api``/``json`` (rendered HTML or
    JSON payload, with optional base64-encoded ANSI content), ``txt`` and
    ``ansi`` (raw text), and ``txt_download``/``ansi_download`` (attachment
    streaming, with ANSI codes stripped for the txt variant).  Oversized
    stdout raises StdoutMaxBytesExceeded, which is reported inline.
    """
    job = self.get_object()
    try:
        target_format = request.accepted_renderer.format
        if target_format in ('html', 'api', 'json'):
            content_format = request.query_params.get(
                'content_format', 'html')
            content_encoding = request.query_params.get(
                'content_encoding', None)
            start_line = request.query_params.get('start_line', 0)
            end_line = request.query_params.get('end_line', None)
            dark_val = request.query_params.get('dark', '')
            dark = bool(dark_val and dark_val[0].lower() in ('1', 't', 'y'))
            content_only = bool(target_format in ('api', 'json'))
            # Dark background by default for the standalone HTML page; the
            # embedded (content_only) variants only go dark on request.
            dark_bg = (content_only and dark) or (not content_only and
                                                  (dark or not dark_val))
            content, start, end, absolute_end = job.result_stdout_raw_limited(
                start_line, end_line)
            # Remove any ANSI escape sequences containing job event data.
            content = re.sub(
                r'\x1b\[K(?:[A-Za-z0-9+/=]+\x1b\[\d+D)+\x1b\[K', '', content)
            # cgi.escape was removed in Python 3.8; html.escape with
            # quote=False escapes exactly the same characters (&, <, >).
            import html
            body = ansiconv.to_html(html.escape(content, quote=False))
            context = {
                'title': get_view_name(self.__class__),
                'body': mark_safe(body),
                'dark': dark_bg,
                'content_only': content_only,
            }
            data = render_to_string('api/stdout.html', context).strip()
            if target_format == 'api':
                return Response(mark_safe(data))
            if target_format == 'json':
                if content_encoding == 'base64' and content_format == 'ansi':
                    # NOTE(review): b64encode returns bytes; confirm the
                    # renderer serializes this as intended.
                    return Response({
                        'range': {
                            'start': start,
                            'end': end,
                            'absolute_end': absolute_end
                        },
                        'content': b64encode(content.encode('utf-8'))
                    })
                elif content_format == 'html':
                    return Response({
                        'range': {
                            'start': start,
                            'end': end,
                            'absolute_end': absolute_end
                        },
                        'content': body
                    })
            return Response(data)
        elif target_format == 'txt':
            return Response(job.result_stdout)
        elif target_format == 'ansi':
            return Response(job.result_stdout_raw)
        elif target_format in {'txt_download', 'ansi_download'}:
            filename = '{type}_{pk}{suffix}.txt'.format(
                type=camelcase_to_underscore(job.__class__.__name__),
                pk=job.id,
                suffix='.ansi' if target_format == 'ansi_download' else '')
            content_fd = job.result_stdout_raw_handle(enforce_max_bytes=False)
            if target_format == 'txt_download':
                # Strip ANSI escape codes for the plain-text download.
                content_fd = StdoutANSIFilter(content_fd)
            response = HttpResponse(FileWrapper(content_fd),
                                    content_type='text/plain')
            response[
                "Content-Disposition"] = 'attachment; filename="{}"'.format(
                    filename)
            return response
        else:
            return super(JobStdout, self).retrieve(request, *args, **kwargs)
    except StdoutMaxBytesExceeded as e:
        response_message = _(
            "Standard Output too large to display ({text_size} bytes), "
            "only download supported for sizes over {supported_size} bytes."
        ).format(text_size=e.total, supported_size=e.supported)
        if request.accepted_renderer.format == 'json':
            return Response({
                'range': {
                    'start': 0,
                    'end': 1,
                    'absolute_end': 1
                },
                'content': response_message
            })
        else:
            return Response(response_message)
def sample_down(request):
    """Serve the module-level ``file_path`` text file as ``samples.txt``."""
    sample_file = open(file_path, "r")
    resp = HttpResponse(FileWrapper(sample_file), content_type="text/plain")
    resp['Content-Disposition'] = 'attachment; filename="samples.txt"'
    return resp
def __init__(self, limit=None, *args, **kwargs): self.limit = limit FileWrapper.__init__(self, *args, **kwargs)
def calc_result(request, result_id):
    """
    Download a specific result, by ``result_id``.

    The common abstracted functionality for getting hazard or risk results.

    :param request:
        `django.http.HttpRequest` object. Can contain a `export_type` GET
        param (the default is 'xml' if no param is specified).
    :param result_id:
        The id of the requested artifact.
    :returns:
        If the requested ``result_id`` is not available in the format
        designated by the `export_type`.

        Otherwise, return a `django.http.HttpResponse` containing the content
        of the requested artifact.

    Parameters for the GET request can include an `export_type`, such as
    'xml', 'geojson', 'csv', etc.
    """
    # Look the result up; a missing result (or an incomplete parent job)
    # comes back as a 404, an unauthorized user as a 403.
    try:
        job_id, job_status, job_user, datadir, ds_key = logs.dbcmd(
            'get_result', result_id)
        if not utils.user_has_permission(request, job_user):
            return HttpResponseForbidden()
    except dbapi.NotFound:
        return HttpResponseNotFound()

    export_type = request.GET.get('export_type') or DEFAULT_EXPORT_TYPE

    tmpdir = tempfile.mkdtemp()
    try:
        exported = core.export_from_db(
            (ds_key, export_type), job_id, datadir, tmpdir)
    except DataStoreExportError as exc:
        # TODO: there should be a better error page
        return HttpResponse(content='%s: %s' % (exc.__class__.__name__, exc),
                            content_type='text/plain', status=500)

    if not exported:
        # Throw back a 404 if the exact export parameters are not supported
        return HttpResponseNotFound(
            'Nothing to export for export_type=%s, %s' % (export_type, ds_key))

    if len(exported) > 1:
        # Several files: bundle them into a single downloadable zip archive.
        archname = ds_key + '-' + export_type + '.zip'
        zipfiles(exported, os.path.join(tmpdir, archname))
        exported = os.path.join(tmpdir, archname)
    else:
        # Single file: serve it directly.
        exported = exported[0]

    content_type = EXPORT_CONTENT_TYPE_MAP.get(
        export_type, DEFAULT_CONTENT_TYPE)
    fname = 'output-%s-%s' % (result_id, os.path.basename(exported))

    stream = FileWrapper(open(exported, 'rb'))  # 'b' is needed on Windows
    # Remove the scratch directory once the response body has been streamed.
    stream.close = lambda: (
        FileWrapper.close(stream), shutil.rmtree(tmpdir))
    response = FileResponse(stream, content_type=content_type)
    response['Content-Disposition'] = (
        'attachment; filename=%s' % os.path.basename(fname))
    response['Content-Length'] = str(os.path.getsize(exported))
    return response
def read(self, amt):
    """Read at most ``amt`` bytes, never exceeding the remaining limit.

    Each call shrinks ``self.limit`` by the number of bytes requested, so
    once the limit is exhausted subsequent reads request zero bytes.
    """
    capped = min(amt, self.limit)
    self.limit -= capped
    return FileWrapper.read(self, capped)
class FileSystemWorker(Worker):
    """Static-file worker: maps GET requests onto a document root and
    serves files (with conditional GET and byte-range support) or HTML
    directory listings."""

    def __init__(self, *args, **kwargs):
        """Builds some instance variables that will last the life of the
        thread."""
        Worker.__init__(self, *args, **kwargs)
        self.root = os.path.abspath(self.app_info['document_root'])
        self.display_index = self.app_info['display_index']

    def serve_file(self, filepath, headers):
        """Populate status/headers/data for a single file, honouring
        If-Modified-Since and single byte-range requests."""
        filestat = os.stat(filepath)
        self.size = filestat.st_size
        modtime = time.strftime("%a, %d %b %Y %H:%M:%S GMT",
                                time.gmtime(filestat.st_mtime))
        self.headers.add_header('Last-Modified', modtime)
        if headers.get('if_modified_since') == modtime:
            # The browser cache is up-to-date, send a 304.
            self.status = "304 Not Modified"
            self.data = []
            return

        ct = mimetypes.guess_type(filepath)[0]
        self.content_type = ct if ct else 'text/plain'
        try:
            f = open(filepath, 'rb')
            self.headers['Pragma'] = 'cache'
            self.headers['Cache-Control'] = 'private'
            self.headers['Content-Length'] = str(self.size)
            if self.etag:
                self.headers.add_header('Etag', self.etag)
            if self.expires:
                self.headers.add_header('Expires', self.expires)

            try:
                # Implement 206 partial file support.
                rng = headers['range']
                # A Range header looks like "bytes=start-end"; the old code
                # never stripped the "bytes=" prefix, so real-world range
                # requests always fell through to a full-file response.
                if rng.startswith('bytes='):
                    rng = rng[len('bytes='):]
                start, end = rng.split('-')
                start = int(start) if start.isdigit() else 0
                # HTTP range ends are inclusive; keep "end" exclusive
                # internally (missing end means "to EOF").
                end = int(end) + 1 if end.isdigit() else self.size
                if end > self.size or start < 0 or start >= end:
                    # Fixed: previously the bogus status "214 Unsatisfiable
                    # Range Requested"; 416 is the real status code.  Keep
                    # the original fallback of sending the whole file.
                    self.status = "416 Requested Range Not Satisfiable"
                    self.data = FileWrapper(f, CHUNK_SIZE)
                else:
                    f.seek(start)
                    # Fixed: limit was previously "end", which served too
                    # many bytes; after seeking the remaining count is
                    # end - start.  Content-Length/Content-Range are also
                    # updated to describe the partial body.
                    self.data = LimitingFileWrapper(f, CHUNK_SIZE,
                                                    limit=end - start)
                    self.headers['Content-Length'] = str(end - start)
                    self.headers['Content-Range'] = (
                        'bytes %d-%d/%d' % (start, end - 1, self.size))
                    self.status = "206 Partial Content"
            except (KeyError, ValueError):
                # No (or malformed) Range header: serve the whole file.
                self.data = FileWrapper(f, CHUNK_SIZE)
        except IOError:
            self.status = "403 Forbidden"

    def serve_dir(self, pth, rpth):
        """Render an HTML directory listing for *pth* (URL path *rpth*),
        or a 404 when directory indexes are disabled."""
        def rel_path(path):
            # Strip the document root so the listing links are site-relative.
            return os.path.normpath(path[len(self.root):] if path.startswith(self.root) else path)

        if not self.display_index:
            self.status = '404 File Not Found'
            return b('')
        else:
            self.content_type = 'text/html'

            dir_contents = [os.path.join(pth, x)
                            for x in os.listdir(os.path.normpath(pth))]
            dir_contents.sort()

            dirs = [rel_path(x) + '/' for x in dir_contents if os.path.isdir(x)]
            files = [rel_path(x) for x in dir_contents if os.path.isfile(x)]

            self.data = [INDEX_HEADER % dict(path='/' + rpth)]

            if rpth:
                self.data += [INDEX_ROW % dict(name='(parent directory)',
                                               cls='dir parent',
                                               link='/'.join(rpth[:-1].split('/')[:-1]))]

            self.data += [INDEX_ROW % dict(name=os.path.basename(x[:-1]),
                                           link=os.path.join(rpth, os.path.basename(x[:-1])).replace('\\', '/'),
                                           cls='dir') for x in dirs]

            self.data += ['<tr><th>Files</th></tr>']

            self.data += [INDEX_ROW % dict(name=os.path.basename(x),
                                           link=os.path.join(rpth, os.path.basename(x)).replace('\\', '/'),
                                           cls='file') for x in files]

            self.data += [INDEX_FOOTER]
            self.headers['Content-Length'] = self.size = str(sum([len(x) for x in self.data]))
            self.status = '200 OK'

    def run_app(self, conn):
        """Handle one connection: parse the request line and headers, map
        the URL under the document root, dispatch to serve_file/serve_dir,
        then write status, headers and body out on the socket."""
        self.status = "200 OK"
        self.size = 0
        self.expires = None
        self.etag = None
        self.content_type = 'text/plain'
        self.content_length = None

        if __debug__:
            self.err_log.debug('Getting sock_file')

        # Build our file-like object
        sock_file = conn.makefile('rb', BUF_SIZE)
        request = self.read_request_line(sock_file)
        if request['method'].upper() not in ('GET', ):
            self.status = "501 Not Implemented"

        try:
            # Get our file path
            headers = dict([(str(k.lower()), v)
                            for k, v in self.read_headers(sock_file).items()])
            rpath = request.get('path', '').lstrip('/')
            filepath = os.path.join(self.root, rpath)
            filepath = os.path.abspath(filepath)
            if __debug__:
                self.err_log.debug('Request for path: %s' % filepath)

            self.closeConnection = headers.get('connection',
                                               'close').lower() == 'close'
            self.headers = Headers([('Date', formatdate(usegmt=True)),
                                    ('Server', HTTP_SERVER_SOFTWARE),
                                    ('Connection', headers.get('connection',
                                                               'close')),
                                    ])

            if not filepath.lower().startswith(self.root.lower()):
                # File must be within our root directory
                self.status = "400 Bad Request"
                self.closeConnection = True
            elif not os.path.exists(filepath):
                self.status = "404 File Not Found"
                self.closeConnection = True
            elif os.path.isdir(filepath):
                self.serve_dir(filepath, rpath)
            elif os.path.isfile(filepath):
                self.serve_file(filepath, headers)
            else:
                # It exists but it's not a file or a directory????
                # What is it then?
                self.status = "501 Not Implemented"
                self.closeConnection = True

            h = self.headers
            statcode, statstr = self.status.split(' ', 1)
            statcode = int(statcode)
            if statcode >= 400:
                # Error responses carry the reason phrase as their body.
                h.add_header('Content-Type', self.content_type)
                self.data = [statstr]

            # Build our output headers
            header_data = HEADER_RESPONSE % (self.status, str(h))

            # Send the headers
            if __debug__:
                self.err_log.debug('Sending Headers: %s' % repr(header_data))
            self.conn.sendall(b(header_data))

            for data in self.data:
                self.conn.sendall(b(data))

            if hasattr(self.data, 'close'):
                self.data.close()

        finally:
            if __debug__:
                self.err_log.debug('Finally closing sock_file')
            sock_file.close()