def printer_label(sample):
    """Generate the PDF of a sample for the label printer.

    :param sample: the sample whose label should be generated

    :type sample: `samples.models.Sample`

    :return: the PDF as a byte stream

    :rtype: str
    """
    output = StringIO()
    text = sample.name
    c = canvas.Canvas(output, pagesize=(width, height))
    c.setAuthor("JuliaBase samples database")
    c.setTitle(text)
    c.setSubject("Label of {0} for the label printer".format(text))
    try:
        print_line(c, 0, fontsize, text)
    except ExcessException:
        first, second = best_split(text)
        print_line(c, height / 2, fontsize_half, first, force=True)
        print_line(c, 0, fontsize_half, second, force=True)
    c.drawImage(ImageReader("http://chart.googleapis.com/chart?chs=116x116&cht=qr&chl={0}&chld=H|1".format(sample.id)),
                width - height, 0, height, height)
    c.showPage()
    c.save()
    return output.getvalue()

def server_connection(self, **kwargs):
    """Returns a context manager which yields an LDAP connection object.  All
    keyword parameters passed are passed to the connection constructor.  Note
    that “connection” here means a Python ldap3 object rather than the
    :py:class:`LDAPConnection` class.

    :raises ldap3.LDAPInvalidCredentialsResult: if you provided user
      credentials in ``kwargs`` and they were invalid.
    """
    connection_kwargs = {"raise_exceptions": True, "read_only": True}
    connection_kwargs.update(kwargs)
    for ad_ldap_url in settings.LDAP_URLS:
        server = ldap3.Server(**self.get_server_parameters(ad_ldap_url))
        try:
            with ldap3.Connection(server, **connection_kwargs) as connection:
                yield connection
            break
        except ldap3.LDAPInvalidCredentialsResult:
            raise
        except ldap3.LDAPException:
            message = StringIO()
            traceback.print_exc(file=message)
            continue
    else:
        mail_admins("JuliaBase LDAP error", message.getvalue())
        yield None

def _run_command(self, **options):
    """Run the profile command with the given options on the diffsettings
    command and capture the output."""
    output = StringIO()
    error = StringIO()
    args = []
    if django.VERSION[0] == 1 and django.VERSION[1] < 8:
        options = options.copy()
        options['backend'] = 'cProfile'
        options['testing'] = True
        for option in ('fraction', 'max_calls', 'path', 'pattern', 'sort'):
            if option not in options:
                options[option] = None
        call_command('profile', 'diffsettings', stderr=error, stdout=output,
                     **options)
        expected = 'INSTALLED_APPS'
    else:
        for option in options:
            if option in ('fraction', 'pattern', 'sort'):
                args.append('--{}={}'.format(option, options[option]))
            elif option == 'max_calls':
                args.append('--max-calls={}'.format(options[option]))
            elif option == 'path':
                args.append('--output={}'.format(options[option]))
        args.append('showmigrations')
        args.append('--plan')
        options = {'backend': 'cProfile', 'testing': True, 'stderr': error,
                   'stdout': output}
        call_command('profile', *args, **options)
        expected = '0001_initial'
    text = output.getvalue()
    assert expected in text
    return text

def render_markdown_from_file(f, **markdown_kwargs):
    """Render Markdown text from a file stream to HTML."""
    s = StringIO()
    markdownFromFile(input=f, output=s, **markdown_kwargs)
    html = s.getvalue()
    s.close()
    return html

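# A usage sketch for render_markdown_from_file() above (assumption: any
# readable file-like object is acceptable to Python-Markdown's
# markdownFromFile, which does the actual rendering; the input file name is
# hypothetical):
with open('README.md', 'rb') as f:  # hypothetical Markdown source
    html = render_markdown_from_file(f)
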
def output_results(profiler, options, stdout):
    """Generate the profiler output in the desired format.  Implemented as a
    separate function so it can be run as an exit handler (because management
    commands often call exit() directly, bypassing the rest of the profile
    command's handle() method)."""
    profiler.create_stats()
    if not options['sort']:
        if not which('dot'):
            stdout.write('Could not find "dot" from Graphviz; please install Graphviz to enable call graph generation')
            return
        if not which('gprof2dot.py'):
            stdout.write('Could not find gprof2dot.py, which should have been installed by yet-another-django-profiler')
            return
        with tempfile.NamedTemporaryFile() as stats:
            stats.write(marshal.dumps(profiler.stats))
            stats.flush()
            cmd = 'gprof2dot.py -f pstats {} | dot -Tpdf'.format(stats.name)
            process = subprocess.Popen(cmd, shell=True,
                                       stdin=subprocess.PIPE,
                                       stdout=subprocess.PIPE)
            output = process.communicate()[0]
            return_code = process.poll()
        if return_code:
            stdout.write('gprof2dot/dot exited with {}'.format(return_code))
            return
        path = options['path']
        with open(path, 'wb') as pdf_file:
            pdf_file.write(output)
        stdout.write('Wrote call graph to {}'.format(path))
    else:
        sort = options['sort']
        if sort == 'file':
            # Work around bug on Python versions >= 2.7.4
            sort = 'fil'
        out = StringIO()
        stats = pstats.Stats(profiler, stream=out)
        with mock.patch('pstats.func_strip_path') as mock_func_strip_path:
            mock_func_strip_path.side_effect = func_strip_path
            stats.strip_dirs()
        restrictions = []
        if options['pattern']:
            restrictions.append(options['pattern'])
        if options['fraction']:
            restrictions.append(float(options['fraction']))
        elif options['max_calls']:
            restrictions.append(int(options['max_calls']))
        elif not options['pattern']:
            restrictions.append(.2)
        stats.sort_stats(sort).print_stats(*restrictions)
        if options['path']:
            path = options['path']
            with open(path, 'w') as text_file:
                text_file.write(out.getvalue())
            stdout.write('Wrote profiling statistics to {}'.format(path))
        else:
            stdout.write(out.getvalue())

def process_response(self, request, response):
    """
    Handler for processing a response. Dumps the profiling information
    to the profile log file.
    """
    timedloginfo = getattr(request, '_page_timedloginfo', None)

    if timedloginfo:
        timedloginfo.done()

    if ('profiling' in request.GET and
        getattr(settings, "LOGGING_ALLOW_PROFILING", False)):
        init_profile_logger()

        self.profiler.create_stats()

        # Capture the stats
        out = StringIO()
        old_stdout, sys.stdout = sys.stdout, out
        self.profiler.print_stats(1)
        sys.stdout = old_stdout

        profile_log = logging.getLogger("profile")
        profile_log.log(logging.INFO,
                        "Profiling results for %s (HTTP %s):",
                        request.path, request.method)
        profile_log.log(logging.INFO, out.getvalue().strip())

        profile_log.log(logging.INFO,
                        '%d database queries made\n',
                        len(connection.queries))

        queries = {}

        for query in connection.queries:
            sql = reformat_sql(query['sql'])
            stack = ''.join(query['stack'][:-1])
            time = query['time']

            if sql in queries:
                queries[sql].append((time, stack))
            else:
                queries[sql] = [(time, stack)]

        times = {}

        for sql, entries in six.iteritems(queries):
            time = sum(float(entry[0]) for entry in entries)
            tracebacks = '\n\n'.join(entry[1] for entry in entries)
            times[time] = \
                'SQL Query profile (%d times, %.3fs average)\n%s\n\n%s\n\n' % \
                (len(entries), time / len(entries), sql, tracebacks)

        sorted_times = sorted(six.iterkeys(times), reverse=True)

        for time in sorted_times:
            profile_log.log(logging.INFO, times[time])

    return response

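# The stdout swap above is needed because Profile.print_stats() always writes
# to sys.stdout.  A minimal self-contained sketch of the same capture pattern
# (standard library only; nothing here is specific to the middleware):
import cProfile
import sys
from io import StringIO


def profile_to_string(func, *args, **kwargs):
    """Run func under cProfile and return its stats listing as a string."""
    profiler = cProfile.Profile()
    profiler.enable()
    try:
        func(*args, **kwargs)
    finally:
        profiler.disable()

    out = StringIO()
    old_stdout, sys.stdout = sys.stdout, out
    try:
        profiler.print_stats(1)  # old-style numeric key 1 = internal time
    finally:
        sys.stdout = old_stdout

    return out.getvalue()
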
def test_generate_key(self):
    stdout = StringIO()

    try:
        keygen.main(stdout=stdout, argv=[])
    except SystemExit as exc:
        self.assertEqual(exc.code, 0)

    key = stdout.getvalue()
    f = Fernet(key)

    # Make sure this doesn't raise an error about a bad key.
    f.decrypt(f.encrypt(b'whatever'))

def process_response(self, request, response):
    if settings.YADP_ENABLED and settings.YADP_PROFILE_PARAMETER in request.REQUEST:
        if self.error:
            return text_response(response, self.error)
        self.profiler.create_stats()
        mode = request.REQUEST[settings.YADP_PROFILE_PARAMETER]
        if mode == 'file':
            # Work around bug on Python versions >= 2.7.4
            mode = 'fil'
        if not mode:
            if not which('dot'):
                return text_response(response, 'Could not find "dot" from Graphviz; please install Graphviz to enable call graph generation')
            if not which('gprof2dot.py'):
                return text_response(response, 'Could not find gprof2dot.py, which should have been installed by yet-another-django-profiler')
            with tempfile.NamedTemporaryFile() as stats:
                stats.write(marshal.dumps(self.profiler.stats))
                stats.flush()
                cmd = 'gprof2dot.py -f pstats {} | dot -Tpdf'.format(stats.name)
                process = subprocess.Popen(cmd, shell=True,
                                           stdin=subprocess.PIPE,
                                           stdout=subprocess.PIPE)
                output = process.communicate()[0]
                return_code = process.poll()
            if return_code:
                raise Exception('gprof2dot/dot exited with {}'.format(return_code))
            response.content = output
            response['Content-Type'] = 'application/pdf'
            return response
        elif mode == 'help':
            return text_response(response, ProfilerMiddleware.__doc__)
        else:
            out = StringIO()
            stats = pstats.Stats(self.profiler, stream=out)
            with mock.patch('pstats.func_strip_path') as mock_func_strip_path:
                mock_func_strip_path.side_effect = func_strip_path
                stats.strip_dirs()
            restrictions = []
            if settings.YADP_PATTERN_PARAMETER in request.REQUEST:
                restrictions.append(request.REQUEST[settings.YADP_PATTERN_PARAMETER])
            if settings.YADP_FRACTION_PARAMETER in request.REQUEST:
                restrictions.append(float(request.REQUEST[settings.YADP_FRACTION_PARAMETER]))
            elif settings.YADP_MAX_CALLS_PARAMETER in request.REQUEST:
                restrictions.append(int(request.REQUEST[settings.YADP_MAX_CALLS_PARAMETER]))
            elif settings.YADP_PATTERN_PARAMETER not in request.REQUEST:
                restrictions.append(.2)
            try:
                stats.sort_stats(mode).print_stats(*restrictions)
            except KeyError:
                # Bad parameter for sorting stats
                return text_response(response, "Bad parameter passed for sorting statistics.\n" + ProfilerMiddleware.__doc__)
            return html_response(request, response, out.getvalue())
    return response

def render_markdown_from_file(f):
    """Renders Markdown text to HTML.

    The Markdown text will be sanitized to prevent injecting custom HTML.
    It will also enable a few plugins for code highlighting and sane lists.
    """
    s = StringIO()
    markdownFromFile(input=f, output=s, **MARKDOWN_KWARGS)
    html = s.getvalue()
    s.close()

    return html

def output_results(profiler, options, stdout):
    """Generate the profiler output in the desired format.  Implemented as a
    separate function so it can be run as an exit handler (because management
    commands often call exit() directly, bypassing the rest of the profile
    command's handle() method)."""
    profiler.create_stats()
    if not options['sort']:
        if not which('dot'):
            stdout.write(
                'Could not find "dot" from Graphviz; please install '
                'Graphviz to enable call graph generation'
            )
            return
        if not which('gprof2dot.py'):
            stdout.write(
                'Could not find gprof2dot.py, which should have been '
                'installed by yet-another-django-profiler'
            )
            return
        return_code, output = run_gprof2dot(profiler)
        if return_code:
            stdout.write('gprof2dot/dot exited with {}'.format(return_code))
            return
        path = options['path']
        with open(path, 'wb') as pdf_file:
            pdf_file.write(output)
        stdout.write('Wrote call graph to {}'.format(path))
    else:
        sort = options['sort']
        if sort == 'file':
            # Work around bug on Python versions >= 2.7.4
            sort = 'fil'
        out = StringIO()
        stats = pstats.Stats(profiler, stream=out)
        with mock.patch('pstats.func_strip_path') as mock_func_strip_path:
            mock_func_strip_path.side_effect = func_strip_path
            stats.strip_dirs()
        restrictions = []
        if options['pattern']:
            restrictions.append(options['pattern'])
        if options['fraction']:
            restrictions.append(float(options['fraction']))
        elif options['max_calls']:
            restrictions.append(int(options['max_calls']))
        elif not options['pattern']:
            restrictions.append(.2)
        stats.sort_stats(sort).print_stats(*restrictions)
        if options['path']:
            path = options['path']
            with open(path, 'w') as text_file:
                text_file.write(out.getvalue())
            stdout.write('Wrote profiling statistics to {}'.format(path))
        else:
            stdout.write(out.getvalue())

def _run_command(self, **options):
    """Run the profile command with the given options on the diffsettings
    command and capture the output."""
    output = StringIO()
    options = options.copy()
    options['backend'] = 'cProfile'
    options['testing'] = True
    for option in ('fraction', 'max_calls', 'path', 'pattern', 'sort'):
        if option not in options:
            options[option] = None
    call_command('profile', 'diffsettings', stdout=output, **options)
    text = output.getvalue()
    assert 'INSTALLED_APPS' in text
    return text

def process_response(self, request, response):
    if self.profile_parameter is not None:
        response.status_code = 200
        if self.error:
            return text_response(response, self.error)
        self.profiler.create_stats()
        mode = self.profile_parameter
        if mode == 'file':
            # Work around bug on Python versions >= 2.7.4
            mode = 'fil'
        if not mode:
            if not which('dot'):
                return text_response(response, _('Could not find "dot" from Graphviz; please install Graphviz to enable call graph generation'))
            if not which('gprof2dot.py'):
                return text_response(response, _('Could not find gprof2dot.py, which should have been installed by yet-another-django-profiler'))
            return_code, output = run_gprof2dot(self.profiler)
            if return_code:
                raise Exception(_('gprof2dot.py exited with {return_code}').format(return_code=return_code))
            set_content(response, output)
            response['Content-Type'] = 'application/pdf'
            return response
        elif mode == 'help':
            return text_response(response, ProfilerMiddleware.__doc__)
        else:
            out = StringIO()
            stats = pstats.Stats(self.profiler, stream=out)
            with mock.patch('pstats.func_strip_path') as mock_func_strip_path:
                mock_func_strip_path.side_effect = func_strip_path
                stats.strip_dirs()
            restrictions = []
            if self.pattern_parameter is not None:
                restrictions.append(self.pattern_parameter)
            if self.fraction_parameter is not None:
                restrictions.append(float(self.fraction_parameter))
            elif self.max_calls_parameter is not None:
                restrictions.append(int(self.max_calls_parameter))
            elif self.pattern_parameter is None:
                restrictions.append(.2)
            stats.sort_stats(mode).print_stats(*restrictions)
            return text_response(response, out.getvalue())
    return response

def encode(self, o, *args, **kwargs):
    self.level = 0
    self.doIndent = False

    stream = StringIO()
    self.xml = XMLGenerator(stream, settings.DEFAULT_CHARSET)
    self.xml.startDocument()
    self.startElement("rsp")
    self.__encode(o, *args, **kwargs)
    self.endElement("rsp")
    self.xml.endDocument()
    self.xml = None

    return stream.getvalue()

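# A minimal self-contained sketch of the XMLGenerator-into-StringIO pattern
# used by encode() above (standard library only; the element name and text
# here are invented for illustration):
from io import StringIO
from xml.sax.saxutils import XMLGenerator


def build_rsp_document(text):
    """Serialize a tiny <rsp> document to a string."""
    stream = StringIO()
    xml = XMLGenerator(stream, 'utf-8')
    xml.startDocument()
    xml.startElement('rsp', {})
    xml.characters(text)
    xml.endElement('rsp')
    xml.endDocument()
    return stream.getvalue()


# build_rsp_document('ok')
# -> '<?xml version="1.0" encoding="utf-8"?>\n<rsp>ok</rsp>'
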
def prepend_data(self, data):
    """Prepend data to the buffer.

    Args:
        data (bytes):
            The data to prepend.
    """
    if data:
        new_data_io = StringIO()
        new_data_io.write(data)
        new_data_io.write(self._data_io.getvalue())

        self._data_io.close()
        self._data_io = new_data_io

def __init__(self, stream=None, dialect=csv.excel_tab, encoding="utf-8", **kwargs):
    """Additional keyword arguments are passed to the ``csv.writer`` factory
    function in Python's ``csv`` module.  After having instantiated this
    class, you can use `writerow` and `writerows` to add data to it, and then
    extract it in the CSV format using `getvalue`.

    :param stream: the writable file-like object where the output should be
        sent; if ``None``, you must get the output with `getvalue`.
    :param dialect: the CSV format; it defaults to Excel's TAB format
        (TAB-separated, double quotes)
    :param encoding: name of the output encoding to be used; defaults to
        UTF-8

    :type stream: file
    :type dialect: ``csv.Dialect``
    :type encoding: str
    """
    import codecs
    self.queue = StringIO()
    self.writer = csv.writer(self.queue, dialect=dialect, **kwargs)
    self.stream = stream if stream else StringIO()
    self.encoder = codecs.getincrementalencoder(encoding)()

def pop(self):
    """Return the current contents of the buffer, then clear it."""
    s = self.buffer.getvalue()
    self.buffer.close()
    self.buffer = StringIO()

    return s

def _migrate_extension_models(self, ext_class):
    """Perform database migrations for an extension's models.

    This will call out to Django Evolution to handle the migrations.

    Args:
        ext_class (djblets.extensions.extension.Extension):
            The class for the extension to migrate.
    """
    try:
        from django_evolution.management.commands.evolve import \
            Command as Evolution
    except ImportError:
        raise InstallExtensionError(
            "Unable to migrate the extension's database tables. Django "
            "Evolution is not installed.")

    try:
        stream = StringIO()
        evolution = Evolution()
        evolution.style = no_style()
        evolution.execute(verbosity=0, interactive=False, execute=True,
                          hint=False, compile_sql=False, purge=False,
                          database=False, stdout=stream, stderr=stream)

        output = stream.getvalue()

        if output:
            logging.info('Evolved extension models for %s: %s',
                         ext_class.id, output)

        stream.close()
    except CommandError as e:
        # Something went wrong while running django-evolution, so grab the
        # output.  We can't raise right away because we still need to put
        # stdout back the way it was.
        output = stream.getvalue()
        stream.close()

        logging.error('Error evolving extension models: %s: %s',
                      e, output, exc_info=1)

        load_error = self._store_load_error(ext_class.id, output)
        raise InstallExtensionError(six.text_type(e), load_error)

def process_response(self, request, response):
    if self.profile_parameter is not None:
        if self.error:
            return text_response(response, self.error)
        self.profiler.create_stats()
        mode = self.profile_parameter
        if mode == 'file':
            # Work around bug on Python versions >= 2.7.4
            mode = 'fil'
        if not mode:
            if not which('dot'):
                return text_response(response, _('Could not find "dot" from Graphviz; please install Graphviz to enable call graph generation'))
            if not which('gprof2dot.py'):
                return text_response(response, _('Could not find gprof2dot.py, which should have been installed by yet-another-django-profiler'))
            return_code, output = run_gprof2dot(self.profiler)
            if return_code:
                raise Exception(_('gprof2dot.py exited with {return_code}').format(return_code=return_code))
            set_content(response, output)
            response['Content-Type'] = 'application/pdf'
            return response
        elif mode == 'help':
            return text_response(response, ProfilerMiddleware.__doc__)
        else:
            out = StringIO()
            stats = pstats.Stats(self.profiler, stream=out)
            with mock.patch('pstats.func_strip_path') as mock_func_strip_path:
                mock_func_strip_path.side_effect = func_strip_path
                stats.strip_dirs()
            restrictions = []
            if self.pattern_parameter is not None:
                restrictions.append(self.pattern_parameter)
            if self.fraction_parameter is not None:
                restrictions.append(float(self.fraction_parameter))
            elif self.max_calls_parameter is not None:
                restrictions.append(int(self.max_calls_parameter))
            elif self.pattern_parameter is None:
                restrictions.append(.2)
            stats.sort_stats(mode).print_stats(*restrictions)
            return text_response(response, out.getvalue())
    return response

def _run_command(self, **options):
    """Run the profile command with the given options on the diffsettings
    command and capture the output."""
    output = StringIO()
    error = StringIO()
    args = []
    if django.VERSION[0] == 1 and django.VERSION[1] < 8:
        options = options.copy()
        options['backend'] = 'cProfile'
        options['testing'] = True
        for option in ('fraction', 'max_calls', 'path', 'pattern', 'sort'):
            if option not in options:
                options[option] = None
        call_command('profile', 'diffsettings', stderr=error, stdout=output,
                     **options)
        expected = 'INSTALLED_APPS'
    else:
        for option in options:
            if option in ('fraction', 'pattern', 'sort'):
                args.append('--{}={}'.format(option, options[option]))
            elif option == 'max_calls':
                args.append('--max-calls={}'.format(options[option]))
            elif option == 'path':
                args.append('--output={}'.format(options[option]))
        args.append('showmigrations')
        args.append('--plan')
        options = {
            'backend': 'cProfile',
            'testing': True,
            'stderr': error,
            'stdout': output
        }
        call_command('profile', *args, **options)
        expected = '0001_initial'
    text = output.getvalue()
    assert expected in text
    return text

def arch_month_mbox(request, list_name, year, month_name):
    """
    The messages must be rebuilt before being added to the mbox file,
    including headers and the textual content, making sure to escape email
    addresses.
    """
    return HttpResponse("Not implemented yet.",
                        content_type="text/plain", status=500)
    mlist = get_list_by_name(list_name, request.get_host())
    month = month_name_to_num(month_name)
    year = int(year)
    begin_date = datetime.datetime(year, month, 1)
    if month != 12:
        end_date = datetime.datetime(year, month + 1, 1)
    else:
        end_date = datetime.datetime(year + 1, 1, 1)
    messages = Email.objects.filter(
        mailinglist=mlist, date__gte=begin_date, date__lte=end_date
    ).order_by("date")
    mboxfile, mboxfilepath = tempfile.mkstemp(
        prefix="hyperkitty-", suffix=".mbox.gz")
    os.close(mboxfile)
    mbox = mailbox.mbox(mboxfilepath)
    for msg in messages:
        mbox.add(msg.full)
    mbox.close()
    content = StringIO()
    with gzip.GzipFile(fileobj=content, mode="wb") as zipped_content:
        with open(mboxfilepath, "rb") as mboxfile:
            zipped_content.write(mboxfile.read())
    response = HttpResponse(content.getvalue())
    content.close()
    response['Content-Type'] = "application/mbox+gz"
    response['Content-Disposition'] = 'attachment; filename=%d-%s.txt.gz' \
        % (year, month_name)
    response['Content-Length'] = len(response.content)
    os.remove(mboxfilepath)
    return response

def read_solarsimulator_plot_file(filename, position):
    """Read a datafile from a solarsimulator measurement and return the
    content of the voltage column and the selected current column.

    :param filename: full path to the solarsimulator measurement data file
    :param position: the position of the cell whose currents should be read

    :type filename: str
    :type position: str

    :return: all voltages in Volt, then all currents in Ampere

    :rtype: list of float, list of float

    :raises PlotError: if something goes wrong with interpreting the file
        (I/O, unparsable data)
    """
    try:
        datafile_content = StringIO(open(filename).read())
    except IOError:
        raise PlotError("Data file could not be read.")
    for line in datafile_content:
        if line.startswith("# Positions:"):
            positions = line.partition(":")[2].split()
            break
    else:
        positions = []
    try:
        column = positions.index(position) + 1
    except ValueError:
        raise PlotError("Cell position not found in the datafile.")
    datafile_content.seek(0)
    try:
        return numpy.loadtxt(datafile_content, usecols=(0, column),
                             unpack=True)
    except ValueError:
        raise PlotError("Data file format was invalid.")

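# A minimal self-contained sketch of the loadtxt pattern above: StringIO
# turns a string into an in-memory "file" that numpy.loadtxt can read
# column-wise, skipping "#" comment lines automatically.  The data values
# here are invented for illustration:
import numpy
from io import StringIO

demo = StringIO(
    "# Positions: a1 a2\n"
    "0.0  0.001  0.002\n"
    "0.5  0.003  0.004\n"
)
# Take column 0 (voltage) and column 2 (the current for cell "a2"), each
# returned as its own array thanks to unpack=True.
voltages, currents = numpy.loadtxt(demo, usecols=(0, 2), unpack=True)
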
def _cache_store_large_data(cache, key, data, expiration, compress_large_data):
    # We store large data in the cache broken into chunks that are 1M in
    # size.  To do this easily, we first pickle the data and compress it with
    # zlib.  This gives us a string which can be chunked easily.  These are
    # then stored individually in the cache as single-element lists (so the
    # cache backend doesn't try to convert binary data to utf8).  The number
    # of chunks needed is stored in the cache under the unadorned key.
    file = StringIO()
    pickler = pickle.Pickler(file)
    pickler.dump(data)
    data = file.getvalue()

    if compress_large_data:
        data = zlib.compress(data)

    i = 0
    while len(data) > CACHE_CHUNK_SIZE:
        chunk = data[0:CACHE_CHUNK_SIZE]
        data = data[CACHE_CHUNK_SIZE:]
        cache.set(make_cache_key('%s-%d' % (key, i)), [chunk], expiration)
        i += 1

    cache.set(make_cache_key('%s-%d' % (key, i)), [data], expiration)
    cache.set(make_cache_key(key), '%d' % (i + 1), expiration)

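# A minimal self-contained sketch of the pickle -> compress -> chunk pipeline
# above, using a plain dict in place of the cache backend (CHUNK_SIZE is
# shrunk from 1M for demonstration):
import pickle
import zlib

CHUNK_SIZE = 16
fake_cache = {}

payload = zlib.compress(pickle.dumps(list(range(100))))

# Slice the byte string into CHUNK_SIZE pieces, one cache entry per piece,
# plus a final entry under the unadorned key recording the piece count.
i = 0
while len(payload) > CHUNK_SIZE:
    fake_cache['demo-%d' % i] = [payload[:CHUNK_SIZE]]
    payload = payload[CHUNK_SIZE:]
    i += 1
fake_cache['demo-%d' % i] = [payload]
fake_cache['demo'] = '%d' % (i + 1)
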
def __init__(self):
    """Initialize the parsed file information."""
    self.origFile = None
    self.newFile = None
    self.origInfo = None
    self.newInfo = None
    self.origChangesetId = None
    self.binary = False
    self.deleted = False
    self.moved = False
    self.copied = False
    self.insert_count = 0
    self.delete_count = 0

    self._data_io = StringIO()
    self._data = None

def _migrate_extension_models(self, ext_class):
    """Perform database migrations for an extension's models.

    This will call out to Django Evolution to handle the migrations.

    Args:
        ext_class (djblets.extensions.extension.Extension):
            The class for the extension to migrate.
    """
    if django_evolution is None:
        # Django Evolution isn't installed. Extensions with evolutions
        # are not supported.
        return

    try:
        stream = StringIO()
        call_command('evolve', verbosity=0, interactive=False,
                     execute=True, stdout=stream, stderr=stream)

        output = stream.getvalue()

        if output:
            logging.info('Evolved extension models for %s: %s',
                         ext_class.id, output)

        stream.close()
    except CommandError as e:
        # Something went wrong while running django-evolution, so grab the
        # output.  We can't raise right away because we still need to put
        # stdout back the way it was.
        output = stream.getvalue()
        stream.close()

        logging.error('Error evolving extension models: %s: %s',
                      e, output, exc_info=1)

        load_error = self._store_load_error(ext_class.id, output)
        raise InstallExtensionError(six.text_type(e), load_error)

def _migrate_extension_models(self, ext_class):
    """Perform database migrations for an extension's models.

    This will call out to Django Evolution to handle the migrations.

    Args:
        ext_class (djblets.extensions.extension.Extension):
            The class for the extension to migrate.
    """
    if django_evolution is None:
        # Django Evolution isn't installed. Extensions with evolutions
        # are not supported.
        return

    try:
        stream = StringIO()
        call_command('evolve', verbosity=0, interactive=False,
                     execute=True, stdout=stream, stderr=stream)

        output = stream.getvalue()

        if output:
            logger.info('Evolved extension models for %s: %s',
                        ext_class.id, output)

        stream.close()
    except CommandError as e:
        # Something went wrong while running django-evolution, so grab the
        # output.  We can't raise right away because we still need to put
        # stdout back the way it was.
        output = stream.getvalue()
        stream.close()

        logger.exception('Error evolving extension models: %s: %s',
                         e, output)

        load_error = self._store_load_error(ext_class.id, output)
        raise InstallExtensionError(six.text_type(e), load_error)

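# A minimal sketch of the call_command-into-StringIO capture used above,
# assuming a configured Django project.  The built-in 'check' command stands
# in for 'evolve', which belongs to the separate Django Evolution package:
from io import StringIO
from django.core.management import call_command

stream = StringIO()
call_command('check', stdout=stream, stderr=stream)
output = stream.getvalue()  # everything the command printed
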
class FileStream(object):
    """File stream for streaming responses.

    This buffer is intended for use as an argument to StreamingHTTPResponse
    and also as a file for TarFile to write into.

    Files are read in by chunks and written to this buffer through TarFile.
    When there is content to be read from the buffer, it is taken up by
    StreamingHTTPResponse and the buffer is cleared to prevent storing large
    chunks of data in memory.
    """

    def __init__(self):
        self.buffer = StringIO()
        self.offset = 0

    def write(self, s):
        """Write ``s`` to the buffer and adjust the offset."""
        self.buffer.write(s)
        self.offset += len(s)

    def tell(self):
        """Return the current position of the buffer."""
        return self.offset

    def close(self):
        """Close the buffer."""
        self.buffer.close()

    def pop(self):
        """Return the current contents of the buffer, then clear it."""
        s = self.buffer.getvalue()
        self.buffer.close()
        self.buffer = StringIO()

        return s

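# A usage sketch for FileStream above.  The write/pop cycle is the whole
# protocol: a producer such as TarFile writes, the consumer pops, and the
# buffer is emptied so memory stays bounded while the offset keeps growing:
stream = FileStream()
stream.write('first chunk of data')
chunk = stream.pop()          # -> 'first chunk of data'; buffer is now empty
stream.write('second chunk')
assert stream.tell() == len('first chunk of data') + len('second chunk')
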
def _cache_store_chunks(items, key, expiration):
    """Store a list of items as chunks in the cache.

    The list of items will be combined into chunks and stored in the cache
    as efficiently as possible.

    Each item in the list will be yielded to the caller as it's fetched from
    the list or generator.
    """
    chunks_data = StringIO()
    chunks_data_len = 0
    read_start = 0
    item_count = 0
    i = 0

    for data, has_item, item in items:
        if has_item:
            yield item
            item_count += 1

        chunks_data.write(data)
        chunks_data_len += len(data)

        if chunks_data_len > CACHE_CHUNK_SIZE:
            # We have enough data to fill a chunk now. Start processing
            # what we've stored and create cache keys for each chunk.
            # Anything remaining will be stored for the next round.
            chunks_data.seek(read_start)
            cached_data = {}

            while chunks_data_len > CACHE_CHUNK_SIZE:
                chunk = chunks_data.read(CACHE_CHUNK_SIZE)
                chunk_len = len(chunk)
                chunks_data_len -= chunk_len
                read_start += chunk_len

                # Note that we wrap the chunk in a list so that the cache
                # backend won't try to perform any conversion on the string.
                cached_data[make_cache_key('%s-%d' % (key, i))] = [chunk]
                i += 1

            # Store the keys in the cache in a single request.
            cache.set_many(cached_data, expiration)

            # Reposition back at the end of the stream.
            chunks_data.seek(0, 2)

    if chunks_data_len > 0:
        # There's one last bit of data to store. Note that this should be
        # less than the size of a chunk.
        assert chunks_data_len <= CACHE_CHUNK_SIZE

        chunks_data.seek(read_start)
        chunk = chunks_data.read()

        cache.set(make_cache_key('%s-%d' % (key, i)), [chunk], expiration)
        i += 1

    cache.set(make_cache_key(key), '%d' % i, expiration)

def get(self, request, *args, **kwargs):
    """Handle GET requests for this view.

    This will create the renderer for the diff fragment and render it in
    order to get the PatchError information. It then returns a response
    with a zip file containing all the debug data.

    If no PatchError occurred, this will return a 404.

    Args:
        request (django.http.HttpRequest):
            The HTTP request.

        *args (tuple):
            Additional positional arguments for the view.

        **kwargs (dict):
            Additional keyword arguments for the view.

    Returns:
        django.http.HttpResponse:
        A response containing the data bundle.
    """
    try:
        renderer_settings = self._get_renderer_settings(**kwargs)
        etag = self.make_etag(renderer_settings, **kwargs)

        if etag_if_none_match(request, etag):
            return HttpResponseNotModified()

        diff_info_or_response = self.process_diffset_info(**kwargs)

        if isinstance(diff_info_or_response, HttpResponse):
            return diff_info_or_response
    except Http404:
        return HttpResponseNotFound()
    except Exception as e:
        logging.exception(
            '%s.get: Error when processing diffset info for filediff '
            'ID=%s, interfilediff ID=%s, chunk_index=%s: %s',
            self.__class__.__name__,
            kwargs.get('filediff_id'),
            kwargs.get('interfilediff_id'),
            kwargs.get('chunk_index'),
            e,
            request=request)

        return HttpResponseServerError()

    kwargs.update(diff_info_or_response)

    try:
        context = self.get_context_data(**kwargs)

        renderer = self.create_renderer(
            context=context,
            renderer_settings=renderer_settings,
            *args,
            **kwargs)
        renderer.render_to_response(request)
    except PatchError as e:
        patch_error = e
    except Exception as e:
        logging.exception(
            '%s.get: Error when rendering diffset for filediff ID=%s, '
            'interfilediff ID=%s, chunk_index=%s: %s',
            self.__class__.__name__,
            kwargs.get('filediff_id'),
            kwargs.get('interfilediff_id'),
            kwargs.get('chunk_index'),
            e,
            request=request)

        return HttpResponseServerError()
    else:
        return HttpResponseNotFound()

    zip_data = StringIO()

    with ZipFile(zip_data, 'w') as zipfile:
        basename = os.path.basename(patch_error.filename)
        zipfile.writestr('%s.orig' % basename, patch_error.orig_file)
        zipfile.writestr('%s.diff' % basename, patch_error.diff)

        if patch_error.rejects:
            zipfile.writestr('%s.rej' % basename, patch_error.rejects)

        if patch_error.new_file:
            zipfile.writestr('%s.new' % basename, patch_error.new_file)

    rsp = HttpResponse(zip_data.getvalue(), content_type='application/zip')
    rsp['Content-Disposition'] = \
        'attachment; filename=%s.zip' % basename

    return rsp

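# A minimal self-contained sketch of the in-memory zip assembly above
# (standard library only; the file names and contents are invented):
from io import BytesIO
from zipfile import ZipFile

zip_data = BytesIO()

with ZipFile(zip_data, 'w') as bundle:
    bundle.writestr('foo.txt.orig', b'original contents')
    bundle.writestr('foo.txt.diff', b'--- foo.txt\n+++ foo.txt\n')

zip_bytes = zip_data.getvalue()  # ready to ship as an HTTP response body
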
def parse(self):
    """
    Parses the diff, returning a list of File objects representing each
    file in the diff.
    """
    self.files = []

    i = 0
    preamble = StringIO()

    while i < len(self.lines):
        next_i, file_info, new_diff = self._parse_diff(i)

        if file_info:
            if self.files:
                self.files[-1].finalize()

            self._ensure_file_has_required_fields(file_info)

            file_info.prepend_data(preamble.getvalue())
            preamble.close()
            preamble = StringIO()

            self.files.append(file_info)
        elif new_diff:
            # We found a diff, but it was empty and has no file entry.
            # Reset the preamble.
            preamble.close()
            preamble = StringIO()
        else:
            preamble.write(self.lines[i])
            preamble.write(b'\n')

        i = next_i

    try:
        if self.files:
            self.files[-1].finalize()
        elif preamble.getvalue().strip() != b'':
            # This is probably not an actual git diff file.
            raise DiffParserError('This does not appear to be a git diff', 0)
    finally:
        preamble.close()

    return self.files

def __init__(self):
    self.buffer = StringIO()
    self.offset = 0

def process_response(self, request, response):
    if settings.YADP_ENABLED and settings.YADP_PROFILE_PARAMETER in request.REQUEST:
        if self.error:
            return text_response(response, self.error)
        self.profiler.create_stats()
        mode = request.REQUEST[settings.YADP_PROFILE_PARAMETER]
        if mode == 'file':
            # Work around bug on Python versions >= 2.7.4
            mode = 'fil'
        if not mode:
            if not which('dot'):
                return text_response(response, _('Could not find "dot" from Graphviz; please install Graphviz to enable call graph generation'))
            if not which('gprof2dot.py'):
                return text_response(response, _('Could not find gprof2dot.py, which should have been installed by yet-another-django-profiler'))
            with tempfile.NamedTemporaryFile() as stats:
                stats.write(marshal.dumps(self.profiler.stats))
                stats.flush()
                cmd = 'gprof2dot.py -f pstats {} | dot -Tpdf'.format(stats.name)
                process = subprocess.Popen(cmd, shell=True,
                                           stdin=subprocess.PIPE,
                                           stdout=subprocess.PIPE)
                output = process.communicate()[0]
                return_code = process.poll()
            if return_code:
                raise Exception(_('gprof2dot.py exited with {return_code}').format(return_code=return_code))
            response.content = output
            response['Content-Type'] = 'application/pdf'
            return response
        elif mode == 'help':
            return text_response(response, ProfilerMiddleware.__doc__)
        else:
            out = StringIO()
            stats = pstats.Stats(self.profiler, stream=out)
            with mock.patch('pstats.func_strip_path') as mock_func_strip_path:
                mock_func_strip_path.side_effect = func_strip_path
                stats.strip_dirs()
            restrictions = []
            if settings.YADP_PATTERN_PARAMETER in request.REQUEST:
                restrictions.append(request.REQUEST[settings.YADP_PATTERN_PARAMETER])
            if settings.YADP_FRACTION_PARAMETER in request.REQUEST:
                restrictions.append(float(request.REQUEST[settings.YADP_FRACTION_PARAMETER]))
            elif settings.YADP_MAX_CALLS_PARAMETER in request.REQUEST:
                restrictions.append(int(request.REQUEST[settings.YADP_MAX_CALLS_PARAMETER]))
            elif settings.YADP_PATTERN_PARAMETER not in request.REQUEST:
                restrictions.append(.2)
            stats.sort_stats(mode).print_stats(*restrictions)
            return text_response(response, out.getvalue())
    return response

class UnicodeWriter(object):
    """Convert a two-dimensional data structure into a UTF-8-encoded CSV byte
    string.  Inspired by <http://docs.python.org/library/csv.html#examples>.
    """
    # FixMe: This must go when dropping support for Python 2.

    def __init__(self, stream=None, dialect=csv.excel_tab, encoding="utf-8", **kwargs):
        """Additional keyword arguments are passed to the ``csv.writer``
        factory function in Python's ``csv`` module.  After having
        instantiated this class, you can use `writerow` and `writerows` to
        add data to it, and then extract it in the CSV format using
        `getvalue`.

        :param stream: the writable file-like object where the output should
            be sent; if ``None``, you must get the output with `getvalue`.
        :param dialect: the CSV format; it defaults to Excel's TAB format
            (TAB-separated, double quotes)
        :param encoding: name of the output encoding to be used; defaults to
            UTF-8

        :type stream: file
        :type dialect: ``csv.Dialect``
        :type encoding: str
        """
        import codecs
        self.queue = StringIO()
        self.writer = csv.writer(self.queue, dialect=dialect, **kwargs)
        self.stream = stream if stream else StringIO()
        self.encoder = codecs.getincrementalencoder(encoding)()

    def writerow(self, row):
        """Add the given row to the output.

        :param row: list of the table cells

        :type row: list of object
        """
        output_row = []
        for s in row:
            if s is None:
                output_row.append("")
            else:
                output_row.append(six.text_type(s).encode("utf-8"))
        self.writer.writerow(output_row)
        data = self.queue.getvalue()
        data = data.decode("utf-8")
        data = self.encoder.encode(data)
        self.stream.write(data)
        self.queue.truncate(0)

    def writerows(self, rows):
        """Add the given rows to the output.

        :param rows: list of rows; each row is a list of table cells

        :type rows: list of list of object
        """
        for row in rows:
            self.writerow(row)

    def getvalue(self):
        """Get the output so far.  Normally, you will call this method after
        the instance was filled with all data.  Thus, after calling this
        method, the instance of ``UnicodeWriter`` is typically no longer
        used.

        :return: the table in CSV format, as an encoded octet string

        :rtype: str
        """
        return self.stream.getvalue()

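# A usage sketch for UnicodeWriter above (Python 2 era, per the FixMe note;
# the sample rows are invented).  Rows go in as arbitrary objects and the
# encoded CSV comes back out of getvalue():
writer = UnicodeWriter()
writer.writerow(["sample", "temperature/K", "comment"])
writer.writerows([["14-TB-1", 300, u"ok"],
                  ["14-TB-2", 310, None]])   # None becomes an empty cell
csv_bytes = writer.getvalue()  # TAB-separated, UTF-8-encoded octet string
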
def parse(self):
    """
    Parses the diff, returning a list of File objects representing each
    file in the diff.
    """
    logging.debug("DiffParser.parse: Beginning parse of diff, size = %s",
                  len(self.data))

    preamble = StringIO()
    self.files = []
    parsed_file = None
    i = 0

    # Go through each line in the diff, looking for diff headers.
    while i < len(self.lines):
        next_linenum, new_file = self.parse_change_header(i)

        if new_file:
            # This line is the start of a new file diff.
            #
            # First, finalize the last one.
            if self.files:
                self.files[-1].finalize()

            parsed_file = new_file

            # We need to prepend the preamble, if we have one.
            parsed_file.prepend_data(preamble.getvalue())

            preamble.close()
            preamble = StringIO()

            self.files.append(parsed_file)
            i = next_linenum
        else:
            if parsed_file:
                i = self.parse_diff_line(i, parsed_file)
            else:
                preamble.write(self.lines[i])
                preamble.write(b'\n')
                i += 1

    if self.files:
        self.files[-1].finalize()

    preamble.close()

    logging.debug("DiffParser.parse: Finished parsing diff.")

    return self.files

def _install_extension_media_internal(self, ext_class):
    """Installs extension data.

    Performs any installation necessary for an extension.

    If the extension has a legacy htdocs/ directory for static media files,
    they will be installed into MEDIA_ROOT/ext/, and a warning will be
    logged.

    If the extension has a modern static/ directory, they will be installed
    into STATIC_ROOT/ext/.
    """
    ext_htdocs_path = ext_class.info.installed_htdocs_path
    ext_htdocs_path_exists = os.path.exists(ext_htdocs_path)

    if ext_htdocs_path_exists:
        # First, get rid of the old htdocs contents, so we can start
        # fresh.
        shutil.rmtree(ext_htdocs_path, ignore_errors=True)

    if pkg_resources.resource_exists(ext_class.__module__, 'htdocs'):
        # This is an older extension that doesn't use the static file
        # support. Log a deprecation notice and then install the files.
        logging.warning('The %s extension uses the deprecated "htdocs" '
                        'directory for static files. It should be updated '
                        'to use a "static" directory instead.'
                        % ext_class.info.name)

        extracted_path = \
            pkg_resources.resource_filename(ext_class.__module__, 'htdocs')

        shutil.copytree(extracted_path, ext_htdocs_path, symlinks=True)

    # We only want to install static media on a non-DEBUG install.
    # Otherwise, we run the risk of creating a new 'static' directory and
    # causing Django to look up all static files (not just from
    # extensions) from there instead of from their source locations.
    if not settings.DEBUG:
        ext_static_path = ext_class.info.installed_static_path
        ext_static_path_exists = os.path.exists(ext_static_path)

        if ext_static_path_exists:
            # Also get rid of the old static contents.
            shutil.rmtree(ext_static_path, ignore_errors=True)

        if pkg_resources.resource_exists(ext_class.__module__, 'static'):
            extracted_path = \
                pkg_resources.resource_filename(ext_class.__module__,
                                                'static')

            shutil.copytree(extracted_path, ext_static_path, symlinks=True)

    # Mark the extension as installed.
    ext_class.registration.installed = True
    ext_class.registration.save()

    # Now let's build any tables that this extension might need.
    self._add_to_installed_apps(ext_class)

    # Call syncdb to create the new tables.
    loading.cache.loaded = False
    call_command('syncdb', verbosity=0, interactive=False)

    # Run evolve to do any table modification.
    try:
        stream = StringIO()
        evolution = Evolution()
        evolution.style = no_style()
        evolution.execute(verbosity=0, interactive=False, execute=True,
                          hint=False, compile_sql=False, purge=False,
                          database=False, stdout=stream, stderr=stream)

        output = stream.getvalue()

        if output:
            logging.info('Evolved extension models for %s: %s',
                         ext_class.id, output)

        stream.close()
    except CommandError as e:
        # Something went wrong while running django-evolution, so grab the
        # output.  We can't raise right away because we still need to put
        # stdout back the way it was.
        output = stream.getvalue()
        stream.close()

        logging.error('Error evolving extension models: %s: %s',
                      e, output, exc_info=1)

        load_error = self._store_load_error(ext_class.id, output)
        raise InstallExtensionError(six.text_type(e), load_error)

    # Remove this again, since we only needed it for syncdb and evolve.
    # _init_extension will add it again later in the install.
    self._remove_from_installed_apps(ext_class)

    # Mark the extension as installed.
    ext_class.registration.installed = True
    ext_class.registration.save()

class ParsedDiffFile(object):
    """A parsed file from a diff.

    This stores information on a single file represented in a diff,
    including the contents of that file's diff, as parsed by
    :py:class:`DiffParser` or one of its subclasses.

    Parsers should set the attributes on this based on the contents of the
    diff, and should add any data found in the diff.

    This class is meant to be used internally and by subclasses of
    :py:class:`DiffParser`.
    """

    def __init__(self):
        """Initialize the parsed file information."""
        self.origFile = None
        self.newFile = None
        self.origInfo = None
        self.newInfo = None
        self.origChangesetId = None
        self.binary = False
        self.deleted = False
        self.moved = False
        self.copied = False
        self.insert_count = 0
        self.delete_count = 0

        self._data_io = StringIO()
        self._data = None

    @property
    def data(self):
        """The data for this diff.

        This must be accessed after :py:meth:`finalize` has been called.
        """
        if self._data is None:
            raise ValueError('ParsedDiffFile.data cannot be accessed until '
                             'finalize() is called.')

        return self._data

    def finalize(self):
        """Finalize the parsed diff.

        This makes the diff data available to consumers and closes the
        buffer for writing.
        """
        self._data = self._data_io.getvalue()
        self._data_io.close()

    def prepend_data(self, data):
        """Prepend data to the buffer.

        Args:
            data (bytes):
                The data to prepend.
        """
        if data:
            new_data_io = StringIO()
            new_data_io.write(data)
            new_data_io.write(self._data_io.getvalue())

            self._data_io.close()
            self._data_io = new_data_io

    def append_data(self, data):
        """Append data to the buffer.

        Args:
            data (bytes):
                The data to append.
        """
        if data:
            self._data_io.write(data)

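# A usage sketch for ParsedDiffFile above, driving the append/prepend/
# finalize life cycle a parser would follow.  (This is py2-era code, where
# the "bytes" the docstrings mention are ordinary str objects; the diff
# content here is invented.)
parsed = ParsedDiffFile()
parsed.append_data('@@ -1 +1 @@\n-old\n+new\n')   # body arrives first
parsed.prepend_data('diff --git a/f b/f\n')       # then the preamble
parsed.finalize()
assert parsed.data.startswith('diff --git')
# Accessing .data before finalize() raises ValueError.
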