def safe_join(base, *paths):
    """
    Join one or more path components to the base path component intelligently.
    Return a normalized, absolute version of the final path.

    Raise SuspiciousFileOperation if the final path isn't located inside of
    the base path component.
    """
    base = force_text(base)
    paths = [force_text(p) for p in paths]
    final_path = abspath(join(base, *paths))
    base_path = abspath(base)
    # Ensure final_path starts with base_path (using normcase to ensure we
    # don't false-negative on case insensitive operating systems like Windows),
    # further, one of the following conditions must be true:
    #  a) The next character is the path separator (to prevent conditions like
    #     safe_join("/dir", "/../d"))
    #  b) The final path must be the same as the base path.
    #  c) The base path must be the most root path (meaning either "/" or "C:\\")
    if (not normcase(final_path).startswith(normcase(base_path + sep)) and
            normcase(final_path) != normcase(base_path) and
            dirname(normcase(base_path)) != normcase(base_path)):
        raise SuspiciousFileOperation(
            'The joined path ({}) is located outside of the base path '
            'component ({})'.format(final_path, base_path))
    return final_path
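
# Illustrative sketch, not part of the original module: how safe_join()
# accepts joins that stay under the base path and rejects traversal outside
# it, covering conditions (a)-(c) above. Assumes POSIX-style paths; the
# literal paths and the function name below are hypothetical.
def _example_safe_join_usage():
    # Condition (a): the joined path sits under base + separator.
    assert safe_join('/var/www', 'static', 'css') == '/var/www/static/css'
    # Condition (c): the base is already the filesystem root.
    assert safe_join('/', 'etc') == '/etc'
    try:
        safe_join('/var/www', '..', 'etc', 'passwd')
    except SuspiciousFileOperation:
        pass  # '/var/etc/passwd' falls outside '/var/www', as expected.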
def __init__(self, param, cursor, strings_only=False):
    # With raw SQL queries, datetimes can reach this function
    # without being converted by DateTimeField.get_db_prep_value.
    if settings.USE_TZ and (isinstance(param, datetime.datetime) and
                            not isinstance(param, Oracle_datetime)):
        param = Oracle_datetime.from_datetime(param)

    string_size = 0
    # Oracle doesn't recognize True and False correctly.
    if param is True:
        param = 1
    elif param is False:
        param = 0
    if hasattr(param, 'bind_parameter'):
        self.force_bytes = param.bind_parameter(cursor)
    elif isinstance(param, (Database.Binary, datetime.timedelta)):
        self.force_bytes = param
    else:
        # To transmit to the database, we need Unicode if supported.
        # To get the size right, we must consider bytes.
        self.force_bytes = force_text(param, cursor.charset, strings_only)
        if isinstance(self.force_bytes, str):
            # We could optimize by only converting up to 4000 bytes here.
            string_size = len(force_bytes(param, cursor.charset, strings_only))
    if hasattr(param, 'input_size'):
        # If parameter has `input_size` attribute, use that.
        self.input_size = param.input_size
    elif string_size > 4000:
        # Mark any string param greater than 4000 bytes as a CLOB.
        self.input_size = Database.CLOB
    elif isinstance(param, datetime.datetime):
        self.input_size = Database.TIMESTAMP
    else:
        self.input_size = None
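
# Illustrative sketch, not part of the original module: the CLOB cutoff
# above is measured in *bytes* (string_size comes from force_bytes), not
# characters, so a multi-byte charset can push a short-looking string over
# the 4000-byte limit. The function name below is hypothetical.
def _example_clob_threshold():
    text = '\u00e9' * 2500          # 2500 characters...
    encoded = text.encode('utf-8')  # ...but 5000 bytes in UTF-8.
    assert len(text) == 2500
    assert len(encoded) == 5000     # > 4000, so it would bind as a CLOB.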
def fields(self): "Return a list of fields in the Feature." return [ force_text(capi.get_field_name( capi.get_field_defn(self._layer._ldefn, i)), self.encoding, strings_only=True) for i in range(self.num_fields) ]
def verify_ogr_field(self, ogr_field, model_field):
    """
    Verify if the OGR Field contents are acceptable to the model field. If
    they are, return the verified value, otherwise raise an exception.
    """
    if (isinstance(ogr_field, OFTString) and
            isinstance(model_field, (models.CharField, models.TextField))):
        if self.encoding:
            # The encoding for OGR data sources may be specified here
            # (e.g., 'cp437' for Census Bureau boundary files).
            val = force_text(ogr_field.value, self.encoding)
        else:
            val = ogr_field.value
        if model_field.max_length and len(val) > model_field.max_length:
            raise InvalidString('%s model field maximum string length is %s, given %s characters.' %
                                (model_field.name, model_field.max_length, len(val)))
    elif isinstance(ogr_field, OFTReal) and isinstance(model_field, models.DecimalField):
        try:
            # Create a Decimal instance from the value.
            d = Decimal(str(ogr_field.value))
        except DecimalInvalidOperation:
            raise InvalidDecimal('Could not construct decimal from: %s' % ogr_field.value)

        # Get the decimal value as a tuple.
        dtup = d.as_tuple()
        digits = dtup[1]
        d_idx = dtup[2]  # index where the decimal is

        # Maximum amount of precision, or digits to the left of the decimal.
        max_prec = model_field.max_digits - model_field.decimal_places

        # Get the digits to the left of the decimal place for the
        # given decimal.
        if d_idx < 0:
            n_prec = len(digits[:d_idx])
        else:
            n_prec = len(digits) + d_idx

        # If we have more than the maximum digits allowed, then raise an
        # InvalidDecimal exception.
        if n_prec > max_prec:
            raise InvalidDecimal(
                'A DecimalField with max_digits %d, decimal_places %d must '
                'round to an absolute value less than 10^%d.' %
                (model_field.max_digits, model_field.decimal_places, max_prec)
            )
        val = d
    elif isinstance(ogr_field, (OFTReal, OFTString)) and isinstance(model_field, models.IntegerField):
        # Attempt to convert any OFTReal and OFTString value to an OFTInteger.
        try:
            val = int(ogr_field.value)
        except ValueError:
            raise InvalidInteger('Could not construct integer from: %s' % ogr_field.value)
    else:
        val = ogr_field.value
    return val
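
# Illustrative sketch, not part of the original module: the left-of-decimal
# digit count computed above, using Decimal.as_tuple(). For 1234.56 the
# tuple is (sign=0, digits=(1, 2, 3, 4, 5, 6), exponent=-2), so n_prec =
# len(digits[:-2]) = 4; a DecimalField(max_digits=6, decimal_places=2)
# allows max_prec = 6 - 2 = 4 and this value just fits. The function name
# below is hypothetical.
def _example_decimal_precision():
    from decimal import Decimal
    d = Decimal('1234.56')
    sign, digits, exponent = d.as_tuple()
    n_prec = len(digits[:exponent]) if exponent < 0 else len(digits) + exponent
    assert n_prec == 4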
def fields(self): """ Return a list of string names corresponding to each of the Fields available in this Layer. """ return [ force_text(capi.get_field_name(capi.get_field_defn(self._ldefn, i)), self._ds.encoding, strings_only=True) for i in range(self.num_fields) ]
def handle_file_complete(self, old_field_name, counters):
    """
    Handle all the signaling that takes place when a file is complete.
    """
    for i, handler in enumerate(self._upload_handlers):
        file_obj = handler.file_complete(counters[i])
        if file_obj:
            # If it returns a file object, then set the files dict.
            self._files.appendlist(
                force_text(old_field_name, self._encoding, errors='replace'),
                file_obj)
            break
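
# Illustrative sketch, not part of the original module: the first handler
# whose file_complete() returns a file object "claims" the upload and the
# loop above breaks. A minimal handler shape, assuming the upload-handler
# protocol used by this parser (new_file / receive_data_chunk /
# file_complete); the class name is hypothetical.
class _ExamplePassThroughHandler:
    def new_file(self, *args, **kwargs):
        self.size = 0

    def receive_data_chunk(self, raw_data, start):
        self.size += len(raw_data)
        return raw_data  # Pass the chunk on to the next handler.

    def file_complete(self, file_size):
        return None  # Returning None lets a later handler claim the file.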
def units(self):
    """
    Return a 2-tuple of the units value and the units name. Automatically
    determine whether to return the linear or angular units.
    """
    units, name = None, None
    if self.projected or self.local:
        units, name = capi.linear_units(self.ptr, byref(c_char_p()))
    elif self.geographic:
        units, name = capi.angular_units(self.ptr, byref(c_char_p()))
    if name is not None:
        name = force_text(name)
    return (units, name)
def to_string(s):
    return force_text(s, strings_only=True, errors='replace')
def parse(self):
    """
    Parse the POST data and break it into a FILES MultiValueDict and a POST
    MultiValueDict. Return a tuple containing the POST and FILES dictionary,
    respectively.
    """
    from server.http import QueryDict

    encoding = self._encoding
    handlers = self._upload_handlers

    # The HTTP spec says that Content-Length >= 0 is valid, so handle
    # content-length == 0 before continuing.
    if self._content_length == 0:
        return QueryDict(encoding=self._encoding), MultiValueDict()

    # See if any of the handlers take care of the parsing.
    # This allows overriding everything if need be.
    for handler in handlers:
        result = handler.handle_raw_input(
            self._input_data,
            self._meta,
            self._content_length,
            self._boundary,
            encoding,
        )
        # Check to see if it was handled
        if result is not None:
            return result[0], result[1]

    # Create the data structures to be used later.
    self._post = QueryDict(mutable=True)
    self._files = MultiValueDict()

    # Instantiate the parser and stream:
    stream = LazyStream(ChunkIter(self._input_data, self._chunk_size))

    # Whether or not to signal a file-completion at the beginning of the loop.
    old_field_name = None
    counters = [0] * len(handlers)

    # Number of bytes that have been read.
    num_bytes_read = 0
    # To count the number of keys in the request.
    num_post_keys = 0
    # To limit the amount of data read from the request.
    read_size = None

    try:
        for item_type, meta_data, field_stream in Parser(stream, self._boundary):
            if old_field_name:
                # We run this at the beginning of the next loop
                # since we cannot be sure a file is complete until
                # we hit the next boundary/part of the multipart content.
                self.handle_file_complete(old_field_name, counters)
                old_field_name = None

            try:
                disposition = meta_data['content-disposition'][1]
                field_name = disposition['name'].strip()
            except (KeyError, IndexError, AttributeError):
                continue

            transfer_encoding = meta_data.get('content-transfer-encoding')
            if transfer_encoding is not None:
                transfer_encoding = transfer_encoding[0].strip()
            field_name = force_text(field_name, encoding, errors='replace')

            if item_type == FIELD:
                # Avoid storing more than DATA_UPLOAD_MAX_NUMBER_FIELDS.
                num_post_keys += 1
                if (settings.DATA_UPLOAD_MAX_NUMBER_FIELDS is not None and
                        settings.DATA_UPLOAD_MAX_NUMBER_FIELDS < num_post_keys):
                    raise TooManyFieldsSent(
                        'The number of GET/POST parameters exceeded '
                        'settings.DATA_UPLOAD_MAX_NUMBER_FIELDS.')

                # Avoid reading more than DATA_UPLOAD_MAX_MEMORY_SIZE.
                if settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None:
                    read_size = settings.DATA_UPLOAD_MAX_MEMORY_SIZE - num_bytes_read

                # This is a post field, we can just set it in the post.
                if transfer_encoding == 'base64':
                    raw_data = field_stream.read(size=read_size)
                    num_bytes_read += len(raw_data)
                    try:
                        data = base64.b64decode(raw_data)
                    except binascii.Error:
                        data = raw_data
                else:
                    data = field_stream.read(size=read_size)
                    num_bytes_read += len(data)

                # Add two here to make the check consistent with the
                # x-www-form-urlencoded check that includes '&='.
                num_bytes_read += len(field_name) + 2
                if (settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None and
                        num_bytes_read > settings.DATA_UPLOAD_MAX_MEMORY_SIZE):
                    raise RequestDataTooBig(
                        'Request body exceeded settings.DATA_UPLOAD_MAX_MEMORY_SIZE.'
                    )

                self._post.appendlist(
                    field_name, force_text(data, encoding, errors='replace'))
            elif item_type == FILE:
                # This is a file, use the handler...
                file_name = disposition.get('filename')
                if file_name:
                    file_name = force_text(file_name, encoding, errors='replace')
                    file_name = self.IE_sanitize(unescape_entities(file_name))
                if not file_name:
                    continue

                content_type, content_type_extra = meta_data.get('content-type', ('', {}))
                content_type = content_type.strip()
                charset = content_type_extra.get('charset')

                try:
                    content_length = int(meta_data.get('content-length')[0])
                except (IndexError, TypeError, ValueError):
                    content_length = None

                counters = [0] * len(handlers)
                try:
                    for handler in handlers:
                        try:
                            handler.new_file(
                                field_name, file_name, content_type,
                                content_length, charset, content_type_extra,
                            )
                        except StopFutureHandlers:
                            break

                    for chunk in field_stream:
                        if transfer_encoding == 'base64':
                            # We only special-case base64 transfer encoding.
                            # We should always decode base64 chunks by a
                            # multiple of 4, ignoring whitespace.
                            stripped_chunk = b"".join(chunk.split())

                            remaining = len(stripped_chunk) % 4
                            while remaining != 0:
                                over_chunk = field_stream.read(4 - remaining)
                                if not over_chunk:
                                    # Guard against an exhausted stream, which
                                    # would otherwise loop here forever on
                                    # malformed input.
                                    break
                                stripped_chunk += b"".join(over_chunk.split())
                                remaining = len(stripped_chunk) % 4

                            try:
                                chunk = base64.b64decode(stripped_chunk)
                            except Exception as exc:
                                # Since this is only a chunk, any error is an
                                # unfixable error.
                                raise MultiPartParserError(
                                    "Could not decode base64 data."
                                ) from exc

                        for i, handler in enumerate(handlers):
                            chunk_length = len(chunk)
                            chunk = handler.receive_data_chunk(chunk, counters[i])
                            counters[i] += chunk_length
                            if chunk is None:
                                # Don't continue if the chunk received by
                                # the handler is None.
                                break
                except SkipFile:
                    self._close_files()
                    # Just use up the rest of this file...
                    exhaust(field_stream)
                else:
                    # Handle file upload completions on next iteration.
                    old_field_name = field_name
            else:
                # If this is neither a FIELD nor a FILE, just exhaust the stream.
                exhaust(stream)
    except StopUpload as e:
        self._close_files()
        if not e.connection_reset:
            exhaust(self._input_data)
    else:
        # Make sure that the request data is all fed
        exhaust(self._input_data)

    # Signal that the upload has completed.
    # any() shortcircuits if a handler's upload_complete() returns a value.
    any(handler.upload_complete() for handler in handlers)
    self._post._mutable = False
    return self._post, self._files
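
# Illustrative sketch, not part of the original module: why base64 chunks
# must be decoded in multiples of 4. Splitting an encoded payload at an
# arbitrary offset leaves a partial 4-byte group that b64decode() rejects,
# which is what the re-alignment loop in parse() guards against. The
# function name below is hypothetical.
def _example_base64_alignment():
    import base64
    import binascii
    encoded = base64.b64encode(b'multipart payload')  # 24 base64 bytes
    try:
        base64.b64decode(encoded[:10])  # 10 is not a multiple of 4
    except binascii.Error:
        pass  # Partial group: decoding fails with incorrect padding.
    # Re-aligned to a multiple of 4, each piece decodes cleanly.
    assert (base64.b64decode(encoded[:12]) +
            base64.b64decode(encoded[12:]) == b'multipart payload')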
def name(self): """ Return the name of this raster. Corresponds to filename for file-based rasters. """ return force_text(capi.get_ds_description(self._ptr))
def last_executed_query(self, cursor, sql, params):
    # With MySQLdb, cursor objects have an (undocumented) "_executed"
    # attribute where the exact query sent to the database is saved.
    # See MySQLdb/cursors.py in the source distribution.
    return force_text(getattr(cursor, '_executed', None), errors='replace')
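
# Illustrative sketch, not part of the original module: errors='replace'
# matters above because the raw executed query is a byte string that may
# embed binary parameter data which is not valid in the connection's
# encoding. A minimal stand-in for that decoding behavior, assuming UTF-8
# as the target codec; the function name below is hypothetical.
def _example_lossy_decode():
    raw = b'SELECT 1 -- \xff\xfe'  # bytes that aren't valid UTF-8
    assert raw.decode('utf-8', errors='replace') == 'SELECT 1 -- \ufffd\ufffd'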
def name(self): "Return the name of this Field." name = capi.get_field_name(self.ptr) return force_text(name, encoding=self._feat.encoding, strings_only=True)
def as_string(self):
    "Retrieve the Field's value as a string."
    string = capi.get_field_as_string(self._feat.ptr, self._index)
    return force_text(string, encoding=self._feat.encoding, strings_only=True)
def name(self): "Return the name of this layer in the Data Source." name = capi.get_fd_name(self._ldefn) return force_text(name, self._ds.encoding, strings_only=True)
def name(self): "Return the name of the data source." name = capi.get_ds_name(self._ptr) return force_text(name, self.encoding, strings_only=True)
def name(self): """ Return description/name string for this driver. """ return force_text(rcapi.get_driver_description(self.ptr))
def layer_name(self):
    "Return the name of the layer for the feature."
    name = capi.get_feat_name(self._layer._ldefn)
    return force_text(name, self.encoding, strings_only=True)
def convert_textfield_value(self, value, expression, connection):
    if value is not None:
        value = force_text(value)
    return value
def get_traceback_data(self):
    """Return a dictionary containing traceback information."""
    if self.exc_type and issubclass(self.exc_type, TemplateDoesNotExist):
        self.template_does_not_exist = True
        self.postmortem = self.exc_value.chain or [self.exc_value]

    frames = self.get_traceback_frames()
    for i, frame in enumerate(frames):
        if 'vars' in frame:
            frame_vars = []
            for k, v in frame['vars']:
                v = pprint(v)
                # Trim large blobs of data
                if len(v) > 4096:
                    v = '%s... <trimmed %d bytes string>' % (v[0:4096], len(v))
                frame_vars.append((k, v))
            frame['vars'] = frame_vars
        frames[i] = frame

    unicode_hint = ''
    if self.exc_type and issubclass(self.exc_type, UnicodeError):
        start = getattr(self.exc_value, 'start', None)
        end = getattr(self.exc_value, 'end', None)
        if start is not None and end is not None:
            unicode_str = self.exc_value.args[1]
            unicode_hint = force_text(
                unicode_str[max(start - 5, 0):min(end + 5, len(unicode_str))],
                'ascii', errors='replace')
    from server import get_version

    if self.request is None:
        user_str = None
    else:
        try:
            user_str = str(self.request.user)
        except Exception:
            # request.user may raise OperationalError if the database is
            # unavailable, for example.
            user_str = '[unable to retrieve the current user]'

    c = {
        'is_email': self.is_email,
        'unicode_hint': unicode_hint,
        'frames': frames,
        'request': self.request,
        'user_str': user_str,
        'filtered_POST_items': list(self.filter.get_post_parameters(self.request).items()),
        'settings': get_safe_settings(),
        'sys_executable': sys.executable,
        'sys_version_info': '%d.%d.%d' % sys.version_info[0:3],
        'server_time': timezone.now(),
        'server_version_info': get_version(),
        'sys_path': sys.path,
        'template_info': self.template_info,
        'template_does_not_exist': self.template_does_not_exist,
        'postmortem': self.postmortem,
    }
    if self.request is not None:
        c['request_GET_items'] = self.request.GET.items()
        c['request_FILES_items'] = self.request.FILES.items()
        c['request_COOKIES_items'] = self.request.COOKIES.items()
    # Check whether exception info is available
    if self.exc_type:
        c['exception_type'] = self.exc_type.__name__
    if self.exc_value:
        c['exception_value'] = str(self.exc_value)
    if frames:
        c['lastframe'] = frames[-1]
    return c
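
# Illustrative sketch, not part of the original module: the unicode_hint
# above slices roughly five characters of context around the failing range
# reported on a UnicodeError (args[1] holds the object being converted,
# .start/.end bracket the offending bytes). A minimal reproduction with a
# hypothetical byte string; the function name is also hypothetical.
def _example_unicode_hint():
    try:
        b'abcdefgh\xffijklmnop'.decode('ascii')
    except UnicodeError as exc:
        s = exc.args[1]
        hint = s[max(exc.start - 5, 0):min(exc.end + 5, len(s))]
        assert hint == b'defgh\xffijklm'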