def new_file(self, *args, **kwargs):
    super(MemoryFileUploadHandler, self).new_file(*args, **kwargs)
    # the positional signature is (field_name, file_name, ...), so
    # args[1] is the uploaded file's name
    file_name = args[1]
    if not file_name.endswith('.csv'):
        # flag the rejection in the session so the view can report it
        self.request.session['wrong_file_type_%s' % self.cache_key] = True
        self.request.session.save()
        raise SkipFile("wrong_file_type:%s" % file_name)
    if self.activated:
        self.file = BytesIO()
        raise StopFutureHandlers()
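A type-filtering handler like this only takes effect if it is installed before Django touches request.POST or request.FILES. A minimal wiring sketch, following the registration pattern from the Django file-upload docs; CSVUploadHandler and upload_csv are placeholder names, not taken from the snippet above:

from django.views.decorators.csrf import csrf_exempt, csrf_protect

@csrf_exempt
def upload_csv(request):
    # Handlers must be swapped in before the body is parsed, and the
    # CSRF machinery would trigger parsing, hence the exempt/protect
    # split recommended by the Django docs.
    request.upload_handlers.insert(0, CSVUploadHandler(request))
    return _upload_csv(request)

@csrf_protect
def _upload_csv(request):
    ...  # request.FILES has now passed through CSVUploadHandler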
Example #2
    def receive_data_chunk(self, raw_data, start):
        """Checks if a chunk of data will take the upload over the limit."""
        if start + len(raw_data) > self.max_size:
            formatted_file_size = filesizeformat(self.max_size)
            error_msg = self.FILE_TOO_LARGE_MESSAGE.format(
                file_name=self.file_name,
                max_size=formatted_file_size,
            )
            django_messages.error(self.request, error_msg)

            raise SkipFile()
        return raw_data
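The method above leans on attributes that Django's FileUploadHandler does not define by itself. A sketch of the surrounding class it implies; the class name, the 10 MB cap, and the message template are assumptions, not from the snippet:

from django.contrib import messages as django_messages
from django.core.files.uploadhandler import FileUploadHandler, SkipFile
from django.template.defaultfilters import filesizeformat

class MaxSizeUploadHandler(FileUploadHandler):
    FILE_TOO_LARGE_MESSAGE = '{file_name} is larger than the {max_size} limit.'

    def __init__(self, request=None, max_size=10 * 1024 * 1024):
        super(MaxSizeUploadHandler, self).__init__(request)
        # self.file_name is filled in later by FileUploadHandler.new_file()
        self.max_size = max_size  # assumed 10 MB cap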
Example #3
    def new_file(self, field_name, file_name, *args, **kwargs):
        super(SecureFileUploadHandler, self).new_file(field_name, file_name,
                                                      *args, **kwargs)
        if self.passphrase:
            # hand the original name plus the encryption parameters to
            # the encrypted file wrapper
            kwargs['clear_filename'] = file_name
            for attr in ('passphrase', 'expire_date', 'one_time'):
                kwargs[attr] = getattr(self, attr, None)
            self.file = EncryptedUploadedFile(*args, **kwargs)
        else:
            raise SkipFile('No passphrase')
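For the encrypting handler to reach the happy path, self.passphrase (and optionally expire_date / one_time) must have been set before new_file() fires. One plausible constructor, sketched under that assumption; everything beyond the snippet's own names is hypothetical:

from django.core.files.uploadhandler import FileUploadHandler

class SecureFileUploadHandler(FileUploadHandler):
    def __init__(self, request=None, passphrase=None,
                 expire_date=None, one_time=False):
        super(SecureFileUploadHandler, self).__init__(request)
        self.passphrase = passphrase    # without it, new_file() raises
        self.expire_date = expire_date  # SkipFile and the upload is
        self.one_time = one_time        # silently dropped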
Example #4
  def receive_data_chunk(self, raw_data, start):
    LOG.debug("HDFSfileUploadHandler receive_data_chunk")

    if not self._activated:
      path = self.request.META.get('PATH_INFO')
      if path.startswith('/filebrowser') and path != '/filebrowser/upload/archive':
        raise SkipFile()
      return raw_data

    try:
      self._file.write(raw_data)
      self._file.flush()
      return None
    except IOError:
      LOG.exception('Error storing upload data in temporary file "%s"' %
                    (self._file.get_temp_path(),))
      raise StopUpload()
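The early `return raw_data` versus the final `return None` is the crux of Django's handler chain: a returned chunk is passed on to the next handler, while None marks it consumed. A minimal illustration of that contract, with hypothetical classes:

from django.core.files.uploadhandler import FileUploadHandler

class PassThrough(FileUploadHandler):
    def receive_data_chunk(self, raw_data, start):
        return raw_data  # the next handler in the chain still sees it

class Consume(FileUploadHandler):
    def receive_data_chunk(self, raw_data, start):
        self.file.write(raw_data)  # assumes self.file was opened in new_file()
        return None                # consumed; later handlers get nothing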
Example #5
    def receive_data_chunk(self, raw_data, start):
        self.total_upload += len(raw_data)
        if self.total_upload >= self.maxsize:
            self.toobig = True
            raise SkipFile()
        return raw_data
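The counter state this fragment relies on has to be initialised per request, and the toobig flag lets the view tell a skipped oversize file from an empty upload. A hedged usage sketch; QuotaUploadHandler and UploadForm are illustrative names, and the CSRF caveat from the first example still applies:

from django.http import HttpResponse

def upload(request):
    # hypothetical handler that sets total_upload = 0, maxsize and
    # toobig = False in its __init__
    handler = QuotaUploadHandler(request)
    request.upload_handlers.insert(0, handler)
    form = UploadForm(request.POST, request.FILES)  # parsing runs here
    if handler.toobig:
        return HttpResponse('File too large.', status=413)
    ...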
Example #6
    def handle_raw_input(self,
                         input_data,
                         META,
                         content_length,
                         boundary,
                         encoding=None):
        """
        Parse the raw input from the HTTP request and split items into fields
        and files, executing callback methods as necessary.

        Shamelessly adapted and borrowed from
        django.http.multipartparser.MultiPartParser.
        """
        # following suit from the source class, this is imported here to avoid
        # a potential circular import
        from django.http import QueryDict

        # create return values
        self.POST = QueryDict('', mutable=True)
        self.FILES = MultiValueDict()

        # initialize the parser and stream
        stream = LazyStream(ChunkIter(input_data, self.chunk_size))
        # whether or not to signal a file-completion at the beginning
        # of the loop.
        old_field_name = None
        counter = 0

        try:
            for item_type, meta_data, field_stream in Parser(stream, boundary):
                if old_field_name:
                    # we run this test at the beginning of the next loop since
                    # we cannot be sure a file is complete until we hit the
                    # next boundary/part of the multipart content.
                    file_obj = self.file_complete(counter)

                    if file_obj:
                        # if we return a file object, add it to the files dict
                        self.FILES.appendlist(
                            force_text(old_field_name,
                                       encoding,
                                       errors='replace'), file_obj)

                    # wipe it out to prevent havoc
                    old_field_name = None
                try:
                    disposition = meta_data['content-disposition'][1]
                    field_name = disposition['name'].strip()
                except (KeyError, IndexError, AttributeError):
                    continue

                transfer_encoding = meta_data.get('content-transfer-encoding')

                if transfer_encoding is not None:
                    transfer_encoding = transfer_encoding[0].strip()

                field_name = force_text(field_name, encoding, errors='replace')

                if item_type == FIELD:
                    # this is a POST field
                    if transfer_encoding == "base64":
                        raw_data = field_stream.read()
                        try:
                            # str.decode('base64') is Python 2 only;
                            # use the base64 module instead
                            data = base64.b64decode(raw_data)
                        except Exception:
                            data = raw_data
                    else:
                        data = field_stream.read()

                    self.POST.appendlist(
                        field_name, force_text(data,
                                               encoding,
                                               errors='replace'))

                    # trigger listener
                    self.field_parsed(field_name, self.POST.get(field_name))
                elif item_type == FILE:
                    # this is a file
                    file_name = disposition.get('filename')

                    if not file_name:
                        continue

                    # transform the file name
                    file_name = force_text(file_name,
                                           encoding,
                                           errors='replace')
                    file_name = self.IE_sanitize(unescape_entities(file_name))

                    content_type = meta_data.get('content-type',
                                                 ('', ))[0].strip()

                    try:
                        charset = meta_data.get('content-type', (0, {}))[1]\
                            .get('charset', None)
                    except Exception:
                        charset = None

                    try:
                        file_content_length = int(
                            meta_data.get('content-length')[0])
                    except (IndexError, TypeError, ValueError):
                        file_content_length = None

                    counter = 0

                    # now, do the important file stuff
                    try:
                        # alert on the new file
                        kwargs = {
                            'content_type': content_type,
                            'content_length': file_content_length,
                            'charset': charset
                        }
                        self.new_file(field_name, file_name, **kwargs)

                        # chubber-chunk it
                        for chunk in field_stream:
                            # we need AES-compatible blocks (multiples of 16 bytes)
                            over_bytes = len(chunk) % 16
                            if over_bytes:
                                over_chunk =\
                                    field_stream.read(16 - over_bytes)
                                chunk += over_chunk

                            if transfer_encoding == "base64":
                                try:
                                    chunk = base64.b64decode(chunk)
                                except Exception as e:
                                    # since this is only a chunk, any
                                    # error is unrecoverable
                                    raise MultiPartParserError(
                                        "Could not decode base64 data: %r" % e)

                            chunk_length = len(chunk)
                            self.receive_data_chunk(chunk, counter)
                            counter += chunk_length

                            if counter > settings.UPLOAD_FILE_SIZE_LIMIT:
                                raise SkipFile('File is too big.')
                            # ... and we're done
                    except SkipFile:
                        # just eat the rest
                        exhaust(field_stream)
                    else:
                        # handle file upload completions on next iteration
                        old_field_name = field_name

        except StopUpload as e:
            # if we get a request to stop the upload, exhaust the
            # stream unless the connection was reset
            if not e.connection_reset:
                exhaust(input_data)
        else:
            # make sure that the request data is all fed
            exhaust(input_data)

        # signal the upload has been completed
        self.upload_complete()

        return self.POST, self.FILES
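Two details of this parser are worth spelling out. Returning a (POST, FILES) pair from handle_raw_input() tells Django that parsing is finished, so no other upload handler runs. And the 16-byte top-up inside the chunk loop exists because the encrypted file object presumably ciphers full AES blocks; the padding arithmetic, checked on a hypothetical chunk:

chunk = b'x' * 1000            # hypothetical chunk; 1000 % 16 == 8
over_bytes = len(chunk) % 16   # 8 bytes past the last full block
top_up = 16 - over_bytes       # read 8 more bytes from the stream
assert (len(chunk) + top_up) % 16 == 0  # chunk now ends on a block boundary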