def new_file(self, *args, **kwargs):
    super(BlobstoreFileUploadHandler, self).new_file(*args, **kwargs)
    blobkey = self.content_type_extra.get('blob-key')
    self.active = blobkey is not None
    if self.active:
        self.blobkey = BlobKey(blobkey)
        raise StopFutureHandlers()
def new_file(self, field_name, file_name, content_type, content_length,
             charset=None, content_type_extra=None):
    basename, ext = os.path.splitext(file_name)
    ext = ext.lower().strip('.')
    # Browsers rarely send a charset for file parts, so force UTF-8.
    self.charset = 'utf-8'
    if ext in MOLECULE_EXTENSION_TYPES:
        self.filetype = MOLECULE_EXTENSION_TYPES[ext]
    else:
        # Remember the error so the view can report it, then abort cleanly.
        self.exception = InvalidMoleculeFileExtension(ext=ext)
        raise StopUpload(connection_reset=False)
    super(TemporaryMoleculeFileUploadHandlerMaxSize, self).new_file(
        field_name, file_name, content_type, content_length,
        self.charset, content_type_extra)
    raise StopFutureHandlers()
def new_file(self, *args, **kwargs):
    super(TaskUploadHandler, self).new_file(*args, **kwargs)
    # Record a zero-progress row so clients can start polling immediately.
    progress0 = TaskUploadProgress(uploaded_task=self.uploaded_task, progress=0)
    progress0.save()
    self.file = BytesIO()
    raise StopFutureHandlers()
def new_file(self, field_name, file_name, *args, **kwargs):
    super().new_file(field_name, file_name, *args, **kwargs)
    LOG.info('Using FTPUploadHandler to handle file upload.')
    self._ftp_file_name = file_name
    LOG.debug('File name: %s' % self._ftp_file_name)
    LOG.debug('Assumed content size: %s' % self._assumed_contentsize)
    self._file_path = os.path.join(self._ftp_remoteDir, self._ftp_file_name)
    LOG.info('Upload attempt to %s' % self._file_path)
    self._starttime = time.time()
    self._chunk_index = 0
    self._processed_size = 0
    # Connect up front so the remote directory tree exists before chunks arrive.
    ftp = FTP(self.ftp_host)
    ftp.login(self.ftp_username, self.ftp_pwd)
    self.mkdir_p(ftp, self._ftp_remoteDir)
    raise StopFutureHandlers()
def new_file(self, field_name, file_name, content_type, content_length,
             charset=None, content_type_extra=None):
    """
    We can kill a lot of this hackery in Django 1.7 when
    content_type_extra is actually passed in!
    """
    self.data.seek(0)  # Rewind
    data = self.data.read()
    parts = data.split(self.boundary)
    for part in parts:
        match = re.search(r'blob-key="?(?P<blob_key>[:a-zA-Z0-9_=-]+)', part)
        blob_key = match.groupdict().get('blob_key') if match else None
        if not blob_key:
            continue
        # OK, we have a blob key, but is it the one for the field?
        match = re.search(r'\sname="?(?P<field_name>[a-zA-Z0-9_-]+)', part)
        name = match.groupdict().get('field_name') if match else None
        if name != field_name:
            # Nope, not for this field
            continue
        self.blobkey = blob_key
        break
    if self.blobkey:
        self.blobkey = BlobKey(self.blobkey)
        raise StopFutureHandlers()
    else:
        return super(BlobstoreFileUploadHandler, self).new_file(
            field_name, file_name, content_type, content_length, charset)
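# A note on what the regexes above scan for: when App Engine's Blobstore
# rewrites an uploaded part, the blob key lands in the part's Content-Type
# parameters (which is also where content_type_extra['blob-key'] in the other
# snippets comes from). A runnable check of the blob-key pattern against an
# illustrative fragment; the key value here is made up:
import re

sample_part = (
    'Content-Type: message/external-body; blob-key="AMIfv95example-KEY_1"\r\n'
    'Content-Disposition: form-data; name="file"; filename="photo.jpg"\r\n'
)
match = re.search(r'blob-key="?(?P<blob_key>[:a-zA-Z0-9_=-]+)', sample_part)
assert match and match.group('blob_key') == 'AMIfv95example-KEY_1'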
def new_file(self, field_name, file_name, *args, **kwargs):
    super().new_file(field_name, file_name, *args, **kwargs)
    LOG.info('Using FTPUploadHandler to handle file upload.')
    self.FTP_file_name = file_name
    LOG.debug('File name: %s' % self.FTP_file_name)
    LOG.debug('Content size: %s' % self.contentsize)
    self._file = FTPFile(BytesIO(), self.FTP_file_name, self.content_type,
                         self.contentsize)
    self.file_path = os.path.join(self.ftp_remoteDir, self.FTP_file_name)
    LOG.info('Upload attempt to %s' % self.file_path)
    self._activated = True
    self._starttime = time.time()
    self.processed_size = 0
    raise StopFutureHandlers()
def new_file(self, field_name, file_name, *args, **kwargs):
    super().new_file(field_name, file_name, *args, **kwargs)
    # ----- File name, path & remote directory configuration -----
    self._file._uploaded_file_name = file_name
    _, ext_ = os.path.splitext(file_name)
    data = self.request
    project_dir = data['project'] + " _ " + data['project_code']
    asset_dir = data['asset'] + " _ " + data['asset_code']
    self._file._ftp_remoteDir = os.path.join(project_dir, asset_dir)
    self._file._ftp_file_name = "%s_%s_%s_V(%s)_%s" % (
        data['project_code'], data['asset_code'],
        data['process_department'][0].upper(),
        data['version'].replace(".", "_"), data['user'].replace(".", "_"))
    self._file._ftp_file_name += ext_
    self._file._ftp_file_path = os.path.join(self._file._ftp_remoteDir,
                                             self._file._ftp_file_name)
    connect_ftp_and_make_directories()
    self._file._starttime = time.time()
    raise StopFutureHandlers()
def new_file(self, *args, **kwargs):
    super(ContentTypeExtraUploadHandler, self).new_file(*args, **kwargs)
    self.blobkey = self.content_type_extra.get('blob-key', '')
    self.file = StringIO()
    self.file.write(self.blobkey)
    # The empty-string default means "is not None" would always pass;
    # test truthiness instead.
    self.active = bool(self.blobkey)
    if self.active:
        raise StopFutureHandlers()
def new_file(self, *args, **kwargs):
    super(AppEngineFileUploadHandler, self).new_file(*args, **kwargs)
    blobkey = self.content_type_extra.get('blob-key')
    self.activated = blobkey is not None
    if self.activated:
        self.blobkey = BlobKey(blobkey)
        self.filename = kwargs.get('file_name', None)
        self.file = StringIO()
        raise StopFutureHandlers()
def new_file(self, field_name, file_name, content_type, content_length,
             charset=None, content_type_extra=None):
    if content_type_extra:
        self.blobkey = content_type_extra.get('blob-key')
    if self.blobkey:
        self.blobkey = BlobKey(self.blobkey)
        raise StopFutureHandlers()
    else:
        return super(BlobstoreFileUploadHandler, self).new_file(
            field_name, file_name, content_type, content_length, charset)
def new_file(self, *args, **kwargs):
    super(MemoryFileUploadHandler, self).new_file(*args, **kwargs)
    file_name = args[1]
    if not file_name.endswith('.csv'):
        self.request.session['wrong_file_type_%s' % self.cache_key] = True
        self.request.session.save()
        raise SkipFile("wrong_file_type:%s" % file_name)
    if self.activated:
        self.file = BytesIO()
        raise StopFutureHandlers()
def new_file(self, *args, **kwargs):
    """field_name, file_name, content_type, content_length, charset=None"""
    logger.debug('BlobstoreFileUploadHandler.new_file')
    super(BlobstoreFileUploadHandler, self).new_file(*args, **kwargs)
    blobkey = get_blobkey_from_body(self.request.body)
    self.active = blobkey is not None
    if self.active:
        self.blobkey = BlobKey(blobkey)
        raise StopFutureHandlers()
def new_file(self, field_name, file_name, *args, **kwargs):
    # Detect "HDFS" in the field name.
    if field_name.upper().startswith('HDFS'):
        try:
            self._file = HDFStemporaryUploadedFile(self.request, file_name,
                                                   self._destination)
            LOG.debug('Upload attempt to %s' % (self._file.get_temp_path(),))
            self._activated = True
            self._starttime = time.time()
        except Exception as ex:
            LOG.error("Not using HDFS upload handler: %s" % (ex,))
            raise
        raise StopFutureHandlers()
def new_file(self, field_name, file_name, content_type, content_length,
             charset, content_type_extra):
    """A new file is starting; prep Cassandra for a new upload."""
    self.file_name = file_name
    self.content_type = content_type
    self.hasher = hashlib.sha256()
    self.data = None
    self.uuid = None
    self.seq_number = 0
    raise StopFutureHandlers()
def new_file(self, *args, **kwargs) -> None:
    super(GCPStreamingFileUploadHandler, self).new_file(*args, **kwargs)
    self.file = ResumableUpload(self.upload_url, self.chunk_size)
    self.file.initiate(
        self.transport,
        self.data,
        {"name": self.file_name},
        self.content_type,
        stream_final=False,
    )
    raise StopFutureHandlers("Continue resumable upload session")
def new_file(self, field_name, file_name, *args, **kwargs):
    if self._is_s3_upload():
        super(S3FileUploadHandler, self).new_file(field_name, file_name,
                                                  *args, **kwargs)
        LOG.info('Using S3FileUploadHandler to handle file upload.')
        self.target_path = self._fs.join(self.key_name, file_name)
        # Create a multipart upload request
        LOG.debug("Initiating S3 multipart upload to target path: %s" % self.target_path)
        self._mp = self._bucket.initiate_multipart_upload(self.target_path)
        self.file = SimpleUploadedFile(name=file_name, content='')
        raise StopFutureHandlers()
def new_file(self, field_name, file_name, *args, **kwargs):
    super().new_file(field_name, file_name, *args, **kwargs)
    try:
        # ----- File name, path & remote directory configuration -----
        self._file._uploaded_file_name = file_name
        data = self.request
        (self._file._ftp_remoteDir,
         self._file._ftp_file_name,
         self._file._ftp_file_path) = class_ftp.get_remote_file_configurations(
             file_name, data['project'], data['project_code'], data['asset'],
             data['asset_code'], data['process_department'], data['version'],
             data['user'])
        ftp = class_ftp.ftp_connect_host()
        class_ftp.makeDirPath(self._file._ftp_remoteDir, ftp)
        class_ftp.archive_existing_duplicate_file(self._file._ftp_file_name, ftp)
        if ftp:
            ftp.close()
        self._file._starttime = time.time()
        if self._cache_key:
            data = cache.get(self._cache_key)
            data['start'] = 1
            cache.set(self._cache_key, data)
    except Exception as e:
        log.exception(e)
    # Whether setup succeeded or failed, this handler owns the file from here.
    raise StopFutureHandlers()
def new_file(self, field_name, file_name, *args, **kwargs):
    # Detect "HDFS" in the field name.
    # NOTE: The user is not authenticated at this point, and it's very
    # difficult to do so because we handle the upload before running the
    # auth middleware.
    if field_name.upper().startswith('HDFS'):
        try:
            self._file = HDFStemporaryUploadedFile(self.request, file_name)
        except (HDFSerror, IOError) as ex:
            LOG.error("Not using HDFS upload handler: %s" % (ex,))
            return
        LOG.debug('Upload attempt to %s' % (self._file.get_temp_path(),))
        self._activated = True
        self._starttime = time.time()
        raise StopFutureHandlers()
def new_file(self, field_name, file_name, *args, **kwargs):
    # Detect "HDFS" in the field name.
    if field_name.upper().startswith('HDFS'):
        LOG.info('Using HDFSfileUploadHandler to handle file upload.')
        try:
            fs_ref = self.request.REQUEST.get('fs', 'default')
            self.request.fs = fsmanager.get_filesystem(fs_ref)
            self.request.fs.setuser(self.request.user.username)
            self._file = HDFStemporaryUploadedFile(self.request, file_name,
                                                   self._destination)
            LOG.debug('Upload attempt to %s' % (self._file.get_temp_path(),))
            self._activated = True
            self._starttime = time.time()
        except Exception as ex:
            LOG.error("Not using HDFS upload handler: %s" % (ex,))
            self.request.META['upload_failed'] = ex
        raise StopFutureHandlers()
def new_file(self, field_name, file_name, *args, **kwargs):
    if self._is_abfs_upload():
        super(ABFSFileUploadHandler, self).new_file(field_name, file_name,
                                                    *args, **kwargs)
        LOG.info('Using ABFSFileUploadHandler to handle file upload with temp file %s.' % file_name)
        self.target_path = self._fs.join(self.destination, file_name)
        try:
            # Check access permissions before attempting upload
            # self._check_access()  # implement later
            LOG.debug("Initiating ABFS upload to target path: %s" % self.target_path)
            self._fs.create(self.target_path)
            self.file = SimpleUploadedFile(name=file_name, content='')
            raise StopFutureHandlers()
        except (ABFSFileUploadError, ABFSFileSystemException) as e:
            LOG.error("Encountered error in ABFSUploadHandler check_access: %s" % e)
            self.request.META['upload_failed'] = e
            raise StopUpload()
def new_file(self, field_name, file_name, *args, **kwargs):
    if self._is_s3_upload():
        super(S3FileUploadHandler, self).new_file(field_name, file_name,
                                                  *args, **kwargs)
        LOG.info('Using S3FileUploadHandler to handle file upload.')
        self.target_path = self._fs.join(self.key_name, file_name)
        try:
            # Check access permissions before attempting upload
            self._check_access()
            # Create a multipart upload request
            LOG.debug("Initiating S3 multipart upload to target path: %s" % self.target_path)
            self._mp = self._bucket.initiate_multipart_upload(self.target_path)
            self.file = SimpleUploadedFile(name=file_name, content='')
            raise StopFutureHandlers()
        except (S3FileUploadError, S3FileSystemException) as e:
            LOG.error("Encountered error in S3UploadHandler check_access: %s" % e)
            self.request.META['upload_failed'] = e
            raise StopUpload()
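# For context: handlers like the S3/ABFS/HDFS ones above are wired in either
# globally via Django's FILE_UPLOAD_HANDLERS setting (Django instantiates each
# entry with the request), or per request before request.POST is first touched.
# The first dotted path below is illustrative, not a real module location:
#
#   FILE_UPLOAD_HANDLERS = [
#       'myapp.upload.S3FileUploadHandler',  # hypothetical path
#       'django.core.files.uploadhandler.MemoryFileUploadHandler',
#       'django.core.files.uploadhandler.TemporaryFileUploadHandler',
#   ]
#
# or, per request:
#
#   request.upload_handlers.insert(0, S3FileUploadHandler(request))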
def new_file(self, *args, **kwargs):
    if not self.activated:
        raise StopFutureHandlers()
    return super().new_file(*args, **kwargs)
def new_file(self, *args, **kwargs):
    super().new_file(*args, **kwargs)
    if self.activated:
        self.file = BytesIO()
        raise StopFutureHandlers()
def new_file(self, field_name, file_name, content_type, content_length, charset=None):
    max_size = 10000000  # roughly 10 MB; was referenced but never defined
    if self.content_length > max_size:
        raise StopFutureHandlers(_('File too large. Max size restricted to %s bytes') % max_size)
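# All of the variants above lean on the same three-method contract from
# django.core.files.uploadhandler: new_file() runs once per uploaded file, and
# raising StopFutureHandlers() claims the file so handlers later in the chain
# never see it. A minimal, self-contained sketch of that contract; the class
# name and the hashing behaviour are invented for illustration:
import hashlib
from io import BytesIO

from django.core.files.uploadedfile import InMemoryUploadedFile
from django.core.files.uploadhandler import FileUploadHandler, StopFutureHandlers


class HashingUploadHandler(FileUploadHandler):
    """Buffers the upload in memory while computing a SHA-256 digest."""

    def new_file(self, *args, **kwargs):
        # The base class stores field_name, file_name, content_type, etc.
        super().new_file(*args, **kwargs)
        self.buffer = BytesIO()
        self.hasher = hashlib.sha256()
        # Claim the file: handlers later in the chain never see it.
        raise StopFutureHandlers()

    def receive_data_chunk(self, raw_data, start):
        self.hasher.update(raw_data)
        self.buffer.write(raw_data)
        return None  # returning None consumes the chunk

    def file_complete(self, file_size):
        self.buffer.seek(0)
        return InMemoryUploadedFile(
            file=self.buffer, field_name=self.field_name, name=self.file_name,
            content_type=self.content_type, size=file_size, charset=self.charset)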