Example #1
    def delete(self, *args, **kwargs):
        if self.inprogress_file_upload_key:
            # Abort any in-progress multipart upload. It may already be gone
            # (e.g., an S3-side timeout policy aborted it first), so ignore
            # NoSuchUpload.
            try:
                minio.abort_multipart_upload(minio.UserFilesBucket,
                                             self.inprogress_file_upload_key,
                                             self.inprogress_file_upload_id)
            except minio.error.NoSuchUpload:
                pass
        # Delete every file this module uploaded and its cached render result,
        # then delete the database row itself.
        minio.remove_recursive(minio.UserFilesBucket,
                               self.uploaded_file_prefix)
        CachedRenderResult.clear_wf_module(self)
        super().delete(*args, **kwargs)
Example #2
    def test_delete_ignore_inprogress_file_upload_not_on_s3(self):
        workflow = Workflow.create_and_init()
        upload_id = minio.create_multipart_upload(minio.UserFilesBucket,
                                                  'key', 'file.csv')
        wf_module = workflow.tabs.first().wf_modules.create(
            order=0,
            inprogress_file_upload_id=upload_id,
            inprogress_file_upload_key='key',
            inprogress_file_upload_last_accessed_at=timezone.now(),
        )
        # Delete from S3, and then delete the WfModule.
        #
        # This mimics a behavior we want: upload timeouts. We can set up an
        # S3-side policy to delete old uploaded data (see the lifecycle sketch
        # after this example); we need to expect that data might already be
        # deleted when we delete the WfModule.
        minio.abort_multipart_upload(minio.UserFilesBucket, 'key', upload_id)
        wf_module.delete()  # do not crash
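
The test comment above mentions an S3-side policy to delete old upload data. Below is a minimal sketch of one such policy, a bucket lifecycle rule that aborts multipart uploads left incomplete for more than a day. It is written with boto3 against a hypothetical endpoint and bucket name, not the project's own `minio` wrapper, so every identifier in it is an assumption rather than the project's actual configuration.

import boto3

# Hypothetical S3/MinIO endpoint and bucket; substitute the real ones.
s3 = boto3.client('s3', endpoint_url='http://minio:9000')

# One rule: abort any multipart upload still incomplete one day after it was
# initiated. Completed objects are untouched.
s3.put_bucket_lifecycle_configuration(
    Bucket='user-files',
    LifecycleConfiguration={
        'Rules': [
            {
                'ID': 'abort-stale-multipart-uploads',
                'Filter': {'Prefix': ''},
                'Status': 'Enabled',
                'AbortIncompleteMultipartUpload': {'DaysAfterInitiation': 1},
            },
        ],
    },
)

With a rule like this in place, an upload can disappear from S3 on its own, which is exactly the situation the test above exercises by aborting the upload before calling wf_module.delete().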
Example #3
    def test_abort_multipart_upload_upload_already_aborted(self):
        user = User.objects.create(username='******', email='*****@*****.**')
        workflow = Workflow.create_and_init(owner=user)
        upload_id = minio.create_multipart_upload(minio.UserFilesBucket,
                                                  'key', 'file.csv')
        minio.abort_multipart_upload(minio.UserFilesBucket, 'key', upload_id)
        wf_module = workflow.tabs.first().wf_modules.create(
            order=0,
            module_id_name='x',
            inprogress_file_upload_id=upload_id,
            inprogress_file_upload_key='key',
            inprogress_file_upload_last_accessed_at=timezone.now()
        )

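        # The upload was already aborted on S3 above; the handler must still
        # respond successfully and clear the in-progress fields below.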
        response = self.run_handler(abort_multipart_upload, user=user,
                                    workflow=workflow, wfModuleId=wf_module.id,
                                    uploadId=upload_id)
        self.assertResponse(response, data=None)
        # Must remove data from the DB even if the file isn't in minio.
        wf_module.refresh_from_db()
        self.assertIsNone(wf_module.inprogress_file_upload_id)
        self.assertIsNone(wf_module.inprogress_file_upload_key)
        self.assertIsNone(wf_module.inprogress_file_upload_last_accessed_at)
Example #4
    def abort_inprogress_upload(self):
        """
        Delete data from S3 that this WfModule marked as an in-progress upload.

        * Delete incomplete multi-part upload
        * Delete completed upload, multipart or otherwise
        * Set `.inprogress_file_upload_*` to `None` (and save those fields)
        * Never raise `NoSuchUpload` or `FileNotFoundError`.
        """
        if (not self.inprogress_file_upload_id
                and not self.inprogress_file_upload_key):
            return

        if self.inprogress_file_upload_id:
            # If we're uploading a multipart file, delete all parts
            try:
                minio.abort_multipart_upload(minio.UserFilesBucket,
                                             self.inprogress_file_upload_key,
                                             self.inprogress_file_upload_id)
            except minio.error.NoSuchUpload:
                pass
        if self.inprogress_file_upload_key:
            # If we _nearly_ completed a multipart upload, or if we wrote data via
            # regular upload but didn't mark it completed, delete the file
            try:
                minio.remove(minio.UserFilesBucket,
                             self.inprogress_file_upload_key)
            except FileNotFoundError:
                pass
        self.inprogress_file_upload_id = None
        self.inprogress_file_upload_key = None
        self.inprogress_file_upload_last_accessed_at = None
        self.save(update_fields=[
            'inprogress_file_upload_id', 'inprogress_file_upload_key',
            'inprogress_file_upload_last_accessed_at'
        ])
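
A brief usage sketch, assuming a wf_module like the ones the tests above create: because the method returns early once the in-progress fields are cleared and swallows NoSuchUpload and FileNotFoundError, calling it repeatedly, or after an S3-side policy has already expired the upload, is safe.

wf_module.abort_inprogress_upload()  # deletes any parts/file still on S3, clears the fields
wf_module.abort_inprogress_upload()  # second call is a no-op: the fields are already None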
Example #5
    def test_abort_multipart_upload_happy_path(self):
        user = User.objects.create(username='******', email='*****@*****.**')
        workflow = Workflow.create_and_init(owner=user)
        upload_id = minio.create_multipart_upload(minio.UserFilesBucket,
                                                  'key', 'file.csv')
        wf_module = workflow.tabs.first().wf_modules.create(
            order=0,
            module_id_name='x',
            inprogress_file_upload_id=upload_id,
            inprogress_file_upload_key='key',
            inprogress_file_upload_last_accessed_at=timezone.now()
        )

        response = self.run_handler(abort_multipart_upload, user=user,
                                    workflow=workflow, wfModuleId=wf_module.id,
                                    uploadId=upload_id)
        self.assertResponse(response, data=None)
        wf_module.refresh_from_db()
        self.assertIsNone(wf_module.inprogress_file_upload_id)
        self.assertIsNone(wf_module.inprogress_file_upload_key)
        self.assertIsNone(wf_module.inprogress_file_upload_last_accessed_at)
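        # The handler already aborted the upload on S3, so aborting it again
        # must raise NoSuchUpload.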
        with self.assertRaises(minio.error.NoSuchUpload):
            minio.abort_multipart_upload(minio.UserFilesBucket, 'key',
                                         upload_id)