def handle(self, *args, **options):
    """Prepare object storage and the database so the site can serve."""
    # We only collectstatic on non-debug. On debug, we don't upload to
    # minio because we'd need to run collectstatic every reboot, which
    # would be too slow/complex-to-set-up.
    if not settings.DEBUG:
        management.call_command("collectstatic", "--no-input")

    # Create every bucket the app writes to, in a fixed order.
    for bucket in (
        minio.UserFilesBucket,
        minio.StoredObjectsBucket,
        minio.ExternalModulesBucket,
        minio.CachedRenderResultsBucket,
    ):
        minio.ensure_bucket_exists(bucket)

    # ICK ugly hack. TODO there must be a better place to make uploaded
    # files readable for integration tests....
    if settings.MINIO_BUCKET_PREFIX == "integrationtest":
        minio.ensure_bucket_exists(minio.StaticFilesBucket)
        minio.client.put_bucket_policy(
            Bucket=minio.StaticFilesBucket, Policy=STATIC_FILES_BUCKET_POLICY
        )

    # No need to enable CORS for minio-served buckets:
    # "Minio enables CORS by default on all buckets for all HTTP verbs"
    # https://docs.min.io/docs/minio-server-limits-per-tenant.html

    # Migrate comes last: during deploy, in some cases, migration can make
    # the site unusable until it's completed. So don't add any instructions
    # _after_ this, because that will increase our downtime if we're
    # unfortunate enough to cause downtime.
    management.call_command("migrate")
def copy_module_code_to_s3(apps, schema_editor):
    """Data migration: upload every ModuleVersion's imported code to S3."""
    ModuleVersion = apps.get_model('server', 'ModuleVersion')
    minio.ensure_bucket_exists(minio.ExternalModulesBucket)

    # .../cjworkbench/importedmodules -- one subdir per (module, version)
    modules_root = Path(__file__).parent.parent.parent / 'importedmodules'

    for mv in ModuleVersion.objects.all():
        minio.fput_directory_contents(
            minio.ExternalModulesBucket,
            '%s/%s' % (mv.id_name, mv.source_version_hash),
            modules_root / mv.id_name / mv.source_version_hash,
        )
def clear_minio():
    """Wipe all objects from every test bucket (buckets created on first call)."""
    all_buckets = (
        minio.UserFilesBucket,
        minio.StoredObjectsBucket,
        minio.ExternalModulesBucket,
        minio.CachedRenderResultsBucket,
    )

    # First call in this process: make sure every bucket exists. We stash
    # a flag on the function itself so later calls skip the setup.
    if not hasattr(clear_minio, '_initialized'):
        for b in all_buckets:
            minio.ensure_bucket_exists(b)
        clear_minio._initialized = True

    for b in all_buckets:
        minio.remove_recursive(b, '/', force=True)
def clear_minio():
    """Delete every object in each test bucket, creating buckets on first use."""
    all_buckets = (
        minio.UserFilesBucket,
        minio.StoredObjectsBucket,
        minio.ExternalModulesBucket,
        minio.CachedRenderResultsBucket,
    )

    # One-time bucket creation per process, flagged on the function object.
    if not hasattr(clear_minio, '_initialized'):
        for b in all_buckets:
            minio.ensure_bucket_exists(b)
        clear_minio._initialized = True

    for b in all_buckets:
        listing = minio.minio_client.list_objects_v2(b, recursive=True)
        keys = [entry.object_name for entry in listing if not entry.is_dir]
        if keys:
            # remove_objects() is lazy: iterating executes the deletes, and
            # any yielded item is a delete error -- surface the first one.
            for err in minio.minio_client.remove_objects(b, keys):
                raise err
def handle(self, *args, **options):
    """Set up static files, storage buckets and database for serving."""
    # We only collectstatic on non-debug. On debug, we don't upload to
    # minio because we'd need to run collectstatic every reboot, which
    # would be too slow/complex-to-set-up.
    if not settings.DEBUG:
        management.call_command('collectstatic', '--no-input')

    # Create every bucket the app writes to, in a fixed order.
    for bucket in (
        minio.UserFilesBucket,
        minio.StoredObjectsBucket,
        minio.ExternalModulesBucket,
        minio.CachedRenderResultsBucket,
    ):
        minio.ensure_bucket_exists(bucket)

    # ICK ugly hack. TODO there must be a better place to make uploaded
    # files readable for integration tests....
    if settings.MINIO_BUCKET_PREFIX == 'integrationtest':
        minio.ensure_bucket_exists(minio.StaticFilesBucket)
        minio.minio_client.set_bucket_policy(minio.StaticFilesBucket, BUCKET_POLICY)

    # Migrate comes last: during deploy, in some cases, migration can make
    # the site unusable until it's completed. So don't add any instructions
    # _after_ this, because that will increase our downtime if we're
    # unfortunate enough to cause downtime.
    management.call_command('migrate')
def __init__(self, *args, **kwargs):
    # Delegate to the Storage base-class initializer, then create the
    # static-files bucket up front so later writes don't fail on a
    # missing bucket.
    # NOTE(review): ensure_bucket_exists and StaticFilesBucket are
    # unqualified here -- presumably imported from the project's minio
    # module at file top; confirm against the imports.
    Storage.__init__(self, *args, **kwargs)
    ensure_bucket_exists(StaticFilesBucket)
def setUp(self):
    # Make sure the test bucket exists, then empty it so every test
    # starts from a clean slate.
    minio.ensure_bucket_exists(Bucket)
    _clear()
from contextlib import contextmanager
from pathlib import Path
import unittest

import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal

from server import minio, parquet

# All tests share one bucket/key; the bucket is created at import time.
bucket = minio.CachedRenderResultsBucket
key = 'key.par'
minio.ensure_bucket_exists(bucket)


class ParquetTest(unittest.TestCase):
    @contextmanager
    def _file_on_s3(self, relpath):
        """Upload test_data/<relpath> to S3 as `key`; always remove it on exit."""
        fixture = Path(__file__).parent / 'test_data' / relpath
        try:
            # Upload inside the try so cleanup runs even if the put fails.
            minio.fput_file(bucket, key, fixture)
            yield
        finally:
            minio.remove(bucket, key)

    def test_read_header_issue_361(self):
        # https://github.com/dask/fastparquet/issues/361
        with self._file_on_s3('fastparquet-issue-361.par'):
            header = parquet.read_header(bucket, key)
            self.assertEqual(header.columns, [])
            self.assertEqual(header.count, 3)