def test_config(self):
    """Round-trip a lifecycle configuration through the XML (de)serializer.

    First marshals a programmatically built config, then unmarshals a
    server-style XML document and marshals the result again; each step
    simply must not raise.
    """
    # Programmatic construction: one transition rule, one expiration rule.
    transition_rule = Rule(
        ENABLED,
        rule_filter=Filter(prefix="documents/"),
        rule_id="rule1",
        transition=Transition(days=30, storage_class="GLACIER"),
    )
    expiration_rule = Rule(
        ENABLED,
        rule_filter=Filter(prefix="logs/"),
        rule_id="rule2",
        expiration=Expiration(days=365),
    )
    lifecycle_config = LifecycleConfig([transition_rule, expiration_rule])
    xml.marshal(lifecycle_config)

    # Parse an XML document (as a server would return it) and marshal it
    # back out again.
    lifecycle_config = xml.unmarshal(
        LifecycleConfig,
        """<LifeCycleConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
  <Rule>
    <ID>DeleteAfterBecomingNonCurrent</ID>
    <Filter>
      <Prefix>logs/</Prefix>
    </Filter>
    <Status>Enabled</Status>
    <NoncurrentVersionExpiration>
      <NoncurrentDays>100</NoncurrentDays>
    </NoncurrentVersionExpiration>
  </Rule>
  <Rule>
    <ID>TransitionAfterBecomingNonCurrent</ID>
    <Filter>
      <Prefix>documents/</Prefix>
    </Filter>
    <Status>Enabled</Status>
    <NoncurrentVersionTransition>
      <NoncurrentDays>30</NoncurrentDays>
      <StorageClass>GLACIER</StorageClass>
    </NoncurrentVersionTransition>
  </Rule>
</LifeCycleConfiguration>""",
    )
    xml.marshal(lifecycle_config)
def test_config(self):
    """Round-trip a replication configuration through the XML (de)serializer.

    First marshals a programmatically built config, then unmarshals a
    server-style XML document and marshals the result again; each step
    simply must not raise.
    """
    config = ReplicationConfig(
        "REPLACE-WITH-ACTUAL-ROLE",
        [
            Rule(
                Destination(
                    "REPLACE-WITH-ACTUAL-DESTINATION-BUCKET-ARN",
                ),
                ENABLED,
                delete_marker_replication=DeleteMarkerReplication(
                    DISABLED,
                ),
                rule_filter=Filter(
                    AndOperator(
                        "TaxDocs",
                        {
                            "key1": "value1",
                            "key2": "value2"
                        },
                    ),
                ),
                rule_id="rule1",
                priority=1,
            ),
        ],
    )
    xml.marshal(config)

    # Fixture fix: the <And> element previously contained the identical
    # key1/value1 tag twice.  S3 requires tag keys within a replication
    # filter to be unique, so the second tag now carries key2/value2 --
    # matching the programmatic config marshalled above.
    config = xml.unmarshal(
        ReplicationConfig,
        """<ReplicationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
  <Role>arn:aws:iam::35667example:role/CrossRegionReplicationRoleForS3</Role>
  <Rule>
    <ID>rule1</ID>
    <Status>Enabled</Status>
    <Priority>1</Priority>
    <DeleteMarkerReplication>
      <Status>Disabled</Status>
    </DeleteMarkerReplication>
    <Filter>
      <And>
        <Prefix>TaxDocs</Prefix>
        <Tag>
          <Key>key1</Key>
          <Value>value1</Value>
        </Tag>
        <Tag>
          <Key>key2</Key>
          <Value>value2</Value>
        </Tag>
      </And>
    </Filter>
    <Destination>
      <Bucket>arn:aws:s3:::exampletargetbucket</Bucket>
    </Destination>
  </Rule>
</ReplicationConfiguration>""",
    )
    xml.marshal(config)
def upload():
    """
    Handle uploaded DSDL namespace repository archives.

    This expects either an already made zip archive uploaded as a file
    or a URL link to a zip archive.  Frontend converts GitHub links into
    zip archives.  Takes multipart/form-data (obviously, because we have
    a file upload).

    Returns a JSON body with the task-status URL and HTTP 202 on
    success, or the validation errors with HTTP 400 when the form is
    rejected.
    """
    # Each build gets its own bucket, keyed by a fresh UUID.
    build_uuid = str(uuid.uuid4())
    storage.make_bucket(build_uuid)

    # Expire uploaded archives after 30 days so abandoned builds do not
    # accumulate storage indefinitely.
    config = LifecycleConfig(
        [
            Rule(
                ENABLED,
                rule_filter=Filter(prefix="uploads/"),
                rule_id=f"delete_rule_{build_uuid}",
                expiration=Expiration(days=30),
            ),
        ],
    )
    storage.set_bucket_lifecycle(build_uuid, config)

    try:
        form = UploadForm(flask.request.form, flask.request.files)
    except ValidationError as error:
        # Bug fix: previously the errors were returned with an implicit
        # HTTP 200, so clients could not tell a rejected form from a
        # successful upload.  Validation failures are client errors: 400.
        return flask.jsonify(error.errors), 400

    for file in form.archive_files:
        # fstat on the underlying descriptor gives the exact size MinIO
        # needs for the streaming upload.
        size = os.fstat(file.fileno()).st_size
        storage.put_object(build_uuid, f"uploads/{file.filename}", file, size)

    # Kick off Celery task to generate DSDL
    task = generate_dsdl.delay(build_uuid, form.archive_urls, form.target_lang,
                               form.target_endian, form.flags, form.doc_url)
    return (
        flask.jsonify({
            "task_url": flask.url_for("api.taskstatus", task_id=task.id),
        }),
        202,
    )
from minio.replicationconfig import (DeleteMarkerReplication, Destination,
                                     ReplicationConfig, Rule)

# Connect to the MinIO playground with its public demo credentials.
client = Minio(
    "play.min.io",
    access_key="Q3AM3UQ867SPQQA43P2F",
    secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)

# Replicate objects under the "TaxDocs" prefix carrying both tags to the
# destination bucket; delete markers themselves are not replicated.
tag_filter = Filter(
    AndOperator(
        "TaxDocs",
        {
            "key1": "value1",
            "key2": "value2"
        },
    ),
)
replication_rule = Rule(
    Destination(
        "REPLACE-WITH-ACTUAL-DESTINATION-BUCKET-ARN",
    ),
    ENABLED,
    delete_marker_replication=DeleteMarkerReplication(
        DISABLED,
    ),
    rule_filter=tag_filter,
    rule_id="rule1",
    priority=1,
)
replication_config = ReplicationConfig(
    "REPLACE-WITH-ACTUAL-ROLE",
    [replication_rule],
)

client.set_bucket_replication("my-bucketname", replication_config)
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from minio import Minio
from minio.commonconfig import ENABLED, Filter
from minio.lifecycleconfig import Expiration, LifecycleConfig, Rule, Transition

# Connect to the MinIO playground with its public demo credentials.
client = Minio(
    "play.min.io",
    access_key="Q3AM3UQ867SPQQA43P2F",
    secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)

# Two rules: tier "documents/" objects to GLACIER after 30 days, and
# delete "logs/" objects after a year.
glacier_transition = Rule(
    ENABLED,
    rule_filter=Filter(prefix="documents/"),
    rule_id="rule1",
    transition=Transition(days=30, storage_class="GLACIER"),
)
log_expiration = Rule(
    ENABLED,
    rule_filter=Filter(prefix="logs/"),
    rule_id="rule2",
    expiration=Expiration(days=365),
)
lifecycle_config = LifecycleConfig([glacier_transition, log_expiration])

client.set_bucket_lifecycle("my-bucket", lifecycle_config)
{ "Effect": "Allow", "Principal": { "AWS": "*" }, "Action": "s3:GetObject", "Resource": "arn:aws:s3:::results/*", }, ], } storage.set_bucket_policy("results", json.dumps(policy)) config = LifecycleConfig([ Rule( ENABLED, rule_filter=Filter(prefix="*"), rule_id="delete_rule_results", expiration=Expiration(days=30), ), ], ) storage.set_bucket_lifecycle("results", config) # Setup minio docs bucket # and set public access if not storage.bucket_exists("docs"): storage.make_bucket("docs") policy = { "Version": "2012-10-17", "Statement": [ {
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
# dummy values, please replace them with original values.

from minio import Minio
from minio.commonconfig import ENABLED, Filter
from minio.lifecycleconfig import Expiration, LifecycleConfig, Rule

# Connect to the MinIO playground with its public demo credentials.
client = Minio(
    "play.min.io",
    access_key="Q3AM3UQ867SPQQA43P2F",
    secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)

# Single rule: delete objects under the "logs/" prefix after 365 days.
log_expiration = Rule(
    ENABLED,
    rule_filter=Filter(prefix="logs/"),
    rule_id="rule2",
    expiration=Expiration(days=365),
)
lifecycle_config = LifecycleConfig([log_expiration])

client.set_bucket_lifecycle("my-bucketname", lifecycle_config)