def test_get_policy_for_non_existent_bucket(self, mock_connection):
    mock_server = MockConnection()
    mock_connection.return_value = mock_server
    bucket_name = 'non-existent-bucket'
    mock_server.mock_add_request(
        MockResponse(
            'GET',
            'https://localhost:9000/' + bucket_name + '/?policy=',
            {'User-Agent': _DEFAULT_USER_AGENT},
            404,
        ))
    client = Minio('localhost:9000')
    client.get_bucket_policy(bucket_name)
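
The `mock_connection` argument implies patch decorators that the snippet extractor dropped. A minimal sketch of the harness such a method would sit in, assuming nose-style `@raises` and a patched `urllib3.PoolManager.urlopen` (both are assumptions, not part of the original excerpt):

from unittest import TestCase
from unittest import mock

from nose.tools import raises

from minio import Minio
from minio.error import ResponseError


class GetPolicyTest(TestCase):
    # urlopen is patched so no real HTTP request is made; each test swaps in
    # a MockConnection and asserts the client raises on the mocked 404.
    @mock.patch('urllib3.PoolManager.urlopen')
    @raises(ResponseError)
    def test_get_policy_for_non_existent_bucket(self, mock_connection):
        ...  # body as in the excerpt above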
Example #2
    def test_get_policy_for_existent_bucket_with_prefix(
            self, mock_connection
    ):
        mock_data = '{"Version":"2012-10-17","Statement":[{"Action":["s3:GetBucketLocation"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::test-bucket"],"Sid":""},{"Action":["s3:GetBucketLocation"],"Effect":"Allow","Principal":{"AWS":"*"},"Resource":["arn:aws:s3:::test-bucket"],"Sid":""},{"Action":["s3:ListBucket"],"Condition":{"StringEquals":{"s3:prefix":["test-prefix-readonly"]}},"Effect":"Allow","Principal":{"AWS":"*"},"Resource":["arn:aws:s3:::test-bucket"],"Sid":""},{"Action":["s3:GetObject"],"Effect":"Allow","Principal":{"AWS":"*"},"Resource":["arn:aws:s3:::test-bucket/test-prefix-readonly*"],"Sid":""}]}'  # NOQA

        mock_server = MockConnection()
        mock_connection.return_value = mock_server

        bucket_name = 'test-bucket'
        prefix_name = 'test-prefix-readonly'

        mock_server.mock_add_request(
            MockResponse(
                'GET',
                'https://localhost:9000/' + bucket_name + '/?policy=',
                {'User-Agent': _DEFAULT_USER_AGENT},
                200,
                content=mock_data
            )
        )

        client = Minio('localhost:9000')

        response = client.get_bucket_policy(bucket_name, prefix_name)
        eq_(response, Policy.READ_ONLY)
Example #3
def test_get_policy_for_non_existent_bucket(self, mock_connection):
    mock_server = MockConnection()
    mock_connection.return_value = mock_server
    bucket_name = 'non-existent-bucket'
    error = ("<ErrorResponse>"
             "<Code>NoSuchBucket</Code>"
             "<Message>No such bucket</Message><RequestId>1234</RequestId>"
             "<Resource>/non-existent-bucket</Resource>"
             "<HostId>abcd</HostId>"
             "<BucketName>non-existent-bucket</BucketName>"
             "</ErrorResponse>")
    mock_server.mock_add_request(
        MockResponse('GET',
                     'https://localhost:9000/' + bucket_name + '?policy=',
                     {'User-Agent': _DEFAULT_USER_AGENT},
                     404,
                     response_headers={"Content-Type": "application/xml"},
                     content=error.encode()))
    client = Minio('localhost:9000')
    client.get_bucket_policy(bucket_name)

def test_get_policy_for_existent_bucket(self, mock_connection):
    # The policy document must go over the wire as JSON text, not a dict.
    mock_data = json.dumps({
        "Version": "2012-10-17",
        "Statement": [{
            "Sid": "",
            "Effect": "Allow",
            "Principal": {"AWS": "*"},
            "Action": "s3:GetBucketLocation",
            "Resource": "arn:aws:s3:::test-bucket"
        }, {
            "Sid": "",
            "Effect": "Allow",
            "Principal": {"AWS": "*"},
            "Action": "s3:ListBucket",
            "Resource": "arn:aws:s3:::test-bucket"
        }, {
            "Sid": "",
            "Effect": "Allow",
            "Principal": {"AWS": "*"},
            "Action": "s3:GetObject",
            "Resource": "arn:aws:s3:::test-bucket/*"
        }]
    })
    mock_server = MockConnection()
    mock_connection.return_value = mock_server
    bucket_name = 'test-bucket'
    mock_server.mock_add_request(
        MockResponse('GET',
                     'https://localhost:9000/' + bucket_name + '/?policy=',
                     {'User-Agent': _DEFAULT_USER_AGENT},
                     200,
                     content=mock_data.encode()))
    client = Minio('localhost:9000')
    response = client.get_bucket_policy(bucket_name)
    eq_(response, mock_data)
Example #5
File: tests.py Project: poornas/minio-py
def main():
    """
    Functional testing of minio python library.
    """
    fake = Factory.create()
    client = Minio('play.minio.io:9000',
                   'Q3AM3UQ867SPQQA43P2F',
                   'zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG')

    _http = urllib3.PoolManager(
        cert_reqs='CERT_REQUIRED',
        ca_certs=certifi.where()
    )

    # Get unique bucket_name, object_name.
    bucket_name = str(uuid.uuid4())
    object_name = str(uuid.uuid4())

    # Enable trace
    # client.trace_on(sys.stderr)

    # Make a new bucket.
    bucket_name = 'minio-pytest'

    client.make_bucket(bucket_name)

    is_s3 = client._endpoint_url.startswith("s3.amazonaws")
    if is_s3:
        client.make_bucket(bucket_name+'.unique',
                           location='us-west-1')

    # Check that the server returns valid error codes.
    if is_s3:
        try:
            client.make_bucket(bucket_name+'.unique',
                               location='us-west-1')
        except BucketAlreadyOwnedByYou as err:
            pass
        except BucketAlreadyExists as err:
            pass
        except ResponseError as err:
            raise

    # Check if bucket was created properly.
    client.bucket_exists(bucket_name)
    if is_s3:
        client.bucket_exists(bucket_name+'.unique')

    # List all buckets.
    buckets = client.list_buckets()
    for bucket in buckets:
        _, _ = bucket.name, bucket.creation_date

    with open('testfile', 'wb') as file_data:
        file_data.write(fake.text().encode('utf-8'))

    # Put a file
    file_stat = os.stat('testfile')
    with open('testfile', 'rb') as file_data:
        client.put_object(bucket_name, object_name, file_data,
                          file_stat.st_size)

    with open('largefile', 'wb') as file_data:
        for i in range(0, 104857):
            file_data.write(fake.text().encode('utf-8'))

    # Fput a file
    client.fput_object(bucket_name, object_name+'-f', 'testfile')
    if is_s3:
        client.fput_object(bucket_name, object_name+'-f', 'testfile',
                           metadata={'x-amz-storage-class': 'STANDARD_IA'})

    # Fput a large file.
    client.fput_object(bucket_name, object_name+'-large', 'largefile')
    if is_s3:
        client.fput_object(bucket_name, object_name+'-large', 'largefile',
                           metadata={'x-amz-storage-class': 'STANDARD_IA'})

    # Copy a file
    client.copy_object(bucket_name, object_name+'-copy',
                       '/'+bucket_name+'/'+object_name+'-f')

    try:
        copy_conditions = CopyConditions()
        copy_conditions.set_match_etag('test-etag')
        client.copy_object(bucket_name, object_name+'-copy',
                           '/'+bucket_name+'/'+object_name+'-f',
                           copy_conditions)
    except PreconditionFailed as err:
        if err.message != 'At least one of the preconditions you specified did not hold.':
            raise

    # Fetch stats on your object.
    client.stat_object(bucket_name, object_name)

    # Fetch stats on your object.
    client.stat_object(bucket_name, object_name+'-f')

    # Fetch stats on your large object.
    client.stat_object(bucket_name, object_name+'-large')

    # Fetch stats on your object.
    client.stat_object(bucket_name, object_name+'-copy')

    # Get a full object
    object_data = client.get_object(bucket_name, object_name)
    with open('newfile', 'wb') as file_data:
        for data in object_data:
            file_data.write(data)

    # Get a full object locally.
    client.fget_object(bucket_name, object_name, 'newfile-f')

    client.fput_object(bucket_name, object_name+'-f', 'testfile',
                       metadata={'x-amz-meta-testing': 'value'})

    stat = client.fget_object(bucket_name, object_name+'-f', 'newfile-f-custom')
    if 'X-Amz-Meta-Testing' not in stat.metadata:
        raise ValueError('Metadata key \'X-Amz-Meta-Testing\' not found')
    value = stat.metadata['X-Amz-Meta-Testing']
    if value != 'value':
        raise ValueError('Metadata key has unexpected'
                         ' value {0}'.format(value))

    # List all object paths in bucket.
    print("Listing using ListObjects")
    objects = client.list_objects(bucket_name, recursive=True)
    for obj in objects:
        _, _, _, _, _, _ = obj.bucket_name, obj.object_name, \
                           obj.last_modified, \
                           obj.etag, obj.size, \
                           obj.content_type

    # List all object paths in bucket using V2 API.
    print("Listing using ListObjectsV2")
    objects = client.list_objects_v2(bucket_name, recursive=True)
    for obj in objects:
        _, _, _, _, _, _ = obj.bucket_name, obj.object_name, \
                           obj.last_modified, \
                           obj.etag, obj.size, \
                           obj.content_type

    presigned_get_object_url = client.presigned_get_object(bucket_name, object_name)
    response = _http.urlopen('GET', presigned_get_object_url)
    if response.status != 200:
        raise ResponseError(response,
                            'GET',
                            bucket_name,
                            object_name).get_exception()

    presigned_put_object_url = client.presigned_put_object(bucket_name, object_name)
    value = fake.text().encode('utf-8')
    data = io.BytesIO(value).getvalue()
    response = _http.urlopen('PUT', presigned_put_object_url, body=data)
    if response.status != 200:
        raise ResponseError(response,
                            'PUT',
                            bucket_name,
                            object_name).get_exception()

    object_data = client.get_object(bucket_name, object_name)
    if object_data.read() != value:
        raise ValueError('Bytes not equal')

    # Post policy.
    policy = PostPolicy()
    policy.set_bucket_name(bucket_name)
    policy.set_key_startswith('objectPrefix/')

    expires_date = datetime.utcnow()+timedelta(days=10)
    policy.set_expires(expires_date)
    client.presigned_post_policy(policy)

    # Remove all objects.
    client.remove_object(bucket_name, object_name)
    client.remove_object(bucket_name, object_name+'-f')
    client.remove_object(bucket_name, object_name+'-large')
    client.remove_object(bucket_name, object_name+'-copy')

    policy_name = client.get_bucket_policy(bucket_name)
    if policy_name != Policy.NONE:
        raise ValueError('Policy name is invalid ' + policy_name)

    # Set read-only policy successfully.
    client.set_bucket_policy(bucket_name, '1/', Policy.READ_ONLY)

    # Set read-write policy successfully.
    client.set_bucket_policy(bucket_name, '1/', Policy.READ_WRITE)

    # Reset policy to NONE.
    client.set_bucket_policy(bucket_name, '', Policy.NONE)

    # Validate if the policy is reverted back to NONE.
    policy_name = client.get_bucket_policy(bucket_name)
    if policy_name != Policy.NONE:
        raise ValueError('Policy name is invalid ' + policy_name)

    # Upload some new objects to prepare for multi-object delete test.
    print("Prepare for remove_objects() test.")
    object_names = []
    for i in range(10):
        curr_object_name = object_name+"-{}".format(i)
        # print("object-name: {}".format(curr_object_name))
        client.fput_object(bucket_name, curr_object_name, "testfile")
        object_names.append(curr_object_name)

    # delete the objects in a single library call.
    print("Performing remove_objects() test.")
    del_errs = client.remove_objects(bucket_name, object_names)
    had_errs = False
    for del_err in del_errs:
        had_errs = True
        print("Remove objects err is {}".format(del_err))
    if had_errs:
        raise ValueError('Removing objects FAILED - it had unexpected errors.')
    else:
        print("Removing objects worked as expected.")

    # Remove a bucket. This operation will only work if your bucket is empty.
    print("Deleting buckets and finishing tests.")
    client.remove_bucket(bucket_name)
    if client._endpoint_url.startswith("s3.amazonaws"):
        client.remove_bucket(bucket_name+'.unique')

    # Remove temporary files.
    os.remove('testfile')
    os.remove('newfile')
    os.remove('newfile-f')
    os.remove('largefile')
    os.remove('newfile-f-custom')
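
Note that the `Policy` enum and the prefix argument used above only exist in older minio-py releases; current releases take a raw JSON policy document instead. A sketch of the read-only case under the modern API, reusing the client and bucket name from this test (the statements below mirror the standard anonymous read-only document):

import json

read_only_policy = {
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Principal": {"AWS": ["*"]},
            "Action": ["s3:GetBucketLocation", "s3:ListBucket"],
            "Resource": ["arn:aws:s3:::minio-pytest"],
        },
        {
            "Effect": "Allow",
            "Principal": {"AWS": ["*"]},
            "Action": ["s3:GetObject"],
            "Resource": ["arn:aws:s3:::minio-pytest/*"],
        },
    ],
}
# Modern minio-py expects the policy as a JSON string and has no prefix argument.
client.set_bucket_policy('minio-pytest', json.dumps(read_only_policy))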
Example #6
# Copyright (C) 2016 MinIO, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
# dummy values, please replace them with original values.

from minio import Minio
from minio.error import ResponseError

client = Minio('s3.amazonaws.com', secure=True,
               access_key='YOUR-ACCESSKEYID',
               secret_key='YOUR-SECRETACCESSKEY')

try:
    # Get current policy of bucket 'my-bucketname'.
    print(client.get_bucket_policy('my-bucketname'))
except ResponseError as err:
    print(err)
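
A bucket that exists but has no policy set surfaces as a more specific error in older minio-py, `NoSuchBucketPolicy` (Example #8 below catches it). A sketch of handling that case explicitly, assuming the class lives in `minio.error` as that example implies:

from minio.error import NoSuchBucketPolicy

try:
    print(client.get_bucket_policy('my-bucketname'))
except NoSuchBucketPolicy:
    print("bucket 'my-bucketname' has no policy set")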
Example #7
class Operation:
    client = None

    def __init__(self):
        minio_conf = conf.Conf()
        self.client = Minio(minio_conf.get_endpoint(),
                            access_key=minio_conf.get_access_key(),
                            secret_key=minio_conf.get_secret_key(),
                            secure=minio_conf.get_secure())

    def make_bucket(self, bucket_name):
        try:
            self.client.make_bucket(bucket_name=bucket_name)
        except ResponseError as err:
            print(err)
            exit(0)
        print("cminio: create [%s] bucket success" % bucket_name)

    def list_buckets(self):
        buckets = self.client.list_buckets()
        print('[bucket name]\t', '[create time]\t')
        for bucket in buckets:
            print('%s\t%s\t' % (bucket.name, bucket.creation_date))

    def bucket_exists(self, bucket_name):
        try:
            if self.client.bucket_exists(bucket_name=bucket_name):
                print "cminio: the [%s] bucket is exist" % bucket_name
            else:
                print "cminio: the [%s] bucket is not exist" % bucket_name
        except ResponseError as err:
            print err

    def list_objects(self, prefix, bucket_name, recursive):
        objects = self.client.list_objects(bucket_name=bucket_name,
                                           prefix=prefix,
                                           recursive=recursive)
        print "[bucket name]\t[object name]\t[object last modified]\t[etag]\t[size]\t[content type]\t"
        i = 0
        for object in objects:
            print "%s\t%s\t%s\t%s\t%s\t%s\t" % (
                object.bucket_name, object.object_name.encode('utf-8'),
                object.last_modified, object.etag, object.size,
                object.content_type)
            i = i + 1
        print "\n Total: %s object[s]" % i

    def get_bucket_policy(self, bucket_name):
        policy = self.client.get_bucket_policy(bucket_name=bucket_name)
        print "[%s] policy: %s" % (bucket_name, policy)

    def set_bucket_policy(self, bucket_name, path):
        bucket_policy = policy.Policy()
        self.client.set_bucket_policy(bucket_name,
                                      bucket_policy.policy_from_disk(path))

    def get_bucket_notification(self, bucket_name):
        notification = self.client.get_bucket_notification(
            bucket_name=bucket_name)
        print "[%s] bucket: %s" % (bucket_name, notification)

    def set_bucket_notification(self, bucket_name, notification):
        try:
            self.client.set_bucket_notification(bucket_name, notification)
        except ResponseError as err:
            print(err)

    def remove_all_bucket_notification(self, bucket_name):
        self.client.remove_all_bucket_notification(bucket_name)

    def get_object(self,
                   bucket_name,
                   object_name,
                   request_headers=None,
                   output=None):
        try:
            data = self.client.get_object(bucket_name,
                                          object_name,
                                          request_headers=request_headers)
            with open(output, "wb") as file_data:
                for d in data.stream(32 * 1024):
                    file_data.write(d)
        except ResponseError as err:
            print(err)
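
A brief usage sketch of this wrapper; `conf.Conf` is the project's own settings module, and the bucket and object names here are hypothetical:

op = Operation()
op.make_bucket('demo-bucket')
op.bucket_exists('demo-bucket')
op.list_buckets()
op.get_bucket_policy('demo-bucket')
op.get_object('demo-bucket', 'report.csv', output='/tmp/report.csv')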
Example #8
File: base_task.py Project: wtfuii/daemon
class BaseTask():
    name = 'BaseTask'
    body_id = None
    services = []

    def __init__(self):
        self.config = get_config(os.getenv('APPLICATION_MODE',
                                           'DEVELOPMENT'))()
        self.init_logging()
        self.init_db()
        self.default_config = {
            "id": "",
            "rgs": "",
            "url": "",
            "force_full_sync": 0,
            "wait_time": 0.2,
            "geofabrik_package": None,
            "osm_relation": None,
            "name": None
        }

    def init_logging(self):
        # prepare datalog
        self.datalog = logging.getLogger('datalog')
        self.datalog.setLevel(logging.DEBUG)

        datalog_file_handler = logging.FileHandler(
            "%s/%s-%s-%s.log" %
            (self.config.LOG_DIR,
             (time.strftime("%Y-%m-%d--%H-%M-%S")), self.name, self.body_id))
        datalog_file_handler.setFormatter(
            logging.Formatter('%(asctime)s %(levelname)s: %(message)s '))
        datalog_file_handler.setLevel(logging.DEBUG)
        self.datalog.addHandler(datalog_file_handler)

        datalog_stream_handler = logging.StreamHandler()
        datalog_stream_handler.setFormatter(
            logging.Formatter('%(asctime)s %(levelname)s: %(message)s '))
        datalog_stream_handler.setLevel(logging.DEBUG)
        self.datalog.addHandler(datalog_stream_handler)

    def init_db(self):
        if 'mongodb' in self.services:
            mongoengine.connect(db=self.config.MONGO_DB_NAME,
                                host=self.config.MONGO_DB_HOST,
                                port=self.config.MONGO_DB_PORT,
                                connect=False)
            try:
                self.db_raw_client = pymongo.MongoClient(
                    host=self.config.MONGO_DB_HOST,
                    port=self.config.MONGO_DB_PORT,
                    connect=False)
                self.db_raw_client.server_info()
            except pymongo.errors.ServerSelectionTimeoutError as err:
                sys.exit('fatal: connection to MongoDB can\'t be established.')
            self.db_raw = self.db_raw_client[self.config.MONGO_DB_NAME]
        self.s3 = None
        if 's3' in self.services:
            self.s3 = Minio(self.config.S3_ENDPOINT,
                            access_key=self.config.S3_ACCESS_KEY,
                            secret_key=self.config.S3_SECRET_KEY,
                            secure=self.config.S3_SECURE)
            try:
                if not self.s3.bucket_exists(self.config.S3_BUCKET):
                    self.s3.make_bucket(self.config.S3_BUCKET,
                                        location=self.config.S3_LOCATION)
            except (MaxRetryError, ResponseError) as err:
                sys.exit('fatal: connection to Minio can\'t be established.')
            # Policies
            needs_policy_update = False
            try:
                policies = json.loads(
                    self.s3.get_bucket_policy(
                        self.config.S3_BUCKET).decode("utf-8"))
                if self.config.ENABLE_PROCESSING and len(
                        policies['Statement']) == 3:
                    needs_policy_update = True
            except NoSuchBucketPolicy:
                needs_policy_update = True
            if needs_policy_update:
                policies = [{
                    'Effect':
                    'Allow',
                    'Principal': {
                        'AWS': ['*']
                    },
                    'Action': ['s3:GetBucketLocation'],
                    'Resource': ['arn:aws:s3:::%s' % self.config.S3_BUCKET]
                }, {
                    'Effect':
                    'Allow',
                    'Principal': {
                        'AWS': ['*']
                    },
                    'Action': ['s3:ListBucket'],
                    'Resource': ['arn:aws:s3:::%s' % self.config.S3_BUCKET],
                    'Condition': {
                        'StringEquals': {
                            's3:prefix': ['files']
                        }
                    }
                }, {
                    'Effect':
                    'Allow',
                    'Principal': {
                        'AWS': ['*']
                    },
                    'Action': ['s3:GetObject'],
                    'Resource':
                    ['arn:aws:s3:::%s/files*' % self.config.S3_BUCKET]
                }]
                if self.config.ENABLE_PROCESSING:
                    policies.append({
                        'Effect':
                        'Allow',
                        'Principal': {
                            'AWS': ['*']
                        },
                        'Action': ['s3:ListBucket'],
                        'Resource':
                        ['arn:aws:s3:::%s' % self.config.S3_BUCKET],
                        'Condition': {
                            'StringEquals': {
                                's3:prefix': ['file-thumbnails']
                            }
                        }
                    })
                    policies.append({
                        'Effect':
                        'Allow',
                        'Principal': {
                            'AWS': ['*']
                        },
                        'Action': ['s3:GetObject'],
                        'Resource': [
                            'arn:aws:s3:::%s/file-thumbnails*' %
                            self.config.S3_BUCKET
                        ]
                    })
                self.s3.set_bucket_policy(
                    self.config.S3_BUCKET,
                    json.dumps({
                        'Version': '2012-10-17',
                        'Statement': policies
                    }))
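                # Net effect: anonymous clients can locate the bucket, list the
                # 'files' prefix, and GET objects under 'files*'; when
                # ENABLE_PROCESSING is on, two extra statements do the same for
                # 'file-thumbnails', which is why the len(...) == 3 check above
                # forces an update on older buckets.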
        self.es = None
        if 'elasticsearch' in self.services:
            self.es = Elasticsearch(self.config.ES_HOSTS)

    def close(self):
        self.close_connections()
        self.close_logging()

    def close_connections(self):
        self.db_raw_client.close()
        self.db_raw_client = None
        mongoengine_disconnect()
        if self.config.ES_ENABLED and self.es:
            for conn in self.es.transport.connection_pool.connections:
                conn.pool.close()
            self.es = None

    def close_logging(self):
        for handler in self.datalog.handlers:
            self.datalog.removeHandler(handler)

    def get_file(self, file, save_to):
        if file.storedAtMirror:
            if not file.mirrorAccessUrl:
                return False
            r = requests.get(file.mirrorAccessUrl, stream=True)
            if r.status_code != 200:
                return False
            with open(save_to, 'wb') as file_data:
                for chunk in r.iter_content(chunk_size=32 * 1024):
                    if chunk:
                        file_data.write(chunk)
            return True
        else:
            try:
                data = self.s3.get_object(
                    self.config.S3_BUCKET,
                    "files/%s/%s" % (file.body.id, file.id))
            except NoSuchKey:
                return False
            with open(save_to, 'wb') as file_data:
                for chunk in data.stream(32 * 1024):
                    file_data.write(chunk)
            return True

    def execute(self, cmd, body_id):
        new_env = os.environ.copy()
        new_env['XDG_RUNTIME_DIR'] = '/tmp/'
        try:
            output, error = subprocess.Popen(
                cmd.split(' '),
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                env=new_env).communicate(
                    timeout=self.config.SUBPROCESS_TIMEOUT)
        except subprocess.TimeoutExpired:
            self.send_mail(self.config.ADMINS,
                           'critical error at oparl-mirror',
                           'command %s at %s takes forever' % (cmd, body_id))
            return None
        try:
            err_text = error.decode() if error is not None else ''
            if err_text.strip() != '' and \
                    'WARNING **: clutter failed 0, get a life.' not in err_text:
                self.datalog.debug("pdf output at command %s; output: %s" %
                                   (cmd, err_text))
        except UnicodeDecodeError:
            self.datalog.debug("pdf output at command %s; output: %s" %
                               (cmd, error))
        return output

    def get_body_config(self, body_id=False, filename=False):
        if not filename:
            filename = self.get_body_config_file(body_id)
        try:
            with open('%s/%s' %
                      (self.config.BODY_DIR, filename)) as body_config_file:
                if not body_config_file:
                    return None
                try:
                    body_config = deepcopy(self.default_config)
                    body_config.update(
                        yaml.load(body_config_file, Loader=yaml.SafeLoader))
                    if not body_config.get('active'):
                        return None
                    if self.config.BODY_LIST_MODE == 'blacklist':
                        if body_config.get('id') in self.config.BODY_LIST:
                            return None
                    else:
                        if body_config.get('id') not in self.config.BODY_LIST:
                            return None
                    return body_config
                except ValueError:
                    return None
        except FileNotFoundError:
            return None

    def get_region_config(self, region_id):
        filename = self.get_region_config_file(region_id)
        try:
            with open(
                    '%s/%s' %
                (self.config.REGION_DIR, filename)) as region_config_file:
                if not region_config_file:
                    return None
                try:
                    region_config = yaml.load(region_config_file,
                                              Loader=yaml.SafeLoader)
                    if not region_config.get('active'):
                        return None
                    if self.config.REGION_LIST_MODE == 'blacklist':
                        if region_config.get('id') in self.config.REGION_LIST:
                            return None
                    else:
                        if region_config.get(
                                'id') not in self.config.REGION_LIST:
                            return None
                    return region_config
                except ValueError:
                    return None
        except FileNotFoundError:
            return None

    def send_mail(self, receivers=None, subject='', body=''):
        smtp = smtplib.SMTP()
        smtp.connect(self.config.MAIL_SERVER, self.config.MAIL_PORT)
        smtp.ehlo_or_helo_if_needed()
        if self.config.MAIL_USE_TLS:
            smtp.starttls()
            smtp.ehlo_or_helo_if_needed()
        smtp.login(user=self.config.MAIL_USERNAME,
                   password=self.config.MAIL_PASSWORD)
        body_full = '\r\n'.join([
            'To: %s' % ', '.join(receivers),
            'From: %s' % self.config.MAIL_FROM,
            'Subject: %s' % subject, '', body
        ])
        smtp.sendmail(self.config.MAIL_FROM, receivers, body_full)

    def get_body_config_file(self, body_id):
        for filename in os.listdir(self.config.BODY_DIR):
            if filename.startswith('%s' % body_id):
                return filename

    def get_region_config_file(self, region_id):
        for filename in os.listdir(self.config.REGION_DIR):
            if filename.startswith('%s' % region_id):
                return filename
Example #9
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
# dummy values, please replace them with original values.

from minio import Minio
from minio.error import ResponseError

client = Minio('s3.amazonaws.com', secure=True,
               access_key='YOUR-ACCESSKEYID',
               secret_key='YOUR-SECRETACCESSKEY')

try:
    # Get current policy of bucket 'my-bucketname'.
    print(client.get_bucket_policy('my-bucketname'))

    # Get current policy of bucket 'my-bucketname' and prefix 'my-prefix'.
    print(client.get_bucket_policy('my-bucketname', 'my-prefix'))
except ResponseError as err:
    print(err)
Example #10
class MinioAdapter(BaseStorageAdapter):
    def __init__(self, endpoint=None, access_key=None, secret_key=None, secure=False, *args, **kwargs):
        # Initialize minioClient with an endpoint and access/secret keys.

        super().__init__(*args, **kwargs)
        try:
            self._client = Minio(endpoint=endpoint,
                                 access_key=access_key,
                                 secret_key=secret_key,
                                 secure=secure)
        except KeyError as err:
            logger.error(err)
            raise Exception("Please enter proper HOSTNAME,AWS_ACCESS_KEY_ID,AWS_SECRET_ACCESS_KEY in .env ")

    def bucket_exists(self, bucket_name):
        return self._client.bucket_exists(bucket_name)

    def create_bucket(self, bucket_name):

        try:
            if not self._client.bucket_exists(bucket_name):
                self._client.make_bucket(bucket_name)
        except ResponseError:
            raise

    def remove_bucket(self, bucket_name):
        try:
            if self._client.bucket_exists(bucket_name):
                self._client.remove_bucket(bucket_name)
        except Exception as e:
            raise e

    def remove_file(self, bucket_name, file_name):
        try:
            if self._client.bucket_exists(bucket_name):
                if self._client.get_object(bucket_name, file_name):
                    self._client.remove_object(bucket_name, file_name)
        except Exception as e:
            raise e

    def get_bucket_list(self):
        bucket_list = self._client.list_buckets()
        return bucket_list

    def get_all_files(self, bucket_name):
        pass

    def upload_file(self, bucket_name, file_name, file_path):
        if (not self._client.bucket_exists(bucket_name)):
            self._client.make_bucket(bucket_name=bucket_name)
        try:
            self._client.fput_object(bucket_name=bucket_name, object_name=file_name, file_path=file_path)
            self.logger.info(f"Uploaded file {file_name}")
        except ResponseError as e:
            self.logger.error(e)
            raise Exception(e)

    def upload_data_stream(self, bucket_name, file_name, data_stream, length):

        if (not self._client.bucket_exists(bucket_name)):
            self._client.make_bucket(bucket_name=bucket_name)

        try:

            self._client.put_object(bucket_name=bucket_name, object_name=file_name, data=data_stream, length=length)
        except ResponseError as err:
            self.logger.error(err)
            raise err

    def download_all_files(self, bucket_name, download_path):
        try:
            if (self._client.bucket_exists(bucket_name)):
                obj_list = self._client.list_objects(bucket_name)
                for obj in obj_list:
                    # list_objects yields Object entries; fetch each by key name.
                    self._client.fget_object(
                        bucket_name=bucket_name,
                        object_name=obj.object_name,
                        file_path=download_path + '/' + obj.object_name)
        except Exception as e:
            self.logger.error(e)
            raise e

    def download_n_files(self, bucket_name, download_path, num_of_files):
        try:
            count = 0
            for obj in self._client.list_objects(bucket_name):
                self._client.fget_object(
                    bucket_name=bucket_name,
                    object_name=obj.object_name,
                    file_path=download_path + '/' + obj.object_name)
                count = count + 1
                if count == num_of_files:
                    break
        except ResponseError as e:
            self.logger.error(e)
            raise e

    def count_files(self, bucket_name):
        # list_objects returns a generator, so count by iterating it.
        return sum(1 for _ in self._client.list_objects(bucket_name))

    def get_policy(self, bucket_name):
        policy = self._client.get_bucket_policy(bucket_name)
        return policy

    def set_policy(self, bucket_name, policy):
        self._client.set_bucket_policy(bucket_name, policy)
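
A short usage sketch of the adapter; the endpoint, credentials, and names are placeholders, and it assumes `BaseStorageAdapter` needs no extra constructor arguments:

adapter = MinioAdapter(endpoint='localhost:9000',
                       access_key='YOUR-ACCESSKEYID',
                       secret_key='YOUR-SECRETACCESSKEY',
                       secure=False)
adapter.create_bucket('demo-bucket')
adapter.upload_file('demo-bucket', 'hello.txt', '/tmp/hello.txt')
print(adapter.count_files('demo-bucket'))
print(adapter.get_policy('demo-bucket'))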
Example #11
# -*- coding: utf-8 -*-
# MinIO Python Library for Amazon S3 Compatible Cloud Storage.
# Copyright (C) 2016 MinIO, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from minio import Minio

client = Minio(
    "play.min.io",
    access_key="Q3AM3UQ867SPQQA43P2F",
    secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)

policy = client.get_bucket_policy("my-bucket")
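
Unlike the `ResponseError` used in the older snippets, the modern client raises `minio.error.S3Error`; a bucket with no policy comes back with the `NoSuchBucketPolicy` error code. A sketch:

from minio.error import S3Error

try:
    policy = client.get_bucket_policy("my-bucket")
    print(policy)
except S3Error as err:
    if err.code == "NoSuchBucketPolicy":
        print("my-bucket has no policy set")
    else:
        raise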
Example #12
        "Resource": "arn:aws:s3:::mybucket"
    }, {
        "Sid": "",
        "Effect": "Allow",
        "Principal": {
            "AWS": "*"
        },
        "Action": "s3:GetObject",
        "Resource": "arn:aws:s3:::mybucket/*"
    }]
}

minioClient.set_bucket_policy('mybucket', json.dumps(policy_read_only))

# Get current policy of all object paths in bucket "mybucket".
policy = minioClient.get_bucket_policy('mybucket')
print(policy)

notification = {
    'QueueConfigurations': [{
        'Id': '1',
        'Arn': 'arn1',
        'Events': ['s3:ObjectCreated:*'],
        'Filter': {
            'Key': {
                'FilterRules': [{
                    'Name': 'prefix',
                    'Value': 'abc'
                }]
            }
        }
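
The snippet is cut off mid-structure. Assuming it follows the standard minio-py notification example, the dictionary would close and be applied roughly like this (a hypothetical continuation, reusing the placeholder ARN from above):

# Hypothetical continuation of the truncated snippet above.
notification = {
    'QueueConfigurations': [{
        'Id': '1',
        'Arn': 'arn1',
        'Events': ['s3:ObjectCreated:*'],
        'Filter': {
            'Key': {'FilterRules': [{'Name': 'prefix', 'Value': 'abc'}]}
        }
    }]
}
minioClient.set_bucket_notification('mybucket', notification)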
Example #13
import os
import sys

from minio import Minio
from minio.error import ResponseError

bucket_name = 'test101'

client = Minio('your-s3-endpoint.com',
               access_key='xxxxxxxxxxxxxx',
               secret_key='xxxxxxxxxxxxxx',
               secure=True)

print("\n=== check if bucket '%s' exists ===" % bucket_name)
try:
    print(client.bucket_exists(bucket_name))
    print('\n=== check bucket policy ===')
    print(client.get_bucket_policy(bucket_name))
except ResponseError as err:
    print(err)
    sys.exit(1)

print("\n=== list all buckets '%s' ===" % bucket_name)
buckets = client.list_buckets()
for bucket in buckets:
    print(bucket.name, bucket.creation_date)

print("\n=== put object to buckets '%s' ===" % bucket_name)
try:
    client.fput_object(bucket_name,
                       'test1.csv',
                       'test1.csv',
Example #14
from minio import Minio
client = Minio("s3.embl.de", access_key="ysun-user", secret_key="PZx9Djtl7yeV7Kp5k8Gsm7y2SFJ2Gw6W", secure=True)
buckets = client.list_buckets()
for bucket in buckets:
    print(bucket.name)

print(client.bucket_exists('platybrowser'))
print(client.get_bucket_policy('platybrowser'))
Example #15
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
# dummy values, please replace them with original values.

from minio import Minio
from minio.error import ResponseError

client = Minio('s3.amazonaws.com',
               secure=True,
               access_key='YOUR-ACCESSKEYID',
               secret_key='YOUR-SECRETACCESSKEY')

try:
    # Get current policy of bucket 'my-bucketname'.
    print(client.get_bucket_policy('my-bucketname'))
except ResponseError as err:
    print(err)
Example #16
def main():
    """
    Functional testing of minio python library.
    """
    fake = Factory.create()
    client = Minio('s3.amazonaws.com', os.getenv('ACCESS_KEY'),
                   os.getenv('SECRET_KEY'))

    _http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED',
                                ca_certs=certifi.where())

    # Get unique bucket_name, object_name.
    bucket_name = str(uuid.uuid4())
    object_name = str(uuid.uuid4())

    # Enable trace
    # client.trace_on(sys.stderr)

    # Make a new bucket.
    bucket_name = 'minio-pytest'

    print(client.make_bucket(bucket_name))
    print(client.make_bucket(bucket_name + '.unique', location='us-west-1'))

    # Check that the server returns valid error codes.
    try:
        client.make_bucket(bucket_name + '.unique', location='us-west-1')
    except ResponseError as err:
        if str(err.code) in ['BucketAlreadyOwnedByYou', 'BucketAlreadyExists']:
            pass
        else:
            raise

    # Check if bucket was created properly.
    print(client.bucket_exists(bucket_name))
    print(client.bucket_exists(bucket_name + '.unique'))

    # List all buckets.
    buckets = client.list_buckets()
    for bucket in buckets:
        print(bucket.name, bucket.creation_date)

    with open('testfile', 'wb') as file_data:
        file_data.write(fake.text().encode('utf-8'))

    # Put a file
    file_stat = os.stat('testfile')
    with open('testfile', 'rb') as file_data:
        client.put_object(bucket_name, object_name, file_data,
                          file_stat.st_size)

    # Fput a file
    print(client.fput_object(bucket_name, object_name + '-f', 'testfile'))

    # Copy a file
    print(
        client.copy_object(bucket_name, object_name + '-copy',
                           '/' + bucket_name + '/' + object_name + '-f'))

    try:
        copy_conditions = CopyConditions()
        copy_conditions.set_match_etag('test-etag')
        print(
            client.copy_object(bucket_name, object_name + '-copy',
                               '/' + bucket_name + '/' + object_name + '-f',
                               copy_conditions))
    except ResponseError as err:
        if err.code != 'PreconditionFailed':
            raise
        if err.message != 'At least one of the pre-conditions you specified did not hold':
            raise

    # Fetch stats on your object.
    print(client.stat_object(bucket_name, object_name))

    # Fetch stats on your object.
    print(client.stat_object(bucket_name, object_name + '-f'))

    # Fetch stats on your object.
    print(client.stat_object(bucket_name, object_name + '-copy'))

    # Get a full object
    object_data = client.get_object(bucket_name, object_name)
    with open('newfile', 'wb') as file_data:
        for data in object_data:
            file_data.write(data)

    # Get a full object locally.
    print(client.fget_object(bucket_name, object_name, 'newfile-f'))

    # List all object paths in bucket.
    print("Listing using ListObjects API")
    objects = client.list_objects(bucket_name, recursive=True)
    for obj in objects:
        print(obj.bucket_name, obj.object_name, obj.last_modified,
              obj.etag, obj.size, obj.content_type)

    # List all object paths in bucket using V2 API.
    print("Listing using ListObjectsV2 API")
    objects = client.list_objects_v2(bucket_name, recursive=True)
    for obj in objects:
        print(obj.bucket_name, obj.object_name, obj.last_modified,
              obj.etag, obj.size, obj.content_type)

    presigned_get_object_url = client.presigned_get_object(
        bucket_name, object_name)
    response = _http.urlopen('GET', presigned_get_object_url)
    if response.status != 200:
        response_error = ResponseError(response)
        raise response_error.get(bucket_name, object_name)

    presigned_put_object_url = client.presigned_put_object(
        bucket_name, object_name)
    value = fake.text().encode('utf-8')
    data = io.BytesIO(value).getvalue()
    response = _http.urlopen('PUT', presigned_put_object_url, body=data)
    if response.status != 200:
        response_error = ResponseError(response)
        raise response_error.put(bucket_name, object_name)

    object_data = client.get_object(bucket_name, object_name)
    if object_data.read() != value:
        raise ValueError('Bytes not equal')

    # Post policy.
    policy = PostPolicy()
    policy.set_bucket_name(bucket_name)
    policy.set_key_startswith('objectPrefix/')

    expires_date = datetime.utcnow() + timedelta(days=10)
    policy.set_expires(expires_date)
    print(client.presigned_post_policy(policy))

    # Remove an object.
    print(client.remove_object(bucket_name, object_name))
    print(client.remove_object(bucket_name, object_name + '-f'))
    print(client.remove_object(bucket_name, object_name + '-copy'))

    policy_name = client.get_bucket_policy(bucket_name)
    if policy_name != Policy.NONE:
        raise ValueError('Policy name is invalid ' + policy_name)

    # Set read-write policy successfully.
    client.set_bucket_policy(bucket_name, '', Policy.READ_WRITE)

    # Reset policy to NONE.
    client.set_bucket_policy(bucket_name, '', Policy.NONE)

    # Validate if the policy is reverted back to NONE.
    policy_name = client.get_bucket_policy(bucket_name)
    if policy_name != Policy.NONE:
        raise ValueError('Policy name is invalid ' + policy_name)

    # Upload some new objects to prepare for multi-object delete test.
    print("Prepare for remove_objects() test.")
    object_names = []
    for i in range(10):
        curr_object_name = object_name + "-{}".format(i)
        print("object-name: {}".format(curr_object_name))
        print(client.fput_object(bucket_name, curr_object_name, "testfile"))
        object_names.append(curr_object_name)

    # delete the objects in a single library call.
    print("Performing remove_objects() test.")
    del_errs = client.remove_objects(bucket_name, object_names)
    had_errs = False
    for del_err in del_errs:
        had_errs = True
        print("Err is {}".format(del_err))
    if had_errs:
        print("remove_objects() FAILED - it had unexpected errors.")
    else:
        print("remove_objects() worked as expected.")

    # Remove a bucket. This operation will only work if your bucket is empty.
    print(client.remove_bucket(bucket_name))
    print(client.remove_bucket(bucket_name + '.unique'))

    # Remove temporary files.
    os.remove('testfile')
    os.remove('newfile')
    os.remove('newfile-f')