Code example #1
def get_s3_connection(aws_connect_kwargs, location, rgw, s3_url):
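    # rgw selects a Ceph RADOS Gateway endpoint; when it is set together with
    # s3_url, the connection is built directly against that URL instead of AWS S3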
    if s3_url and rgw:
        rgw = urlparse(s3_url)
        s3 = boto.connect_s3(is_secure=rgw.scheme == 'https',
                             host=rgw.hostname,
                             port=rgw.port,
                             calling_format=OrdinaryCallingFormat(),
                             **aws_connect_kwargs)
    elif is_fakes3(s3_url):
        fakes3 = urlparse(s3_url)
        s3 = S3Connection(is_secure=fakes3.scheme == 'fakes3s',
                          host=fakes3.hostname,
                          port=fakes3.port,
                          calling_format=OrdinaryCallingFormat(),
                          **aws_connect_kwargs)
    elif is_walrus(s3_url):
        walrus = urlparse(s3_url).hostname
        s3 = boto.connect_walrus(walrus, **aws_connect_kwargs)
    else:
        aws_connect_kwargs['is_secure'] = True
        try:
            s3 = connect_to_aws(boto.s3, location, **aws_connect_kwargs)
        except AnsibleAWSError:
            # use this as fallback because connect_to_region seems to fail in boto + non 'classic' aws accounts in some cases
            s3 = boto.connect_s3(**aws_connect_kwargs)
    return s3
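The helper above relies on names imported elsewhere in the module (is_fakes3 and is_walrus are small helpers defined alongside it). Below is a minimal sketch of the assumed imports and a hypothetical call site; the import paths and the call itself are assumptions based on the other examples on this page, not part of the original snippet:

import boto
from boto.s3.connection import OrdinaryCallingFormat, S3Connection, Location
from urlparse import urlparse  # urllib.parse on Python 3
from ansible.module_utils.ec2 import (AnsibleAWSError, connect_to_aws,
                                      get_aws_connection_info)

# hypothetical call site, mirroring the region handling in the later examples;
# `module` is assumed to be an AnsibleModule built as in code example #8
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
location = Location.DEFAULT if region in ('us-east-1', '', None) else region
s3 = get_s3_connection(aws_connect_kwargs, location,
                       rgw=False, s3_url=module.params.get('s3_url'))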
Code example #2
def s3_to_df(s3_file_key):
    """
    Downloads file from S3 and converts it to a Pandas DataFrame. Deletes the file from local disk when done.

    Parameters
    ----------
    s3_file_key: str
        Eucalyptus S3 file key

    Returns
    -------
    Pandas DataFrame
    """
    # Add random number to file name to avoid collisions with other processes on the same machine
    filename = '/tmp/{}_{}'.format(s3_file_key.replace('/', '_'),
                                   random.randint(1, 10**6))
    """ Amazon S3 code """
    # s3 = boto3.client('s3')
    # s3.download_file(S3_BUCKET, s3_file_key, filename)
    """ Eucalyptus S3 code """
    s3conn = boto.connect_walrus(aws_access_key_id=EUCA_KEY_ID,
                                 aws_secret_access_key=EUCA_SECRET_KEY,
                                 is_secure=False,
                                 port=8773,
                                 path=EUCA_S3_PATH,
                                 host=EUCA_S3_HOST)
    euca_bucket = s3conn.get_bucket(S3_BUCKET)
    k = boto.s3.key.Key(bucket=euca_bucket, name=s3_file_key)
    k.get_contents_to_filename(filename)

    df = pd.read_csv(filename)
    os.remove(filename)
    return df
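The function assumes module-level imports and Eucalyptus connection constants defined elsewhere. A rough sketch of that setup with placeholder values (none of the values below come from the source):

import os
import random

import boto
import boto.s3.key
import pandas as pd

# placeholder configuration; real values depend on the Eucalyptus deployment
S3_BUCKET = 'my-bucket'
EUCA_KEY_ID = 'placeholder-access-key'
EUCA_SECRET_KEY = 'placeholder-secret-key'
EUCA_S3_HOST = 'walrus.example.internal'
EUCA_S3_PATH = '/services/objectstorage'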
Code example #3
def main():
        # PARSE OPTIONS 
        parser = OptionParser()
        parser.add_option("--user1-access", dest="user1_access",
            help="Access key for user1")
        parser.add_option("--user2-access", dest="user2_access",
            help="Access key for user2")
        parser.add_option("--user1-secret", dest="user1_secret",
            help="Secret key for user2")
        parser.add_option("--user2-secret", dest="user2_secret",
            help="Secret key for user2")
        parser.add_option("-u","--url", dest="url",
            help="URL for S3/Walrus")        
        (options, args) = parser.parse_args()
        ### LOAD OPTIONS INTO LOCAL VARS
        user1_access = options.user1_access
        user2_access = options.user2_access
        user1_secret = options.user1_secret
        user2_secret = options.user2_secret
        url = options.url
        bucket_name =  "newbuck" + str(int(time.time()))
        key_name = "testfile"
        ## CREATE 2 walrus connections
        try:
            walrus_user1 = boto.connect_walrus(host=url, aws_access_key_id=user1_access, aws_secret_access_key=user1_secret,debug=0)
        except Exception as e:
            fail("Failure creating first walrus Boto connection: " + str(e))
Code example #4
def upload_to_s3(filepath, filename, job_id):
    """
    Uploads a file to Eucalyptus S3.

    Parameters
    ----------
    filepath: str
        Local path to the file
    filename: str
        Name of the file with the extension, example: file.csv
    job_id: str

    Returns
    -------
    str
        Eucalyptus S3 key generated for this file
    """
    s3_file_key = generate_s3_file_key(job_id, filename)
    """ Amazon S3 code """
    # s3 = boto3.resource('s3')
    # s3.meta.client.upload_file(filepath, S3_BUCKET, s3_file_key)
    """ Eucalyptus S3 code """
    s3conn = boto.connect_walrus(aws_access_key_id=EUCA_KEY_ID,
                                 aws_secret_access_key=EUCA_SECRET_KEY,
                                 is_secure=False,
                                 port=8773,
                                 path=EUCA_S3_PATH,
                                 host=EUCA_S3_HOST)
    euca_bucket = s3conn.get_bucket(S3_BUCKET)
    k = boto.s3.key.Key(bucket=euca_bucket, name=s3_file_key)
    k.set_contents_from_filename(filepath)
    return s3_file_key
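A hypothetical round trip combining this uploader with the s3_to_df reader from code example #2; the local path, file name, and job id are illustrative only:

# upload a local CSV, then read it back into a DataFrame via the generated key
s3_file_key = upload_to_s3('/tmp/results.csv', 'results.csv', job_id='job-42')
df = s3_to_df(s3_file_key)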
Code example #5
File: s3.py  Project: vadikgo/ansible-docker-test
def get_s3_connection(aws_connect_kwargs, location, rgw, s3_url):
    if s3_url and rgw:
        rgw = urlparse(s3_url)
        s3 = boto.connect_s3(
            is_secure=rgw.scheme == 'https',
            host=rgw.hostname,
            port=rgw.port,
            calling_format=OrdinaryCallingFormat(),
            **aws_connect_kwargs
        )
    elif is_fakes3(s3_url):
        fakes3 = urlparse(s3_url)
        s3 = S3Connection(
            is_secure=fakes3.scheme == 'fakes3s',
            host=fakes3.hostname,
            port=fakes3.port,
            calling_format=OrdinaryCallingFormat(),
            **aws_connect_kwargs
        )
    elif is_walrus(s3_url):
        walrus = urlparse(s3_url).hostname
        s3 = boto.connect_walrus(walrus, **aws_connect_kwargs)
    else:
        aws_connect_kwargs['is_secure'] = True
        try:
            s3 = connect_to_aws(boto.s3, location, **aws_connect_kwargs)
        except AnsibleAWSError:
            # use this as fallback because connect_to_region seems to fail in boto + non 'classic' aws accounts in some cases
            s3 = boto.connect_s3(**aws_connect_kwargs)
    return s3
Code example #6
def get_s3_connection(aws_connect_kwargs, location, rgw, s3_url):
    if s3_url and rgw:
        rgw = urlparse(s3_url)
        # ensure none of the named arguments we will pass to boto.connect_s3
        # are already present in aws_connect_kwargs
        for kw in ['is_secure', 'host', 'port', 'calling_format']:
            try:
                del aws_connect_kwargs[kw]
            except KeyError:
                pass
        s3 = boto.connect_s3(
            is_secure=rgw.scheme == 'https',
            host=rgw.hostname,
            port=rgw.port,
            calling_format=OrdinaryCallingFormat(),
            **aws_connect_kwargs
        )
    elif is_fakes3(s3_url):
        fakes3 = urlparse(s3_url)
        # ensure none of the named arguments we will pass to S3Connection
        # are already present in aws_connect_kwargs
        for kw in ['is_secure', 'host', 'port', 'calling_format']:
            try:
                del aws_connect_kwargs[kw]
            except KeyError:
                pass
        s3 = S3Connection(
            is_secure=fakes3.scheme == 'fakes3s',
            host=fakes3.hostname,
            port=fakes3.port,
            calling_format=OrdinaryCallingFormat(),
            **aws_connect_kwargs
        )
    elif is_walrus(s3_url):
        walrus = urlparse(s3_url).hostname
        s3 = boto.connect_walrus(walrus, **aws_connect_kwargs)
    else:
        aws_connect_kwargs['is_secure'] = True
        try:
            s3 = connect_to_aws(boto.s3, location, **aws_connect_kwargs)
        except AnsibleAWSError:
            # use this as fallback because connect_to_region seems to fail in boto + non 'classic' aws accounts in some cases
            s3 = boto.connect_s3(**aws_connect_kwargs)
    return s3
Code example #7
def get_s3_connection(module, aws_connect_kwargs, region, s3_url=None):
    s3 = None
    # allow eucarc environment variables to be used if ansible vars aren't set
    if not s3_url and 'S3_URL' in os.environ:
        s3_url = os.environ['S3_URL']

    if region in ('us-east-1', '', None):
        # S3ism for the US Standard region
        location = Location.DEFAULT
    else:
        # Boto uses symbolic names for locations but region strings will
        # actually work fine for everything except us-east-1 (US Standard)
        location = region

    # Look at s3_url and tweak connection settings
    # if connecting to Walrus or fakes3
    try:
        if is_fakes3(s3_url):
            fakes3 = urlparse.urlparse(s3_url)
            s3 = S3Connection(
                is_secure=fakes3.scheme == 'fakes3s',
                host=fakes3.hostname,
                port=fakes3.port,
                calling_format=OrdinaryCallingFormat(),
                **aws_connect_kwargs
            )
        elif is_walrus(s3_url):
            walrus = urlparse.urlparse(s3_url).hostname
            s3 = boto.connect_walrus(walrus, **aws_connect_kwargs)
        else:
            s3 = boto.s3.connect_to_region(location, is_secure=True, calling_format=OrdinaryCallingFormat(), **aws_connect_kwargs)
            # use this as fallback because connect_to_region seems to fail in boto + non 'classic' aws accounts in some cases
            if s3 is None:
                s3 = boto.connect_s3(**aws_connect_kwargs)

    except boto.exception.NoAuthHandlerFound as e:
        module.fail_json(msg='No Authentication Handler found: %s ' % str(e))
Code example #8
File: s3.py  Project: Devopst03/devopst
def main():
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            bucket=dict(required=True),
            dest=dict(default=None),
            encrypt=dict(default=True, type='bool'),
            expiry=dict(default=600, aliases=['expiration']),
            headers=dict(type='dict'),
            marker=dict(default=None),
            max_keys=dict(default=1000),
            metadata=dict(type='dict'),
            mode=dict(choices=[
                'get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj',
                'list'
            ],
                      required=True),
            object=dict(),
            permission=dict(type='list', default=['private']),
            version=dict(default=None),
            overwrite=dict(aliases=['force'], default='always'),
            prefix=dict(default=None),
            retries=dict(aliases=['retry'], type='int', default=0),
            s3_url=dict(aliases=['S3_URL']),
            src=dict(),
        ), )
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    bucket = module.params.get('bucket')
    encrypt = module.params.get('encrypt')
    expiry = int(module.params['expiry'])
    if module.params.get('dest'):
        dest = os.path.expanduser(module.params.get('dest'))
    headers = module.params.get('headers')
    marker = module.params.get('marker')
    max_keys = module.params.get('max_keys')
    metadata = module.params.get('metadata')
    mode = module.params.get('mode')
    obj = module.params.get('object')
    version = module.params.get('version')
    overwrite = module.params.get('overwrite')
    prefix = module.params.get('prefix')
    retries = module.params.get('retries')
    s3_url = module.params.get('s3_url')
    src = module.params.get('src')

    for acl in module.params.get('permission'):
        if acl not in CannedACLStrings:
            module.fail_json(msg='Unknown permission specified: %s' % str(acl))

    if overwrite not in ['always', 'never', 'different']:
        if module.boolean(overwrite):
            overwrite = 'always'
        else:
            overwrite = 'never'

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)

    if region in ('us-east-1', '', None):
        # S3ism for the US Standard region
        location = Location.DEFAULT
    else:
        # Boto uses symbolic names for locations but region strings will
        # actually work fine for everything except us-east-1 (US Standard)
        location = region

    if module.params.get('object'):
        obj = os.path.expanduser(module.params['object'])

    # allow eucarc environment variables to be used if ansible vars aren't set
    if not s3_url and 'S3_URL' in os.environ:
        s3_url = os.environ['S3_URL']

    # bucket names with .'s in them need to use the calling_format option,
    # otherwise the connection will fail. See https://github.com/boto/boto/issues/2836
    # for more details.
    if '.' in bucket:
        aws_connect_kwargs['calling_format'] = OrdinaryCallingFormat()

    # Look at s3_url and tweak connection settings
    # if connecting to Walrus or fakes3
    try:
        if is_fakes3(s3_url):
            fakes3 = urlparse.urlparse(s3_url)
            s3 = S3Connection(is_secure=fakes3.scheme == 'fakes3s',
                              host=fakes3.hostname,
                              port=fakes3.port,
                              calling_format=OrdinaryCallingFormat(),
                              **aws_connect_kwargs)
        elif is_walrus(s3_url):
            walrus = urlparse.urlparse(s3_url).hostname
            s3 = boto.connect_walrus(walrus, **aws_connect_kwargs)
        else:
            s3 = boto.s3.connect_to_region(location,
                                           is_secure=True,
                                           **aws_connect_kwargs)
            # use this as fallback because connect_to_region seems to fail in boto + non 'classic' aws accounts in some cases
            if s3 is None:
                s3 = boto.connect_s3(**aws_connect_kwargs)

    except boto.exception.NoAuthHandlerFound as e:
        module.fail_json(msg='No Authentication Handler found: %s ' % str(e))
Code example #9
File: s3.py  Project: temyers/ansible-modules-core
def main():
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
            bucket         = dict(required=True),
            dest           = dict(default=None),
            encrypt        = dict(default=True, type='bool'),
            expiry         = dict(default=600, aliases=['expiration']),
            headers        = dict(type='dict'),
            marker         = dict(default=None),
            max_keys       = dict(default=1000),
            metadata       = dict(type='dict'),
            mode           = dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list'], required=True),
            object         = dict(),
            permission     = dict(type='list', default=['private']),
            version        = dict(default=None),
            overwrite      = dict(aliases=['force'], default='always'),
            prefix         = dict(default=None),
            retries        = dict(aliases=['retry'], type='int', default=0),
            s3_url         = dict(aliases=['S3_URL']),
            src            = dict(),
        ),
    )
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    bucket = module.params.get('bucket')
    encrypt = module.params.get('encrypt')
    expiry = int(module.params['expiry'])
    if module.params.get('dest'):
        dest = os.path.expanduser(module.params.get('dest'))
    headers = module.params.get('headers')
    marker = module.params.get('marker')
    max_keys = module.params.get('max_keys')
    metadata = module.params.get('metadata')
    mode = module.params.get('mode')
    obj = module.params.get('object')
    version = module.params.get('version')
    overwrite = module.params.get('overwrite')
    prefix = module.params.get('prefix')
    retries = module.params.get('retries')
    s3_url = module.params.get('s3_url')
    src = module.params.get('src')

    for acl in module.params.get('permission'):
        if acl not in CannedACLStrings:
            module.fail_json(msg='Unknown permission specified: %s' % str(acl))

    if overwrite not in ['always', 'never', 'different']:
        if module.boolean(overwrite):
            overwrite = 'always'
        else:
            overwrite = 'never'

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)

    if region in ('us-east-1', '', None):
        # S3ism for the US Standard region
        location = Location.DEFAULT
    else:
        # Boto uses symbolic names for locations but region strings will
        # actually work fine for everything except us-east-1 (US Standard)
        location = region

    if module.params.get('object'):
        obj = os.path.expanduser(module.params['object'])

    # allow eucarc environment variables to be used if ansible vars aren't set
    if not s3_url and 'S3_URL' in os.environ:
        s3_url = os.environ['S3_URL']

    # bucket names with .'s in them need to use the calling_format option,
    # otherwise the connection will fail. See https://github.com/boto/boto/issues/2836
    # for more details.
    if '.' in bucket:
        aws_connect_kwargs['calling_format'] = OrdinaryCallingFormat()

    # Look at s3_url and tweak connection settings
    # if connecting to Walrus or fakes3
    try:
        if is_fakes3(s3_url):
            fakes3 = urlparse.urlparse(s3_url)
            s3 = S3Connection(
                is_secure=fakes3.scheme == 'fakes3s',
                host=fakes3.hostname,
                port=fakes3.port,
                calling_format=OrdinaryCallingFormat(),
                **aws_connect_kwargs
            )
        elif is_walrus(s3_url):
            walrus = urlparse.urlparse(s3_url).hostname
            s3 = boto.connect_walrus(walrus, **aws_connect_kwargs)
        else:
            aws_connect_kwargs['is_secure'] = True
            try:
                s3 = connect_to_aws(boto.s3, location, **aws_connect_kwargs)
            except AnsibleAWSError:
                # use this as fallback because connect_to_region seems to fail in boto + non 'classic' aws accounts in some cases
                s3 = boto.connect_s3(**aws_connect_kwargs)

    except boto.exception.NoAuthHandlerFound as e:
        module.fail_json(msg='No Authentication Handler found: %s ' % str(e))
    except Exception as e:
        module.fail_json(msg='Failed to connect to S3: %s' % str(e))

    if s3 is None: # this should never happen
        module.fail_json(msg='Unknown error, failed to create s3 connection, no information from boto.')

    # If our mode is a GET operation (download), go through the procedure as appropriate ...
    if mode == 'get':

        # First, we check to see if the bucket exists, we get "bucket" returned.
        bucketrtn = bucket_check(module, s3, bucket)
        if bucketrtn is False:
            module.fail_json(msg="Source bucket cannot be found", failed=True)

        # Next, we check to see if the key in the bucket exists. If it exists, it also returns key_matches md5sum check.
        keyrtn = key_check(module, s3, bucket, obj, version=version)
        if keyrtn is False:
            if version is not None:
                module.fail_json(msg="Key %s with version id %s does not exist."% (obj, version), failed=True)
            else:
                module.fail_json(msg="Key %s does not exist."%obj, failed=True)

        # If the destination path doesn't exist or overwrite is True, no need to do the md5um etag check, so just download.
        pathrtn = path_check(dest)
        if pathrtn is False or overwrite == 'always':
            download_s3file(module, s3, bucket, obj, dest, retries, version=version)

        # Compare the remote MD5 sum of the object with the local dest md5sum, if it already exists.
        if pathrtn is True:
            md5_remote = keysum(module, s3, bucket, obj, version=version)
            md5_local = module.md5(dest)
            if md5_local == md5_remote:
                sum_matches = True
                if overwrite == 'always':
                    download_s3file(module, s3, bucket, obj, dest, retries, version=version)
                else:
                    module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite=always parameter to force.", changed=False)
            else:
                sum_matches = False

                if overwrite in ('always', 'different'):
                    download_s3file(module, s3, bucket, obj, dest, retries, version=version)
                else:
                    module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force download.")

        # Firstly, if key_matches is TRUE and overwrite is not enabled, we EXIT with a helpful message.
        if sum_matches is True and overwrite == 'never':
            module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite parameter to force.", changed=False)

    # if our mode is a PUT operation (upload), go through the procedure as appropriate ...
    if mode == 'put':

        # Use this snippet to debug through conditionals:
#       module.exit_json(msg="Bucket return %s"%bucketrtn)

        # Lets check the src path.
        pathrtn = path_check(src)
        if pathrtn is False:
            module.fail_json(msg="Local object for PUT does not exist", failed=True)

        # Lets check to see if bucket exists to get ground truth.
        bucketrtn = bucket_check(module, s3, bucket)
        if bucketrtn is True:
            keyrtn = key_check(module, s3, bucket, obj)

        # Lets check key state. Does it exist and if it does, compute the etag md5sum.
        if bucketrtn is True and keyrtn is True:
                md5_remote = keysum(module, s3, bucket, obj)
                md5_local = module.md5(src)

                if md5_local == md5_remote:
                    sum_matches = True
                    if overwrite == 'always':
                        upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)
                    else:
                        get_download_url(module, s3, bucket, obj, expiry, changed=False)
                else:
                    sum_matches = False
                    if overwrite in ('always', 'different'):
                        upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)
                    else:
                        module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.")

        # If neither exist (based on bucket existence), we can create both.
        if bucketrtn is False and pathrtn is True:
            create_bucket(module, s3, bucket, location)
            upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)

        # If bucket exists but key doesn't, just upload.
        if bucketrtn is True and pathrtn is True and keyrtn is False:
            upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)

    # Delete an object from a bucket, not the entire bucket
    if mode == 'delobj':
        if obj is None:
            module.fail_json(msg="object parameter is required", failed=True);
        if bucket:
            bucketrtn = bucket_check(module, s3, bucket)
            if bucketrtn is True:
                deletertn = delete_key(module, s3, bucket, obj)
                if deletertn is True:
                    module.exit_json(msg="Object %s deleted from bucket %s." % (obj, bucket), changed=True)
            else:
                module.fail_json(msg="Bucket does not exist.", changed=False)
        else:
            module.fail_json(msg="Bucket parameter is required.", failed=True)


    # Delete an entire bucket, including all objects in the bucket
    if mode == 'delete':
        if bucket:
            bucketrtn = bucket_check(module, s3, bucket)
            if bucketrtn is True:
                deletertn = delete_bucket(module, s3, bucket)
                if deletertn is True:
                    module.exit_json(msg="Bucket %s and all keys have been deleted."%bucket, changed=True)
            else:
                module.fail_json(msg="Bucket does not exist.", changed=False)
        else:
            module.fail_json(msg="Bucket parameter is required.", failed=True)

    # Support for listing a set of keys
    if mode == 'list':
        bucket_object = get_bucket(module, s3, bucket)

        # If the bucket does not exist then bail out
        if bucket_object is None:
            module.fail_json(msg="Target bucket (%s) cannot be found"% bucket, failed=True)

        list_keys(module, bucket_object, prefix, marker, max_keys)

    # Need to research how to create directories without "populating" a key, so this should just do bucket creation for now.
    # WE SHOULD ENABLE SOME WAY OF CREATING AN EMPTY KEY TO CREATE "DIRECTORY" STRUCTURE, AWS CONSOLE DOES THIS.
    if mode == 'create':
        if bucket and not obj:
            bucketrtn = bucket_check(module, s3, bucket)
            if bucketrtn is True:
                module.exit_json(msg="Bucket already exists.", changed=False)
            else:
                module.exit_json(msg="Bucket created successfully", changed=create_bucket(module, s3, bucket, location))
        if bucket and obj:
            bucketrtn = bucket_check(module, s3, bucket)
            if obj.endswith('/'):
                dirobj = obj
            else:
                dirobj = obj + "/"
            if bucketrtn is True:
                keyrtn = key_check(module, s3, bucket, dirobj)
                if keyrtn is True:
                    module.exit_json(msg="Bucket %s and key %s already exists."% (bucket, obj), changed=False)
                else:
                    create_dirkey(module, s3, bucket, dirobj)
            if bucketrtn is False:
                created = create_bucket(module, s3, bucket, location)
                create_dirkey(module, s3, bucket, dirobj)

    # Support for grabbing the time-expired URL for an object in S3/Walrus.
    if mode == 'geturl':
        if bucket and obj:
            bucketrtn = bucket_check(module, s3, bucket)
            if bucketrtn is False:
                module.fail_json(msg="Bucket %s does not exist."%bucket, failed=True)
            else:
                keyrtn = key_check(module, s3, bucket, obj)
                if keyrtn is True:
                    get_download_url(module, s3, bucket, obj, expiry)
                else:
                    module.fail_json(msg="Key %s does not exist."%obj, failed=True)
        else:
            module.fail_json(msg="Bucket and Object parameters must be set", failed=True)

    if mode == 'getstr':
        if bucket and obj:
            bucketrtn = bucket_check(module, s3, bucket)
            if bucketrtn is False:
                module.fail_json(msg="Bucket %s does not exist."%bucket, failed=True)
            else:
                keyrtn = key_check(module, s3, bucket, obj, version=version)
                if keyrtn is True:
                    download_s3str(module, s3, bucket, obj, version=version)
                else:
                    if version is not None:
                        module.fail_json(msg="Key %s with version id %s does not exist."% (obj, version), failed=True)
                    else:
                        module.fail_json(msg="Key %s does not exist."%obj, failed=True)

    module.exit_json(failed=False)
Code example #10
File: s3.py  Project: zimbatm/ansible-modules-core
def main():
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
            bucket         = dict(required=True),
            object         = dict(),
            src            = dict(),
            dest           = dict(default=None),
            mode           = dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr'], required=True),
            expiry         = dict(default=600, aliases=['expiration']),
            s3_url         = dict(aliases=['S3_URL']),
            overwrite      = dict(aliases=['force'], default=True, type='bool'),
            metadata      = dict(type='dict'),
        ),
    )
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    bucket = module.params.get('bucket')
    obj = module.params.get('object')
    src = module.params.get('src')
    if module.params.get('dest'):
        dest = os.path.expanduser(module.params.get('dest'))
    mode = module.params.get('mode')
    expiry = int(module.params['expiry'])
    s3_url = module.params.get('s3_url')
    overwrite = module.params.get('overwrite')
    metadata = module.params.get('metadata')

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)

    if region in ('us-east-1', '', None):
        # S3ism for the US Standard region
        location = Location.DEFAULT
    else:
        # Boto uses symbolic names for locations but region strings will
        # actually work fine for everything except us-east-1 (US Standard)
        location = region

    if module.params.get('object'):
        obj = os.path.expanduser(module.params['object'])

    # allow eucarc environment variables to be used if ansible vars aren't set
    if not s3_url and 'S3_URL' in os.environ:
        s3_url = os.environ['S3_URL']

    # Look at s3_url and tweak connection settings
    # if connecting to Walrus or fakes3
    try:
        if is_fakes3(s3_url):
            fakes3 = urlparse.urlparse(s3_url)
            s3 = S3Connection(
                is_secure=fakes3.scheme == 'fakes3s',
                host=fakes3.hostname,
                port=fakes3.port,
                calling_format=OrdinaryCallingFormat(),
                **aws_connect_kwargs
            )
        elif is_walrus(s3_url):
            walrus = urlparse.urlparse(s3_url).hostname
            s3 = boto.connect_walrus(walrus, **aws_connect_kwargs)
        else:
            s3 = boto.s3.connect_to_region(location, is_secure=True, calling_format=OrdinaryCallingFormat(), **aws_connect_kwargs)
            # use this as fallback because connect_to_region seems to fail in boto + non 'classic' aws accounts in some cases
            if s3 is None:
                s3 = boto.connect_s3(**aws_connect_kwargs)

    except boto.exception.NoAuthHandlerFound as e:
        module.fail_json(msg='No Authentication Handler found: %s ' % str(e))
Code example #11
 (options, args) = parser.parse_args()
 ### LOAD OPTIONS INTO LOCAL VARS
 user1_access = options.user1_access
 user2_access = options.user2_access
 user1_secret = options.user1_secret
 user2_secret = options.user2_secret
 url = options.url
 bucket_name =  "newbuck" + str(int(time.time()))
 key_name = "testfile"
 ## CREATE 2 walrus connections
 try:
     walrus_user1 = boto.connect_walrus(host=url, aws_access_key_id=user1_access, aws_secret_access_key=user1_secret,debug=0)
 except Exception as e:
     fail("Failure creating first walrus Boto connection: " + str(e))
 try:   
     walrus_user2 = boto.connect_walrus(host=url, aws_access_key_id=user2_access, aws_secret_access_key=user2_secret,debug=0)
 except Exception as e:
     fail("Failure creating second walrus Boto connection: " + str(e))
 print("Connected to walrus successfully")
 ### CREATE A BUCKET WITH USER1
 try:
     buck1 = create_bucket(walrus_user1, bucket_name)  
 except Exception as e:
     fail("Failure creating bucket with user1: " + str(e))
     exit(1)
 ### ADD AN OBJECT TO THE BUCKET
 print "Bucket 1: " + str(buck1)
 try:
     key1 = upload_object_file(walrus_user1, bucket_name, key_name,"./dummyobj")
 except Exception as e:
     fail("Failure uploading file with user1 to walrus: " + str(e))
Code example #12
File: s3.py  Project: RajeevNambiar/temp
def main():
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
            bucket         = dict(required=True),
            dest           = dict(default=None),
            encrypt        = dict(default=True, type='bool'),
            expiry         = dict(default=600, aliases=['expiration']),
            headers        = dict(type='dict'),
            marker         = dict(default=None),
            max_keys       = dict(default=1000),
            metadata       = dict(type='dict'),
            mode           = dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list'], required=True),
            object         = dict(),
            permission     = dict(type='list', default=['private']),
            version        = dict(default=None),
            overwrite      = dict(aliases=['force'], default='always'),
            prefix         = dict(default=None),
            retries        = dict(aliases=['retry'], type='int', default=0),
            s3_url         = dict(aliases=['S3_URL']),
            src            = dict(),
        ),
    )
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    bucket = module.params.get('bucket')
    encrypt = module.params.get('encrypt')
    expiry = int(module.params['expiry'])
    if module.params.get('dest'):
        dest = os.path.expanduser(module.params.get('dest'))
    headers = module.params.get('headers')
    marker = module.params.get('marker')
    max_keys = module.params.get('max_keys')
    metadata = module.params.get('metadata')
    mode = module.params.get('mode')
    obj = module.params.get('object')
    version = module.params.get('version')
    overwrite = module.params.get('overwrite')
    prefix = module.params.get('prefix')
    retries = module.params.get('retries')
    s3_url = module.params.get('s3_url')
    src = module.params.get('src')

    for acl in module.params.get('permission'):
        if acl not in CannedACLStrings:
            module.fail_json(msg='Unknown permission specified: %s' % str(acl))

    if overwrite not in ['always', 'never', 'different']:
        if module.boolean(overwrite):
            overwrite = 'always'
        else:
            overwrite = 'never'

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)

    if region in ('us-east-1', '', None):
        # S3ism for the US Standard region
        location = Location.DEFAULT
    else:
        # Boto uses symbolic names for locations but region strings will
        # actually work fine for everything except us-east-1 (US Standard)
        location = region

    if module.params.get('object'):
        obj = os.path.expanduser(module.params['object'])

    # allow eucarc environment variables to be used if ansible vars aren't set
    if not s3_url and 'S3_URL' in os.environ:
        s3_url = os.environ['S3_URL']

    # bucket names with .'s in them need to use the calling_format option,
    # otherwise the connection will fail. See https://github.com/boto/boto/issues/2836
    # for more details.
    if '.' in bucket:
        aws_connect_kwargs['calling_format'] = OrdinaryCallingFormat()

    # Look at s3_url and tweak connection settings
    # if connecting to Walrus or fakes3
    try:
        if is_fakes3(s3_url):
            fakes3 = urlparse.urlparse(s3_url)
            s3 = S3Connection(
                is_secure=fakes3.scheme == 'fakes3s',
                host=fakes3.hostname,
                port=fakes3.port,
                calling_format=OrdinaryCallingFormat(),
                **aws_connect_kwargs
            )
        elif is_walrus(s3_url):
            walrus = urlparse.urlparse(s3_url).hostname
            s3 = boto.connect_walrus(walrus, **aws_connect_kwargs)
        else:
            s3 = boto.s3.connect_to_region(location, is_secure=True, **aws_connect_kwargs)
            # use this as fallback because connect_to_region seems to fail in boto + non 'classic' aws accounts in some cases
            if s3 is None:
                s3 = boto.connect_s3(**aws_connect_kwargs)

    except boto.exception.NoAuthHandlerFound as e:
        module.fail_json(msg='No Authentication Handler found: %s ' % str(e))
Code example #13
File: s3.py  Project: tapanswain/ansible-modules-core
    # if connecting to Walrus or fakes3
    if is_fakes3(s3_url):
        try:
            fakes3 = urlparse.urlparse(s3_url)
            s3 = boto.connect_s3(aws_access_key,
                                 aws_secret_key,
                                 is_secure=False,
                                 host=fakes3.hostname,
                                 port=fakes3.port,
                                 calling_format=OrdinaryCallingFormat())
        except boto.exception.NoAuthHandlerFound as e:
            module.fail_json(msg=str(e))
    elif is_walrus(s3_url):
        try:
            walrus = urlparse.urlparse(s3_url).hostname
            s3 = boto.connect_walrus(walrus, aws_access_key, aws_secret_key)
        except boto.exception.NoAuthHandlerFound as e:
            module.fail_json(msg=str(e))
    else:
        try:
            s3 = boto.s3.connect_to_region(
                location,
                aws_access_key_id=aws_access_key,
                aws_secret_access_key=aws_secret_key,
                is_secure=True,
                calling_format=OrdinaryCallingFormat())
        except boto.exception.NoAuthHandlerFound as e:
            module.fail_json(msg=str(e))

    # If our mode is a GET operation (download), go through the procedure as appropriate ...
    if mode == 'get':
Code example #14
File: s3.py  Project: 2ndQuadrant/ansible
def main():
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
            bucket         = dict(required=True),
            dest           = dict(default=None),
            encrypt        = dict(default=True, type='bool'),
            expiry         = dict(default=600, aliases=['expiration']),
            headers        = dict(type='dict'),
            marker         = dict(default=None),
            max_keys       = dict(default=1000),
            metadata       = dict(type='dict'),
            mode           = dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list'], required=True),
            object         = dict(),
            permission     = dict(type='list', default=['private']),
            version        = dict(default=None),
            overwrite      = dict(aliases=['force'], default='always'),
            prefix         = dict(default=None),
            retries        = dict(aliases=['retry'], type='int', default=0),
            s3_url         = dict(aliases=['S3_URL']),
            rgw            = dict(default='no', type='bool'),
            src            = dict(),
        ),
    )
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    bucket = module.params.get('bucket')
    encrypt = module.params.get('encrypt')
    expiry = int(module.params['expiry'])
    if module.params.get('dest'):
        dest = os.path.expanduser(module.params.get('dest'))
    headers = module.params.get('headers')
    marker = module.params.get('marker')
    max_keys = module.params.get('max_keys')
    metadata = module.params.get('metadata')
    mode = module.params.get('mode')
    obj = module.params.get('object')
    version = module.params.get('version')
    overwrite = module.params.get('overwrite')
    prefix = module.params.get('prefix')
    retries = module.params.get('retries')
    s3_url = module.params.get('s3_url')
    rgw = module.params.get('rgw')
    src = module.params.get('src')

    for acl in module.params.get('permission'):
        if acl not in CannedACLStrings:
            module.fail_json(msg='Unknown permission specified: %s' % str(acl))

    if overwrite not in ['always', 'never', 'different']:
        if module.boolean(overwrite):
            overwrite = 'always'
        else:
            overwrite = 'never'

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)

    if region in ('us-east-1', '', None):
        # S3ism for the US Standard region
        location = Location.DEFAULT
    else:
        # Boto uses symbolic names for locations but region strings will
        # actually work fine for everything except us-east-1 (US Standard)
        location = region

    if module.params.get('object'):
        obj = os.path.expanduser(module.params['object'])

    # allow eucarc environment variables to be used if ansible vars aren't set
    if not s3_url and 'S3_URL' in os.environ:
        s3_url = os.environ['S3_URL']

    # rgw requires an explicit url
    if rgw and not s3_url:
        module.fail_json(msg='rgw flavour requires s3_url')

    # bucket names with .'s in them need to use the calling_format option,
    # otherwise the connection will fail. See https://github.com/boto/boto/issues/2836
    # for more details.
    if '.' in bucket:
        aws_connect_kwargs['calling_format'] = OrdinaryCallingFormat()

    # Look at s3_url and tweak connection settings
    # if connecting to RGW, Walrus or fakes3
    try:
        if s3_url and rgw:
            rgw = urlparse.urlparse(s3_url)
            s3 = boto.connect_s3(
                is_secure=rgw.scheme == 'https',
                host=rgw.hostname,
                port=rgw.port,
                calling_format=OrdinaryCallingFormat(),
                **aws_connect_kwargs
            )
        elif is_fakes3(s3_url):
            fakes3 = urlparse.urlparse(s3_url)
            s3 = S3Connection(
                is_secure=fakes3.scheme == 'fakes3s',
                host=fakes3.hostname,
                port=fakes3.port,
                calling_format=OrdinaryCallingFormat(),
                **aws_connect_kwargs
            )
        elif is_walrus(s3_url):
            walrus = urlparse.urlparse(s3_url).hostname
            s3 = boto.connect_walrus(walrus, **aws_connect_kwargs)
        else:
            aws_connect_kwargs['is_secure'] = True
            try:
                s3 = connect_to_aws(boto.s3, location, **aws_connect_kwargs)
            except AnsibleAWSError:
                # use this as fallback because connect_to_region seems to fail in boto + non 'classic' aws accounts in some cases
                s3 = boto.connect_s3(**aws_connect_kwargs)

    except boto.exception.NoAuthHandlerFound as e:
        module.fail_json(msg='No Authentication Handler found: %s ' % str(e))
    except Exception as e:
        module.fail_json(msg='Failed to connect to S3: %s' % str(e))

    if s3 is None: # this should never happen
        module.fail_json(msg='Unknown error, failed to create s3 connection, no information from boto.')

    # If our mode is a GET operation (download), go through the procedure as appropriate ...
    if mode == 'get':

        # First, we check to see if the bucket exists, we get "bucket" returned.
        bucketrtn = bucket_check(module, s3, bucket)
        if bucketrtn is False:
            module.fail_json(msg="Source bucket cannot be found", failed=True)

        # Next, we check to see if the key in the bucket exists. If it exists, it also returns key_matches md5sum check.
        keyrtn = key_check(module, s3, bucket, obj, version=version)
        if keyrtn is False:
            if version is not None:
                module.fail_json(msg="Key %s with version id %s does not exist."% (obj, version), failed=True)
            else:
                module.fail_json(msg="Key %s does not exist."%obj, failed=True)

        # If the destination path doesn't exist or overwrite is True, no need to do the md5um etag check, so just download.
        pathrtn = path_check(dest)
        if pathrtn is False or overwrite == 'always':
            download_s3file(module, s3, bucket, obj, dest, retries, version=version)

        # Compare the remote MD5 sum of the object with the local dest md5sum, if it already exists.
        if pathrtn is True:
            md5_remote = keysum(module, s3, bucket, obj, version=version)
            md5_local = module.md5(dest)
            if md5_local == md5_remote:
                sum_matches = True
                if overwrite == 'always':
                    download_s3file(module, s3, bucket, obj, dest, retries, version=version)
                else:
                    module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite=always parameter to force.", changed=False)
            else:
                sum_matches = False

                if overwrite in ('always', 'different'):
                    download_s3file(module, s3, bucket, obj, dest, retries, version=version)
                else:
                    module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force download.")

        # Firstly, if key_matches is TRUE and overwrite is not enabled, we EXIT with a helpful message.
        if sum_matches is True and overwrite == 'never':
            module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite parameter to force.", changed=False)

    # if our mode is a PUT operation (upload), go through the procedure as appropriate ...
    if mode == 'put':

        # Use this snippet to debug through conditionals:
#       module.exit_json(msg="Bucket return %s"%bucketrtn)

        # Lets check the src path.
        pathrtn = path_check(src)
        if pathrtn is False:
            module.fail_json(msg="Local object for PUT does not exist", failed=True)

        # Lets check to see if bucket exists to get ground truth.
        bucketrtn = bucket_check(module, s3, bucket)
        if bucketrtn is True:
            keyrtn = key_check(module, s3, bucket, obj)

        # Lets check key state. Does it exist and if it does, compute the etag md5sum.
        if bucketrtn is True and keyrtn is True:
                md5_remote = keysum(module, s3, bucket, obj)
                md5_local = module.md5(src)

                if md5_local == md5_remote:
                    sum_matches = True
                    if overwrite == 'always':
                        upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)
                    else:
                        get_download_url(module, s3, bucket, obj, expiry, changed=False)
                else:
                    sum_matches = False
                    if overwrite in ('always', 'different'):
                        upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)
                    else:
                        module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.")

        # If neither exist (based on bucket existence), we can create both.
        if bucketrtn is False and pathrtn is True:
            create_bucket(module, s3, bucket, location)
            upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)

        # If bucket exists but key doesn't, just upload.
        if bucketrtn is True and pathrtn is True and keyrtn is False:
            upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)

    # Delete an object from a bucket, not the entire bucket
    if mode == 'delobj':
        if obj is None:
            module.fail_json(msg="object parameter is required", failed=True);
        if bucket:
            bucketrtn = bucket_check(module, s3, bucket)
            if bucketrtn is True:
                deletertn = delete_key(module, s3, bucket, obj)
                if deletertn is True:
                    module.exit_json(msg="Object %s deleted from bucket %s." % (obj, bucket), changed=True)
            else:
                module.fail_json(msg="Bucket does not exist.", changed=False)
        else:
            module.fail_json(msg="Bucket parameter is required.", failed=True)


    # Delete an entire bucket, including all objects in the bucket
    if mode == 'delete':
        if bucket:
            bucketrtn = bucket_check(module, s3, bucket)
            if bucketrtn is True:
                deletertn = delete_bucket(module, s3, bucket)
                if deletertn is True:
                    module.exit_json(msg="Bucket %s and all keys have been deleted."%bucket, changed=True)
            else:
                module.fail_json(msg="Bucket does not exist.", changed=False)
        else:
            module.fail_json(msg="Bucket parameter is required.", failed=True)

    # Support for listing a set of keys
    if mode == 'list':
        bucket_object = get_bucket(module, s3, bucket)

        # If the bucket does not exist then bail out
        if bucket_object is None:
            module.fail_json(msg="Target bucket (%s) cannot be found"% bucket, failed=True)

        list_keys(module, bucket_object, prefix, marker, max_keys)

    # Need to research how to create directories without "populating" a key, so this should just do bucket creation for now.
    # WE SHOULD ENABLE SOME WAY OF CREATING AN EMPTY KEY TO CREATE "DIRECTORY" STRUCTURE, AWS CONSOLE DOES THIS.
    if mode == 'create':
        if bucket and not obj:
            bucketrtn = bucket_check(module, s3, bucket)
            if bucketrtn is True:
                module.exit_json(msg="Bucket already exists.", changed=False)
            else:
                module.exit_json(msg="Bucket created successfully", changed=create_bucket(module, s3, bucket, location))
        if bucket and obj:
            bucketrtn = bucket_check(module, s3, bucket)
            if obj.endswith('/'):
                dirobj = obj
            else:
                dirobj = obj + "/"
            if bucketrtn is True:
                keyrtn = key_check(module, s3, bucket, dirobj)
                if keyrtn is True:
                    module.exit_json(msg="Bucket %s and key %s already exists."% (bucket, obj), changed=False)
                else:
                    create_dirkey(module, s3, bucket, dirobj)
            if bucketrtn is False:
                created = create_bucket(module, s3, bucket, location)
                create_dirkey(module, s3, bucket, dirobj)

    # Support for grabbing the time-expired URL for an object in S3/Walrus.
    if mode == 'geturl':
        if bucket and obj:
            bucketrtn = bucket_check(module, s3, bucket)
            if bucketrtn is False:
                module.fail_json(msg="Bucket %s does not exist."%bucket, failed=True)
            else:
                keyrtn = key_check(module, s3, bucket, obj)
                if keyrtn is True:
                    get_download_url(module, s3, bucket, obj, expiry)
                else:
                    module.fail_json(msg="Key %s does not exist."%obj, failed=True)
        else:
            module.fail_json(msg="Bucket and Object parameters must be set", failed=True)

    if mode == 'getstr':
        if bucket and obj:
            bucketrtn = bucket_check(module, s3, bucket)
            if bucketrtn is False:
                module.fail_json(msg="Bucket %s does not exist."%bucket, failed=True)
            else:
                keyrtn = key_check(module, s3, bucket, obj, version=version)
                if keyrtn is True:
                    download_s3str(module, s3, bucket, obj, version=version)
                else:
                    if version is not None:
                        module.fail_json(msg="Key %s with version id %s does not exist."% (obj, version), failed=True)
                    else:
                        module.fail_json(msg="Key %s does not exist."%obj, failed=True)

    module.exit_json(failed=False)
Code example #15
File: s3.py  Project: conlini/frozen-ansible
        try:
            fakes3 = urlparse.urlparse(s3_url)
            from boto.s3.connection import OrdinaryCallingFormat
            s3 = boto.connect_s3(
                aws_access_key,
                aws_secret_key,
                is_secure=False,
                host=fakes3.hostname,
                port=fakes3.port,
                calling_format=OrdinaryCallingFormat())
        except boto.exception.NoAuthHandlerFound as e:
            module.fail_json(msg=str(e))
    elif is_walrus(s3_url):
        try:
            walrus = urlparse.urlparse(s3_url).hostname
            s3 = boto.connect_walrus(walrus, aws_access_key, aws_secret_key)
        except boto.exception.NoAuthHandlerFound as e:
            module.fail_json(msg=str(e))
    else:
        try:
            s3 = boto.connect_s3(aws_access_key, aws_secret_key)
        except boto.exception.NoAuthHandlerFound as e:
            module.fail_json(msg=str(e))
 
    # If our mode is a GET operation (download), go through the procedure as appropriate ...
    if mode == 'get':
    
        # First, we check to see if the bucket exists, we get "bucket" returned.
        bucketrtn = bucket_check(module, s3, bucket)
        if bucketrtn is False:
            module.fail_json(msg="Target bucket cannot be found", failed=True)