def putS3File(localPath, bucketName):
    """Upload the file at ``localPath`` to the S3 bucket ``bucketName``.

    The new S3 object's key is ``localPath`` itself. AWS credentials are
    picked up by boto from the AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY
    environment variables.
    """
    # Connect to S3 (boto reads credentials from the environment).
    conn = boto.connect_s3()
    # Look up the destination bucket.
    target_bucket = conn.get_bucket(bucketName)
    # A generic Key keyed on the local path addresses the new object.
    obj = Key(target_bucket)
    obj.key = localPath
    # Push the file contents up, using reduced-redundancy storage.
    obj.set_contents_from_filename(localPath, reduced_redundancy=True)
    # Grant FULL_CONTROL to each team member via email grants; every
    # address must belong to an AWS account for the grant to succeed.
    for member in ('*****@*****.**', '*****@*****.**', '*****@*****.**'):
        obj.add_email_grant('FULL_CONTROL', member)
def putS3File(localPath, bucketName):
    """Write the file named ``localPath`` to a new S3 object in the bucket
    called ``bucketName`` and grant READ access to team members.

    Expects AWS credentials in the AWS_ACCESS_KEY_ID and
    AWS_SECRET_ACCESS_KEY environment variables (consumed by boto).
    """
    # Open the S3 connection; credentials come from the environment.
    connection = boto.connect_s3()
    # Fetch the bucket we are writing into.
    bucket = connection.get_bucket(bucketName)
    # The new object's key mirrors the local file path.
    key = Key(bucket)
    key.key = localPath
    # Upload the file with reduced-redundancy storage.
    key.set_contents_from_filename(localPath, reduced_redundancy=True)
    # Grant read permission to each team member; the addresses must be
    # tied to AWS accounts for email grants to work.
    for address in ('*****@*****.**', '*****@*****.**', '*****@*****.**'):
        key.add_email_grant('READ', address)
def launch(name, region, node_type, engine, engine_version=None, num_nodes=1,
           subnet_group=None, cache_security_groups=None,
           security_group_ids=None, snapshot=None, snapshot_optional=False,
           preferred_availability_zone=None,
           preferred_maintenance_window=None, notification_topic_arn=None,
           parameter_group=None, port=None, auto_minor_version_upgrade=True,
           aws_key=None, aws_secret=None, ecconn=None):
    """
    Launch an Elasticache cluster

    Most arguments are the same as :meth:`.manage`
    """
    # Build the Elasticache connection lazily if the caller did not pass one.
    if ecconn is None:
        ecconn = __salt__['aws_util.ecconn'](region, aws_key, aws_secret)

    if snapshot is None:
        snapshots = []
    else:
        snapshots = [snapshot]
        if snapshot_optional:
            s3conn = __salt__['aws_util.s3conn'](aws_key, aws_secret)
            # Keep only snapshots that actually exist in S3; for those
            # that do, make sure the service account can read them.
            kept = []
            for path in snapshots:
                # Snapshot paths look like "<bucket>/<key...>".
                bucket_name, _, key_path = path.partition('/')
                bucket = s3conn.get_bucket(bucket_name)
                key = Key(bucket, key_path)
                if not key.exists():
                    # Snapshot is optional and missing: silently drop it.
                    continue
                # Add read-only access to the snapshot if necessary.
                grants = key.get_acl().acl.grants
                readable = any(
                    grant.permission.lower() == 'read' and
                    grant.email_address == '*****@*****.**'
                    for grant in grants)
                if not readable:
                    key.add_email_grant('READ', '*****@*****.**')
                kept.append(path)
            snapshots = kept
        # Elasticache wants full S3 ARNs, not bare bucket/key paths.
        snapshots = ['arn:aws:s3:::' + path for path in snapshots]

    ecconn.create_cache_cluster(
        name, num_nodes, node_type, engine,
        engine_version=engine_version,
        cache_parameter_group_name=parameter_group,
        cache_subnet_group_name=subnet_group,
        cache_security_group_names=cache_security_groups,
        security_group_ids=security_group_ids,
        snapshot_arns=snapshots,
        preferred_availability_zone=preferred_availability_zone,
        preferred_maintenance_window=preferred_maintenance_window,
        port=port,
        notification_topic_arn=notification_topic_arn,
        auto_minor_version_upgrade=auto_minor_version_upgrade)