Example #1
 def remove(self, path):
     path = self._init_path(path)
     key = boto.s3.key.Key(self._s3_bucket, path)
     if not key.exists():
         raise OSError("No such key: '{}'".format(path))
     # Does this delete folders?
     key.delete()
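To answer the comment's question: no. S3 has a flat keyspace, so "folders" are only key-name prefixes, and deleting a key named foo leaves foo/bar untouched. A minimal sketch of removing everything under a prefix, assuming the same self._s3_bucket as above (Examples #7 and #8 below do this for real):

def remove_prefix(self, path):
    # S3 has no directories: each key under the prefix must be deleted
    # individually.
    prefix = path if path.endswith('/') else path + '/'
    for key in self._s3_bucket.list(prefix=prefix):
        key.delete()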
Example #2
 def stream_write(self, path, fp):
     # Minimum size of upload part size on S3 is 5MB
     buffer_size = 5 * 1024 * 1024
     if self.buffer_size > buffer_size:
         buffer_size = self.buffer_size
     path = self._init_path(path)
     tmp_path = "tmp/%s" % path
     mp = self._boto_bucket.initiate_multipart_upload(
         tmp_path, encrypt_key=(self._config.s3_encrypt is True))
     num_part = 1
     try:
         while True:
             buf = fp.read(buffer_size)
             if not buf:
                 break
             io = compat.StringIO(buf)
             mp.upload_part_from_file(io, num_part)
             num_part += 1
             io.close()
     except IOError:
         # Abort the upload so S3 does not keep the orphaned parts
         # (incomplete multipart uploads still occupy billable storage).
         mp.cancel_upload()
         raise
     mp.complete_upload()
     # Copy the finished object so its ETag is the plain MD5 of the
     # contents; multipart ETags are a digest of the part MD5s instead.
     key = self.makeKey(tmp_path)
     if not key.exists():
         raise IOError("No such key: '{0}'".format(tmp_path))
     new_key = key.copy(self._config.boto_bucket, path)
     if not new_key.exists():
         raise IOError("No such key: '{0}'".format(path))
     key.delete()
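The closing copy step exists because S3 does not give multipart objects a plain MD5 ETag: the ETag of a multipart upload is the MD5 of the concatenated binary part digests plus a part count. A minimal sketch of that convention (widely observed, though not formally documented by AWS):

import binascii
import hashlib

def multipart_etag(part_md5_hexdigests):
    # MD5 of the concatenated binary part digests, then '-<part count>'.
    combined = hashlib.md5(
        b''.join(binascii.unhexlify(d) for d in part_md5_hexdigests))
    return '{0}-{1}'.format(combined.hexdigest(), len(part_md5_hexdigests))

Copying the completed object, as the code above does, makes S3 rewrite it as a single-part object, so the resulting ETag is the ordinary MD5 of the contents.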
Example #3
 def s3_delete_file(self, s3_path):
     pr = s3parse(s3_path)
     if pr.scheme not in ["s3", "s3n"]:
         raise ValueError("Not a valid S3 path: '%s'" % s3_path)
     bucket = self.s3_conn.get_bucket(pr.netloc)
     prefix_path = pr.path[1:]
     for key in bucket.list(prefix=prefix_path):
         key.delete()
     return True
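One hazard worth noting: because the function deletes by prefix, passing a bare bucket URL like s3://bucket/ yields an empty prefix and would delete every key in the bucket. A hypothetical guard wrapper, reusing the s3parse helper the example assumes:

def s3_delete_file_guarded(client, s3_path):
    # Refuse an empty prefix; bucket.list(prefix="") matches every key.
    pr = s3parse(s3_path)
    if not pr.path.strip("/"):
        raise ValueError("Refusing to delete an empty prefix: '%s'" % s3_path)
    return client.s3_delete_file(s3_path)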
Example #4
  def delete(self, source):
    '''Thread worker for delete operation.'''
    s3url = S3URL(source)
    bucket = self.s3.lookup(s3url.bucket, validate=self.opt.validate)
    key = bucket.get_key(s3url.path)

    if not self.opt.dry_run:
      key.delete()
    message('Delete %s', source)
Example #5
def _delete_key(bucket, key_name):
    """A "safe" S3 key delete helper.
    Fails silently if there is no such key.
    Args:
        bucket (S3 bucket object)
        key_name (str)
    """
    key = bucket.get_key(key_name)
    if key:
        key.delete()
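Because S3's DELETE is idempotent (it reports success even when the key is absent), the existence check above costs an extra HEAD request purely to decide whether to send the DELETE. A sketch of the single-request variant using boto's Bucket.delete_key, useful when you don't need to know whether the key existed:

def _delete_key_fast(bucket, key_name):
    # One DELETE request; succeeds whether or not the key exists.
    bucket.delete_key(key_name)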
Example #6
  def copy(self, source, target, delete_source=False):
    '''Copy a single file from source to target using boto S3 library.'''
    source_url = S3URL(source)
    target_url = S3URL(target)

    if not self.opt.dry_run:
      bucket = self.s3.lookup(source_url.bucket, validate=self.opt.validate)
      key = bucket.get_key(source_url.path)
      key.copy(target_url.bucket, target_url.path)
      if delete_source:
        key.delete()
    message('%s => %s' % (source, target))
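Since S3 has no rename primitive, a move is always this same copy-then-delete pair; the delete_source flag is what turns the copy above into a move. A hypothetical convenience wrapper on the same class:

  def move(self, source, target):
    '''Move a single file by copying it and deleting the source.'''
    self.copy(source, target, delete_source=True)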
Example #7
 def remove(self, path):
     path = self._init_path(path)
     key = boto.s3.key.Key(self._s3_bucket, path)
     if key.exists():
         # It's a file
         key.delete()
         return
     # We assume it's a directory
     if not path.endswith('/'):
         path += '/'
     for key in self._s3_bucket.list(prefix=path, delimiter='/'):
         key.delete()
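A boto subtlety in the loop above: with delimiter='/', bucket.list() yields Prefix objects for nested "subdirectories", and those have no delete() method, so only direct children are removed (and a nested prefix would raise AttributeError). Dropping the delimiter lists every key under the prefix, as Example #8 does; a minimal sketch:

def remove_recursive(self, path):
    # No delimiter: list() yields every key under the prefix, however
    # deeply nested, so the whole "tree" is removed.
    if not path.endswith('/'):
        path += '/'
    for key in self._s3_bucket.list(prefix=path):
        key.delete()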
Example #8
 def remove(self, path):
     self._initialize_cloud_conn()
     path = self._init_path(path)
     key = self._key_class(self._cloud_bucket, path)
     if key.exists():
         # It's a file
         key.delete()
         return
     # We assume it's a directory
     if not path.endswith("/"):
         path += "/"
     for key in self._cloud_bucket.list(prefix=path):
         key.delete()
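For large trees, one DELETE request per key is slow. boto also exposes Bucket.delete_keys(), which uses S3's multi-object delete API (up to 1000 keys per request, batched internally). A sketch under the same assumptions as the example above:

def remove_bulk(self, path):
    self._initialize_cloud_conn()
    path = self._init_path(path)
    if not path.endswith("/"):
        path += "/"
    # Multi-object delete: far fewer round-trips than key-by-key deletes.
    result = self._cloud_bucket.delete_keys(
        key.key for key in self._cloud_bucket.list(prefix=path))
    if result.errors:
        raise IOError("Failed to delete %d keys" % len(result.errors))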
Example #9
  def delete_file(self, bucket_name, key_name):
    """ Deletes a file stored in Amazon S3.

    Args:
      bucket_name: A str containing the name of the bucket that the file should
        be downloaded from.
      key_name: A str containing the name of the key that the file should be
        downloaded from.
    """
    bucket = self.connection.lookup(bucket_name)
    key = boto.s3.key.Key(bucket)
    key.key = key_name
    key.delete()
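Note that connection.lookup() returns None when the bucket does not exist, so the Key construction above would later fail with an opaque AttributeError. A hypothetical defensive variant:

  def delete_file_checked(self, bucket_name, key_name):
    bucket = self.connection.lookup(bucket_name)
    if bucket is None:
      raise ValueError("No such bucket: '%s'" % bucket_name)
    # Key(bucket, key_name) sets the key name directly; no HEAD needed.
    boto.s3.key.Key(bucket, key_name).delete()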
Example #10
    def promote(self):
        # pylint: disable=C0111
        source_env = 'development'
        dest_env = 'staging'
        if self.app.pargs.extra_arguments:
            source_env = self.app.pargs.extra_arguments[0]
            dest_env = self.app.pargs.extra_arguments[1]
        for env in (source_env, dest_env):
            if env not in ('development', 'staging', 'production'):
                raise ValueError(
                    'Invalid environment -- only development, staging, '
                    'and production are available')
        try:
            conn = self.app.S3Connection()
        except boto.exception.NoAuthHandlerFound:
            raise ValueError(
                'No credentials set up, run "aws configure" first')
        source_bucket = conn.get_bucket(
            self.app.config.get('buckets', source_env))
        dest_bucket = conn.get_bucket(self.app.config.get('buckets', dest_env))
        for key in source_bucket.get_all_keys():
            self.app.log.info('Copying %s from %s to %s' %
                              (key.key, source_env, dest_env))
            existing_key = dest_bucket.get_key(key.key)
            if existing_key:
                source_hash = source_bucket.get_key(
                    key.key).get_metadata('hash')
                dest_hash = existing_key.get_metadata('hash')
                if source_hash == dest_hash and not self.app.pargs.force:
                    self.app.log.info(
                        '%s exists and is current, skipping' % (key.key,))
                    continue
                else:
                    dest_bucket.delete_key(key.key)
            options = dict()
            if dest_env != 'production':
                options['X-Robots-Tag'] = 'noindex'
            else:
                options['X-Robots-Tag'] = 'all'
            metadata = dict(hash=source_bucket.get_key(
                key.key).get_metadata('hash'))
            metadata['x-robots-tag'] = options['X-Robots-Tag']
            dest_bucket.copy_key(key.key, source_bucket.name,
                                 key.key, metadata=metadata, preserve_acl=True)

        # List the source bucket once instead of re-listing it over the
        # API for every destination key.
        source_key_names = set(
            src_key.key for src_key in source_bucket.get_all_keys())
        for key in dest_bucket.get_all_keys():
            if key.key not in source_key_names:
                key.delete()
        print("Promoted %s to %s" % (source_env, dest_env))
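One caveat: the handler reads extra_arguments[1] without checking how many arguments were supplied, so a single argument raises IndexError instead of a helpful message. A hypothetical guard to place before the unpacking:

        # Hypothetical guard, before reading extra_arguments[0] / [1]:
        args = self.app.pargs.extra_arguments
        if args and len(args) != 2:
            raise ValueError('promote takes exactly two environments: SOURCE DEST')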
Example #11
def remove_file_from_s3(s3_url):
    """
    Attempt to delete the given S3 url. This obviously assumes that the credentials for DOWNLOADABLE_LINK in the settings have delete permission.
    AWS exceptions are not caught and should be caught and suitably logged/handled by the caller.
    Additionally, this function will raise a ValueError if the url passed is not in the s3: scheme, or if the requested
    file does not exist in the bucket.
    It will warn if the requested bucket is not the one configured in the settings for DOWNLOADABLE_LINK but will still attempt to delete
    :param s3_url: S3 url to delete
    :return: boto.s3.key.Key object representing the deleted file
    """
    import urlparse
    from urllib import quote, unquote

    broken_down_url = urlparse.urlparse(s3_url)

    logger.info("Attempting to delete {0}".format(s3_url))
    # check the url scheme
    if broken_down_url.scheme != "s3":
        raise ValueError("Provided URL is not an S3 URL")

    # S3 URLs carry the bucket in the "hostname" part and the key in the path

    s3path = unquote(broken_down_url.path)
    if s3path.startswith("/"):
        s3path = s3path[1:]  # remove any leading "/" from the filepath

    if s3path == "":
        raise ValueError("No file provided to delete")
    # check the bucket name
    if broken_down_url.hostname != settings.DOWNLOADABLE_LINK_BUCKET:
        logger.warning(
            "Provided bucket {0} does not match expected value from settings {1}"
            .format(broken_down_url.hostname,
                    settings.DOWNLOADABLE_LINK_BUCKET))

    s3conn = s3_connect()
    bucket = s3conn.get_bucket(broken_down_url.hostname)

    key = bucket.get_key(s3path)
    if key is None:
        raise ValueError(
            "File {0} on bucket {1} does not appear to exist".format(
                s3path, broken_down_url.hostname))
    # exceptions from this are handled by the caller
    key.delete()
    logger.info("Successfully deleted {0}".format(s3_url))
    return key
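The imports are Python 2 (the urlparse module and urllib.unquote). For reference, the Python 3 equivalents would be:

from urllib.parse import urlparse, unquote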
Example #12
    def delete(self, key_name):
        # lookup() returns None for a missing bucket; get_bucket() would
        # raise S3ResponseError instead, making the None check dead code.
        bucket = self.connection.lookup(self.bucket_name)

        if bucket is None:
            return False

        # get_all_keys() returns at most one page of results (1000 keys),
        # so keep draining until nothing with the prefix remains.
        while True:
            keys = bucket.get_all_keys(prefix=key_name)

            if len(keys) == 0:
                break

            for key in keys:
                key.delete()

        return True
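The paging can also be delegated to boto: bucket.list() returns an iterator that fetches successive pages transparently, so the drain loop collapses to a single pass. A sketch under the same assumptions:

    def delete_listed(self, key_name):
        bucket = self.connection.lookup(self.bucket_name)
        if bucket is None:
            return False
        # list() pages through results internally (1000 keys per request).
        for key in bucket.list(prefix=key_name):
            key.delete()
        return True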
Example #13
def upload(prefix='repo', local=None):
    """Upload RPMs to S3.

    Args:
        prefix - S3 prefix
        local - local directory location
    """
    conn = boto.connect_s3(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
    bucket = conn.get_bucket(BUCKET_NAME)

    print "clean target location..."
    bucket_location = prefix
    for key in bucket.list(bucket_location):
        print " ---  delete " + key.key
        key.delete()

    if not local:
        local = prefix
    _upload(bucket, local, bucket_location)
Example #14
    def worker(base_path):
        mtime = path = 0
        while True:
            try:
                mtime, queued_path = queue.get()

                path = queued_path
                if path is None:
                    return

                key = boto.s3.key.Key(bucket)

                if mtime is None:  # delete
                    try:
                        try:
                            key.key = bucket_prefix + path
                            key.delete()
                        except Exception:
                            logger.exception('deleting %r, retrying' % key.key)
                            time.sleep(9)
                            key.key = bucket_prefix + path
                            key.delete()
                    except Exception:
                        if index is not None:
                            # Failed to delete. Put the key back so we
                            # try again later
                            index[queued_path] = 1
                        raise

                elif mtime is GENERATE:
                    (path, s3mtime) = path
                    fspath = join(base_path, path.encode(encoding))
                    if exists(fspath):
                        # Someone created a file since we decided to
                        # generate one.
                        continue

                    fspath = dirname(fspath)
                    data = "Index of " + path[:-len(INDEX_HTML) - 1]
                    data = [
                        "<!-- generated -->",
                        "<html><head><title>%s</title></head><body>" % data,
                        "<h1>%s</h1><table>" % data,
                        "<tr><th>Name</th><th>Last modified</th><th>Size</th>"
                        "</tr>",
                    ]
                    for name in sorted(os.listdir(fspath)):
                        if name.startswith('.'):
                            continue  # don't index dot files
                        name_path = join(fspath, name)
                        if isdir(name_path):
                            name = name + '/'
                            size = '-'
                        else:
                            size = os.stat(name_path).st_size
                        mtime = time.ctime(os.stat(name_path).st_mtime)
                        name = name.decode(encoding)
                        data.append('<tr><td><a href="%s">%s</a></td>\n'
                                    '    <td>%s</td><td>%s</td></tr>' %
                                    (name, name, mtime, size))
                    data.append("</table></body></html>\n")
                    data = '\n'.join(data)

                    digest = hashlib.md5(data.encode(encoding)).hexdigest()
                    if digest != s3mtime:
                        # Note that s3mtime is either a previous digest,
                        # 0 (because the path wasn't in S3), or an S3
                        # upload time.  The test above works in all of
                        # these cases.
                        key.key = bucket_prefix + path
                        key.set_metadata('generated', 'true')
                        try:
                            key.set_contents_from_string(
                                data,
                                headers={'Content-Type': 'text/html'},
                            )
                        except Exception:
                            logger.exception(
                                'uploading generated %r, retrying' % path)
                            time.sleep(9)
                            key.set_contents_from_string(
                                data,
                                headers={'Content-Type': 'text/html'},
                            )

                        if s3mtime:
                            # update (if it was add, mtime would be 0)
                            if cloudfront:
                                invalidations.append(path)

                    if index is not None:
                        index[path] = digest

                else:  # upload
                    try:
                        if had_index:
                            # We only store mtimes to the nearest second.
                            # We don't have a fudge factor, so there's a
                            # chance that someone might update the file in
                            # the same second, so we check if a second has
                            # passed and sleep if it hasn't.
                            now = time_time_from_sixtuple(
                                time.gmtime(time.time()))
                            if not now > mtime:
                                time.sleep(1)

                        key.key = bucket_prefix + path
                        path = join(base_path, path)
                        try:
                            key.set_contents_from_filename(
                                path.encode(encoding))
                        except Exception:
                            logger.exception('uploading %r %r, retrying' %
                                             (mtime, path))
                            time.sleep(9)
                            key.set_contents_from_filename(
                                path.encode(encoding))

                    except Exception:
                        if index is not None:
                            # Upload failed. Remove from index so we
                            # try again later (if the path is still
                            # around).
                            index.pop(queued_path)
                        raise

            except Exception:
                logger.exception('processing %r %r' % (mtime, path))
            finally:
                queue.task_done()
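For readability, here is the queue protocol the worker consumes, inferred from the branches above: each item is a (mtime, path) pair, and sentinel values select the operation. A hypothetical producer (GENERATE and queue are the names the example assumes):

queue.put((None, 'docs/old.html'))              # mtime None: delete the key
queue.put((GENERATE, ('docs/index.html', 0)))   # GENERATE: build an index page
queue.put((1700000000, 'docs/new.html'))        # numeric mtime: upload the file
queue.put((None, None))                         # path None: stop the worker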
Example #15
 def remove(self, path):
     path = self._init_path(path)
     key = boto.s3.key.Key(self._s3_bucket, path)
     if not key.exists():
         raise OSError('No such key: \'{0}\''.format(path))
     key.delete()
Example #16
def command_delete(bucket, verbose=False):
    for key in bucket.list(prefix=PREFIX):
        if verbose:
            print("Deleting {}".format(key.name))
        key.delete()
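A hypothetical invocation, assuming a module-level PREFIX and AWS credentials available in the environment or boto config:

import boto

conn = boto.connect_s3()  # picks up credentials from env/boto config
command_delete(conn.get_bucket('my-bucket'), verbose=True)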