Example #1
File: botostore.py  Project: mbr/simplekv
    def __new_key(self, name):
        from boto.s3.key import Key

        k = Key(self.bucket, self.prefix + name)
        if self.metadata:
            k.update_metadata(self.metadata)
        return k
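A minimal usage sketch: __new_key is a private helper on simplekv's BotoStore, so in practice it runs behind the store's public API. The import path, bucket name, prefix, and metadata below are placeholders, and the constructor arguments are assumed from the attributes the snippet reads (self.bucket, self.prefix, self.metadata).

import boto
from simplekv.net.botostore import BotoStore  # assumed module path

conn = boto.connect_s3()
bucket = conn.get_bucket('my-bucket')  # placeholder bucket name

# Constructor arguments inferred from the snippet's attributes.
store = BotoStore(bucket, prefix='data/', metadata={'owner': 'me'})
store.put('greeting', 'hello world')   # builds its boto Key via __new_key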
Example #2
File: s3.py  Project: petermelias/assetman
def upload_string_data(bucket, path, string_data, public=False,
                       mime_type=None, metadata=None):

    key = Key(bucket, path)
    if mime_type:
        key.content_type = mime_type
    if metadata:
        key.update_metadata(metadata)
    key.set_contents_from_string(string_data)
    if public:
        key.make_public()
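A hedged call sketch for the helper above; note it assumes Key has already been imported from boto.s3.key at module scope. The bucket name, key path, and metadata values are invented for illustration.

import boto

conn = boto.connect_s3()
bucket = conn.get_bucket('my-assets')  # placeholder bucket name

css = 'body { margin: 0; }'
upload_string_data(bucket, 'css/site.css', css,
                   public=True,               # also calls key.make_public()
                   mime_type='text/css',
                   metadata={'hash': 'abc123'})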
Example #3
  def EncryptAndUploadToS3(bucket, filename, metadata):
    """Encrypts the given file and uploads it to S3.

    This operation is idempotent."""

    tmp_fp = os.tmpfile()
    subprocess.call(["cat", filename], stdout=tmp_fp)  # args must be a list
    metadata['enc-method'] = 0
    # Check for an existing S3 path xattr on the file.
    # Check if the uploaded hash matches this new one.
    if NOOP:
      print bucket, metadata, filename
    else:
      key = Key(bucket)
      key.update_metadata(metadata)
      key.key = filename  # the key name is an attribute, not a callable
      tmp_fp.seek(0)      # rewind so boto uploads from the start of the file
      key.set_contents_from_file(tmp_fp, cb=None, num_cb=100)  # progress callback elided in the source
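A sketch of a dry run, assuming NOOP is a module-level flag defined alongside the function as the snippet implies; the bucket name, file, and metadata are illustrative.

import boto

NOOP = True  # print what would be uploaded instead of touching S3

conn = boto.connect_s3()
bucket = conn.get_bucket('backups')  # placeholder bucket name
EncryptAndUploadToS3(bucket, '/etc/hosts', {'hash': 'deadbeef'})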
Example #4
File: trie2.py  Project: Stamped/Stamped
 def add_key(self, name, value, content_type=None, apply_gzip=False, temp_prefix=None):
     assert isinstance(value, basestring)
     
     if apply_gzip:
         name += '.gz'
         
         if temp_prefix is None:
             temp_prefix = threading.currentThread().getName()
         
         # zlib.compress emits a raw zlib stream (RFC 1950), but browsers
         # expect gzip framing (RFC 1952) for Content-Encoding: gzip, so the
         # compressed bytes are produced via a gzip temp file instead.
         #value = zlib.compress(value, 6)
         temp  = '.temp.%s.gz' % temp_prefix
         tries = 0
         
         while True:
             try:
                 f = gzip.open(temp, 'wb')
                 f.write(value)
                 f.close()
                 f = open(temp, 'rb')
                 value = f.read()
                 f.close()
                 break
             except Exception:
                 tries += 1
                 
                 if tries >= 5:
                     raise
     
     key = Key(self.bucket, name)
     
     meta = {}
     if content_type is not None:
         meta['Content-Type'] = content_type
     
     if apply_gzip:
         meta['Content-Encoding'] = 'gzip'
     
     if len(meta) > 0:
         key.update_metadata(meta)
     
     # note that the order of setting the key's metadata, contents, and 
     # ACL is important for some seriously stupid boto reason...
     key.set_contents_from_string(value)
     key.set_acl('public-read')
     key.close()
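A usage sketch; the wrapper class name TrieUploader is hypothetical (the snippet only shows the add_key method), and the bucket name and payloads are invented.

import boto

conn = boto.connect_s3()
uploader = TrieUploader(conn.get_bucket('static-site'))  # hypothetical class

# Plain text, stored verbatim with an explicit Content-Type:
uploader.add_key('robots.txt', 'User-agent: *\n', content_type='text/plain')

# Gzipped on the way up; the stored key becomes data.json.gz:
uploader.add_key('data.json', '{"items": []}',
                 content_type='application/json', apply_gzip=True)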
Example #5
    def upload(self, paths_in=None, path_out=None, 
               force=False, metadata=None, name=None, recursive=False):
        """ Upload files to S3 

            No arguments to upload current directory.
            Inputs: paths_in  - list of directories or files to upload from local
                    path_out  - where on S3 to upload files (specify bucket and key)
                    force     - overwrite files even if they haven't changed
                    metatdata - a python dictionary of metadata to add to each file
                    name      - rename local file to this name on S3
                    recursive - upload directories recursively
        """

        files = []          # file names to save to AWS
        path_in_dic = {}    # local paths to files
        key_name = ""       # Extra key info to add to each file (generated by path out)
        bucket_name = None  # Bucket to save files to

        # If renaming make sure 1 file in and 1 file out
        if name:
            if paths_in is None:
                print "Must specify a file to rename"
                sys.exit(2)
            if not isinstance(paths_in, list):
                paths_in = [paths_in]
            if len(paths_in) > 1:
                print "Can only rename 1 file at a time"
                sys.exit(2)

        # If no paths in, set the current working directory
        if not paths_in:
            paths_in = [os.getcwd()]
            head, path_out = os.path.split(paths_in[0])

        # Else check that all the paths_in exist.
        else:
            if not isinstance(paths_in, list):
                paths_in = [paths_in]
            for path in paths_in:
                if not os.path.exists(path):
                    print ("error: Local path doesn't exist: " + path)
                    sys.exit(2)

        # Connect to S3 and get the buckets
        connection = boto.connect_s3()
        buckets = connection.get_all_buckets()

        # If path_out exists check it for a bucket name
        if path_out:
            normal_path = os.path.normpath(path_out)
            bucketname, d, key_name = normal_path.partition("/")
            for bucket in buckets:
                if bucket.name == bucketname:
                    bucket_name = bucketname
            if not bucket_name: # Ask to create (name = start of path)
                print "Specified path out doesn't contain a bucket name"
                create = raw_input('Would you like to create a bucket named "' + 
                                    bucketname + '" [y/n]: ')
                if create not in ('y', 'yes'):
                    print "No buckets to create, terminating."
                    sys.exit(0)
                else:
                    bucket_name = bucketname
                    connection.create_bucket(bucket_name, location=self.location)

        # Process each path in paths_in
        for path in paths_in:

            # If no path_out check paths_in[0] (only path) for a bucket name
            if not path_out: 

                files.append("")        # Create an empty first file to add parts to
                local_bucket_path = ""  # Create local bucket path to find .gitignore
                
                # Split apart paths_in and check-for/set bucket name
                normal_path = os.path.normpath(paths_in[0]) # only 1 path since no path_out
                path_parts = normal_path.split("/")
                for path_part in path_parts:
                    if bucket_name is None:
                        for bucket in buckets:
                            if path_part == bucket.name:
                                bucket_name = bucket.name
                        if bucket_name is None:
                            local_bucket_path = os.path.join(local_bucket_path, path_part)
                    else: # Once found bucket name, remaining parts are the key
                        files[0] = os.path.join(files[0], path_part)

                # If no bucket, ask to create if directory otherwise error
                if not bucket_name:
                    if os.path.isfile(path): # Error if file
                        print "Must give a bucket name with a file"
                        sys.exit(1)
                    else: # Ask to create bucket (name = directory)
                        bucket_name = path_parts[-1]
                        create = raw_input('Would you like to create a bucket named "' + 
                                            bucket_name + '" [y/n]: ')
                        if create.lower() not in ('y', 'yes'):
                            print "No buckets to create, terminating."
                            sys.exit(1)
                        else:
                            try:
                                connection.create_bucket(
                                    bucket_name, location=self.location)
                            except Exception:
                                print "Unable to create bucket"
                                sys.exit(2)

                # If bucket_name exists, try to add gitignore files
                gitignore_file = os.path.join(local_bucket_path, ".gitignore")
                _addIgnoreFile(self, gitignore_file)
                gitignore_file = os.path.join(local_bucket_path, bucket_name)
                gitignore_file = os.path.join(gitignore_file, ".gitignore")
                _addIgnoreFile(self, gitignore_file)

                # Add path to file
                if os.path.isfile(path):
                    path_in_dic[files[0]] = path

                # Read in files if path is a directory
                else:
                    temp_files = _fileList(path, folders=recursive)
                    for file in temp_files:
                        temp_path_in = file
                        file = file.replace(path + "/", "")
                        file = os.path.join(files[0], file)
                        path_in_dic[file] = temp_path_in
                        files.append(file)
                    # Drop the placeholder first entry; it is just the directory itself
                    files.pop(0)
                break # Only 1 path_in when no path_out, so break out of for loop
            
            # SPLIT PATH - pull apart the path_in, place in head and tail
            # file          => head=None       ; tail=file
            # path/in/file  => head=path/in/   ; tail=file
            # path/in       => head=path/      ; tail=in (dir)
            # path/in/      => head=path/in/   ; tail=None
            # never a slash in tail: empty if path ends in /
            head, tail = os.path.split(path)
            
            # If tail is empty then path is a directory so remove / and split again
            if tail == "":
                path = path.rstrip('/')
                head, tail = os.path.split(path)

            # If tail is a file add to files
            if os.path.isfile(path):
                files.append(tail)
                path_in_dic[tail] = path 

            # Else tail is a directory so add files in folder (maybe recursively)
            else:
                temp_files = _fileList(path, folders=recursive)
                for file in temp_files:
                    temp_path_in = file
                    file = file.replace(path + "/", "")
                    path_in_dic[file] = temp_path_in
                    files.append(file)

        # Upload all the files
        bucket = connection.get_bucket(bucket_name)
        for file in files:
            key = os.path.join(key_name, file)

            # If renaming the file swap in the new name
            if name:
                key, d, file_name = key.rpartition("/")
                key = os.path.join(key, name)

            # Skip files matching an ignore pattern (possibly refactor earlier)
            ignore = False
            for exp in self.ignorefiles:
                if fnmatch.fnmatch(file, exp):
                    ignore = True
                    break
            if ignore:
                continue
            
            # Add the key with file info to the bucket
            file_name = path_in_dic[file]
            if os.path.isfile(file_name):
                hash_local = _getHash(file_name)
                k = bucket.get_key(key)
                uploadRequired = True
                if k: # Key already exists
                    hash_remote = k.get_metadata('hash')
                    if hash_remote == hash_local:
                        uploadRequired = False
                        print "No change: " + file
                        if name:
                            print "as " + name
                    else:
                        k.set_metadata('hash', hash_local)
                else: # Create the key on S3
                    k = Key(bucket)
                    k.key = key
                    k.set_metadata('hash', hash_local)

                # Upload only if different hash
                if uploadRequired or force:
                    print "Added files: " + file
                    if name:
                        print "as " + name
                    if metadata:
                        k.update_metadata(metadata)
                    k.set_contents_from_filename(file_name)

            else: # Key is directory so just add
                k = Key(bucket)
                k.key = key