Example #1
    def save_job_results(geocoder, job_id):
        """
        Download results for completed jobs and save them to S3.
        """
        logging.info('Saving results for %s to S3' % job_id)
        finished_folder = 'geocode_finished_jobs'
        pending_folder = 'geocode_pending_jobs'

        connection = boto.connect_s3()
        bucket = connection.get_bucket(GEO_BUCKET)
        old_key = bucket.get_key('%s/%s' % (pending_folder, job_id))

        new_name = old_key.get_contents_as_string()
        new_key = Key(bucket)
        new_key.key = '%s/%s' % (finished_folder, new_name)

        results = geocoder.get_job_results(job_id)
        result_string = StringIO.StringIO()
        writer = DictWriter(result_string, fieldnames=results[0].keys())
        writer.writeheader()
        writer.writerows(results)
        result_string.seek(0)

        email_address = old_key.get_metadata('email')
        if email_address:
            new_key.set_metadata('email', email_address)
            send_email_notification(
                email_address, geocoder.get_job_statuses(job_id=job_id), new_name, 'finished')

        new_key.set_contents_from_string(result_string.getvalue())
        new_key.make_public()
        old_key.delete()
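This snippet relies on module-level setup defined elsewhere in its project (GEO_BUCKET, send_email_notification). A minimal sketch of the imports and placeholders it appears to assume, for Python 2 / boto 2 (the bucket name and notifier below are assumptions, not the project's real values):

    import logging
    import StringIO

    import boto
    from boto.s3.key import Key
    from csv import DictWriter

    GEO_BUCKET = 'my-geocode-bucket'  # placeholder; the real bucket name comes from project config

    def send_email_notification(address, statuses, name, state):
        # hypothetical stand-in for the project's notifier
        logging.info('Would email %s: job %s is %s', address, name, state)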
Example #2
    def addDataToS3(self, name, data, contentType):
        num_retries = 0
        max_retries = 5

        while True:
            try:
                conn = S3Connection(keys.aws.AWS_ACCESS_KEY_ID, keys.aws.AWS_SECRET_KEY)
                bucket = conn.lookup(self.bucket_name)
                key = Key(bucket, name)
                key.set_metadata('Content-Type', contentType)
                # for some reason, if we use set_contents_from_file here, an empty file is created
                # (most likely because the buffer is already at EOF; data.seek(0) or rewind=True would avoid that)
                key.set_contents_from_string(data.getvalue(), policy='public-read')
                #key.set_contents_from_file(data, policy='public-read')
                key.close()
                
                return "%s/%s" % (self.base_url, name)

            except Exception as e:
                logs.warning('S3 Exception: %s' % e)
                num_retries += 1
                if num_retries > max_retries:
                    msg = "Unable to connect to S3 after %d retries (%s)" % \
                        (max_retries, self.__class__.__name__)
                    logs.warning(msg)
                    raise Exception(msg)
                
                logs.info("Retrying (%s)" % (num_retries))
                time.sleep(0.5)

            finally:
                try:
                    if not key.closed:
                        key.close()
                except Exception:
                    logs.warning("Error closing key")
Example #3
    def dump(self, key, value, public=False):
        """
        Dump file to S3.

        Optionally make public
        """

        assert isinstance(key, basestring), u'Key must be a string'

        k = Key(self.bucket)
        k.key = key

        try:
            k.set_metadata(u'Content-Type', u'application/json')
            k.set_contents_from_string(json.dumps(value, sort_keys=True, indent=4, separators=(u',', u': ')))

            # set file permissions
            if public:
                k.set_acl(u'public-read')

        except Exception as e:
            print e
            return False

        else:
            # now update the cache
            if self._keys is not None:
                self._keys.add(key)
            return True
Example #4
File: s3.py Project: haoericliu/haoliu
 def uploadPublicImageFromString(self, s3KeyName, imageString):
     key = Key(self._bucket)
     key.key = s3KeyName
     # todo: check content-type
     key.set_metadata("Content-Type", "image/jpeg")
     key.set_contents_from_string(imageString)
     key.set_acl("public-read")
Example #5
def _save_file_to_bucket(conn, bucket_name, remote_filename, local_file, **kwargs):
    """ Save the local_file to bucket_name as remote_filename. Also, any additional
    arguments passed as key-value pairs, are stored as file's metadata on S3."""
    # print "Establishing handle with bucket '%s'..." % bucket_name
    b = _get_bucket(conn, bucket_name)
    if b is not None:
        # print "Establishing handle with key object '%s'..." % remote_filename
        k = Key( b, remote_filename )
        print("Attempting to save file '%s' to bucket '%s'..." % (remote_filename, bucket_name))
        try:
            # Store some metadata (key-value pairs) about the contents of the file being uploaded
            # Note that the metadata must be set *before* writing the file
            k.set_metadata('date_uploaded', str(datetime.utcnow()))
            for args_key in kwargs:
                print("Adding metadata to file '%s': %s=%s" % (remote_filename, args_key, kwargs[args_key]))
                k.set_metadata(args_key, kwargs[args_key])
            print("Saving file '%s'" % local_file)
            k.set_contents_from_filename(local_file)
            print("Successfully added file '%s' to bucket '%s'." % (remote_filename, bucket_name))
            make_public = True
            if make_public:
                k.make_public()
        except S3ResponseError as e:
            print("Failed to save file local file '%s' to bucket '%s' as file '%s': %s" % ( local_file, bucket_name, remote_filename, e ))
            return False
        return True
    else:
        return False
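As the comment in this example notes, boto only sends user metadata that is attached to the key before the upload call; metadata set afterwards is not stored with the object. A minimal sketch of the required ordering (the connection and bucket name are placeholders):

    import boto
    from boto.s3.key import Key

    bucket = boto.connect_s3().get_bucket('my-bucket')  # placeholder bucket
    k = Key(bucket, 'reports/output.csv')
    k.set_metadata('date_uploaded', '2015-01-01')   # 1) metadata first
    k.set_contents_from_filename('output.csv')      # 2) then the upload, which sends the headers
    # calling set_metadata() here would not change the object already stored on S3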
Example #6
File: s3.py Project: c2h5oh/pghoard
    def store_file_from_disk(self, key, filepath, metadata=None, multipart=None):
        size = os.path.getsize(filepath)
        key = self.format_key_for_backend(key)
        if not multipart or size <= self.multipart_chunk_size:
            s3key = Key(self.bucket)
            s3key.key = key
            if metadata:
                for k, v in metadata.items():
                    s3key.set_metadata(k, v)
            s3key.set_contents_from_filename(filepath, replace=True)
        else:
            start_of_multipart_upload = time.monotonic()
            chunks = math.ceil(size / self.multipart_chunk_size)
            self.log.debug("Starting to upload multipart file: %r, size: %r, chunks: %d",
                           key, size, chunks)
            mp = self.bucket.initiate_multipart_upload(key, metadata=metadata)

            with open(filepath, "rb") as fp:
                part_num = 0
                while fp.tell() < size:
                    part_num += 1
                    start_time = time.monotonic()
                    self._store_multipart_upload(mp, fp, part_num, filepath)
                    self.log.info("Upload of part: %r/%r of %r, part size: %r took: %.2fs",
                                  part_num, chunks, filepath, self.multipart_chunk_size,
                                  time.monotonic() - start_time)
            if len(mp.get_all_parts()) == chunks:
                self.log.info("Multipart upload of %r, size: %r, took: %.2fs, now completing multipart",
                              filepath, size, time.monotonic() - start_of_multipart_upload)
                mp.complete_upload()
            else:
                err = "Multipart upload of {!r} does not match expected chunk list".format(key)
                self.log.error(err)
                mp.cancel_upload()
                raise StorageError(err)
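The branch above uses a single PUT unless the caller requests multipart and the file exceeds multipart_chunk_size. A tiny standalone illustration of that decision (the chunk size and file sizes are made up):

    import math

    multipart_chunk_size = 100 * 1024 * 1024  # 100 MiB, an assumed chunk size

    def plan_upload(size, multipart=True):
        """Return ('single', 1) or ('multipart', chunk_count) for a file of `size` bytes."""
        if not multipart or size <= multipart_chunk_size:
            return 'single', 1
        return 'multipart', int(math.ceil(size / float(multipart_chunk_size)))

    print plan_upload(5 * 1024 * 1024)      # ('single', 1)
    print plan_upload(350 * 1024 * 1024)    # ('multipart', 4)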
Example #7
    def upload_to_s3(self, file, key, content_type=None):
        try:
            size = os.fstat(file.fileno()).st_size
        except:
            # Not all file objects implement fileno(),
            # so we fall back on this
            file.seek(0, os.SEEK_END)
            size = file.tell()
        conn = boto.connect_s3(self.aws_access_key_id, self.aws_secret_access_key)
        bucket = conn.get_bucket(self.bucketname, validate=False)
        k = Key(bucket)
        k.key = key
        k.delete()
        if content_type:
            k.set_metadata('Content-Type', content_type)
        sent = k.set_contents_from_file(file, policy='public-read')
        # Rewind for later use
        file.seek(0)

        if sent == size:
            url = "https://s3-ap-northeast-1.amazonaws.com/"
            url = url + self.bucketname
            url = url + "/" + key
            print (url)
            return url
        return ""
Example #8
File: storage.py Project: weixiyen/Rentfox
def save(key, path):
    key = 'uploads/' + key
    k = Key(getBucket(), key)
    mime = mimetypes.guess_type(key)[0]
    # metadata must be set before the upload, otherwise it is not attached to the stored object
    k.set_metadata("Content-Type", mime)
    k.set_contents_from_filename(path)
    k.set_acl('public-read')
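mimetypes.guess_type returns a (type, encoding) tuple, which is why only its first element is used for Content-Type above. For instance:

    import mimetypes

    print mimetypes.guess_type('uploads/photo.jpg')      # ('image/jpeg', None)
    print mimetypes.guess_type('uploads/styles.css.gz')  # ('text/css', 'gzip')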
Example #9
    def upload_archive(self, filename, key, auto_create_bucket=True):
        """
        Uploads an application archive version to s3
        """
        try:
            bucket = self.s3.get_bucket(self.aws.bucket)
            # us-east-1 buckets report an empty location string; every other region must match exactly
            expected_location = '' if self.aws.region == 'us-east-1' else self.aws.region
            if bucket.get_location() != expected_location:
                raise Exception("Existing bucket doesn't match region")
        except S3ResponseError:
            bucket = self.s3.create_bucket(self.aws.bucket, location=self.aws.region)

        def __report_upload_progress(sent, total):
            if not sent:
                sent = 0
            if not total:
                total = 0
            out("Uploaded " + str(sent) + " bytes of " + str(total) \
                + " (" + str(int(float(max(1, sent)) / float(total) * 100)) + "%)")

        # upload the new version
        k = Key(bucket)
        k.key = self.aws.bucket_path + key
        k.set_metadata('time', str(time()))
        k.set_contents_from_filename(filename, cb=__report_upload_progress, num_cb=10)
Example #10
File: roles.py Project: AmyWeiner/inbox
    def _save_to_s3(self, data):
        assert len(data) > 0, "Need data to save!"
        # TODO: store AWS credentials in a better way.
        assert 'AWS_ACCESS_KEY_ID' in config, "Need AWS key!"
        assert 'AWS_SECRET_ACCESS_KEY' in config, "Need AWS secret!"
        assert 'MESSAGE_STORE_BUCKET_NAME' in config, \
            "Need bucket name to store message data!"
        # Boto pools connections at the class level
        conn = S3Connection(config.get('AWS_ACCESS_KEY_ID'),
                            config.get('AWS_SECRET_ACCESS_KEY'))
        bucket = conn.get_bucket(config.get('MESSAGE_STORE_BUCKET_NAME'))

        # See if it already exists and has the same hash
        data_obj = bucket.get_key(self.data_sha256)
        if data_obj:
            assert data_obj.get_metadata('data_sha256') == self.data_sha256, \
                "Block hash doesn't match what we previously stored on s3!"
            # log.info("Block already exists on S3.")
            return

        data_obj = Key(bucket)
        # if metadata:
        #     assert type(metadata) is dict
        #     for k, v in metadata.iteritems():
        #         data_obj.set_metadata(k, v)
        data_obj.set_metadata('data_sha256', self.data_sha256)
        # data_obj.content_type = self.content_type  # Experimental
        data_obj.key = self.data_sha256
        # log.info("Writing data to S3 with hash {0}".format(self.data_sha256))
        # def progress(done, total):
        #     log.info("%.2f%% done" % (done/total * 100) )
        # data_obj.set_contents_from_string(data, cb=progress)
        data_obj.set_contents_from_string(data)
Example #11
def _addImageToS3(bucket, name, data):
    num_retries = 0
    max_retries = 5

    while True:
        try:
            key = Key(bucket, name)
            key.set_metadata("Content-Type", "image/jpeg")
            key.set_contents_from_string(data.getvalue(), policy="public-read")
            key.close()
            return key

        except Exception as e:
            logs.warning("S3 Exception: %s" % e)
            num_retries += 1
            if num_retries > max_retries:
                msg = "Unable to connect to S3 after %d retries (%s)" % (max_retries, self.__class__.__name__)
                logs.warning(msg)
                raise Exception(msg)

            logs.info("Retrying (%s)" % (num_retries))
            time.sleep(0.5)

        finally:
            try:
                if not key.closed:
                    key.close()
            except Exception:
                logs.warning("Error closing key")
Example #12
def addPhoto(photo, setTitle):
    url = flickr.photos_getSizes(photo_id = photo.attrib['id'])
    realUrl = None
    for url in url.find('sizes').findall('size'):
        if url.attrib['label'] == "Original":
            realUrl = url.attrib['source']

    if realUrl:
        keyId = setTitle + "/" + photo.attrib['id'] + ".jpg"
        dataKeyId = keyId + ".metadata"

        # Upload photo
        if bucket.get_key(keyId) is None:
            print "%s not found on S3; uploading" % keyId
            f, h = urllib.urlretrieve(realUrl, reporthook = makeFlickrCallback())
            key = Key(bucket)
            key.key = keyId


            print "Uploading %s to %s/%s" % (photo.attrib['title'], bucket.name, key.key)
            key.set_metadata('flickrInfo', key.key + ".metadata")
            key.set_metadata('inFlickrSet', set.attrib['id'])
            key.set_contents_from_filename(f, cb = makeBotoCallback())
            os.unlink(f)

        # Upload metadata
        if bucket.get_key(dataKeyId) is None:
            print "%s not found on S3, setting metadata" % dataKeyId
            photoInfo = flickr.photos_getInfo(photo_id = photo.attrib['id'], format = "rest")
            key = Key(bucket)
            key.key = dataKeyId
            key.set_contents_from_string(photoInfo) 
Example #13
def _write_files(app, static_url_loc, static_folder, files, bucket,
                 ex_keys=None, hashes=None):
    """ Writes all the files inside a static folder to S3. """
    new_hashes = []
    for file_path in files:
        asset_loc = _path_to_relative_url(file_path)
        key_name = _static_folder_path(static_url_loc, static_folder,
                                       asset_loc)
        msg = "Uploading %s to %s as %s" % (file_path, bucket, key_name)
        logger.debug(msg)

        exclude = False
        if app.config.get('S3_ONLY_MODIFIED', False):
            file_hash = hash_file(file_path)
            new_hashes.append((key_name, file_hash))

            if hashes and hashes.get(key_name, None) == file_hash:
                exclude = True

        if ex_keys and key_name in ex_keys or exclude:
            logger.debug("%s excluded from upload" % key_name)
        else:
            k = Key(bucket=bucket, name=key_name)
            # Set custom headers
            for header, value in app.config['S3_HEADERS'].iteritems():
                k.set_metadata(header, value)
            k.set_contents_from_filename(file_path)
            k.make_public()

    return new_hashes
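hash_file here is a project helper used to skip unchanged assets. A minimal sketch of what such a helper typically looks like (an assumption, not the extension's actual code):

    import hashlib

    def hash_file(file_path, block_size=65536):
        """Return a hex digest of the file's contents, read in chunks."""
        digest = hashlib.md5()
        with open(file_path, 'rb') as fp:
            for block in iter(lambda: fp.read(block_size), b''):
                digest.update(block)
        return digest.hexdigest()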
Example #14
def upload_to_s3(filename, key, bucket='dev.blibb'):
	c = boto.connect_s3()
	b = c.create_bucket(bucket)
	k = Key(b)
	k.key = key
	k.set_metadata('info_test', 'Imported from flask')
	k.set_contents_from_filename(app.config['UPLOAD_FOLDER'] + filename)
Example #15
    def get_key(self, url, create=False):
        import boto
        import boto.s3.connection
        from boto.s3.key import Key

        parsed = urlparse.urlparse(url)
        scheme = parsed.scheme
        self.hostname = parsed.netloc.partition(':')[0]
        self.port = int(parsed.netloc.partition(':')[2])
        path = parsed.path.strip("/")

        self.__conn = boto.connect_s3(
            aws_access_key_id = self.access_key,
            aws_secret_access_key = self.secret_key,
            host = self.hostname,
            port = self.port,
            is_secure = self.is_secure,  # set to False if you are not using SSL
            calling_format = boto.s3.connection.OrdinaryCallingFormat(),
            )

        pos = path.index("/")
        bucket_name = path[:pos]
        key_name = path[pos+1:]
        if create:
            bucket = self.__conn.create_bucket(bucket_name)
            key = Key(bucket, key_name)
            key.set_metadata('mode',33188)
        else:
            bucket = self.__conn.get_bucket(bucket_name)
            key = bucket.get_key(key_name)

        return key
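This method expects URLs of the form scheme://host:port/bucket/key. A quick illustration of how the parsing above decomposes such a URL (the host, port, and path are illustrative):

    import urlparse

    parsed = urlparse.urlparse('s3://ceph-gw.example.com:7480/backups/2015/dump.tar.gz')
    print parsed.netloc.partition(':')[0]        # 'ceph-gw.example.com'
    print int(parsed.netloc.partition(':')[2])   # 7480
    print parsed.path.strip('/')                 # 'backups/2015/dump.tar.gz'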
Example #16
 def put_backup_in_s3(self, type, filename):
     """Puts a backup file into s3"""
     k = Key(self.s3bucket)
     k.key = _get_backup_path(self.appid, type)
     _log.info("Putting %s into s3" % k.key)
     k.set_metadata("backup_date", script_start_time.strftime("%s"))
     k.set_contents_from_filename(filename)
Example #17
File: app.py Project: tansey/wispr
def upload_geoaudio():
    # API point for uploading geo-tagged sound clips.
    connection = mongo_conn()

    db = connection[os.environ['MONGODB_NAME']]

    audio = request.json['audio']

    # Save the audio to S3
    conn = S3Connection(os.environ['AWS_S3_KEY'], os.environ['AWS_S3_SECRET'])
    bucket = conn.create_bucket(os.environ['AWS_S3_BUCKET'])
    k = Key(bucket)
    k.key = str(uuid.uuid4())
    k.set_metadata("Content-Type", 'audio/aac')
    k.set_contents_from_string(audio)

    # Get the geolocation data
    geoaudioData = {
        "loc": [float(request.json['longitude']), float(request.json['latitude'])],
        "s3_key": k.key
    }

    # Get the collection
    geoaudio = db.geoaudio

    # Insert it into the db
    geoaudio.insert(geoaudioData)

    # Return success
    return jsonify(result="Success")
Example #18
def index(pin):

    s3_conn = S3Connection(AWS_KEY, AWS_SECRET)
    bucket = s3_conn.get_bucket('property-image-cache')
    s3_key = Key(bucket)
    s3_key.key = '{0}.jpg'.format(pin)

    if s3_key.exists():
        output = BytesIO()
        s3_key.get_contents_to_file(output)

    else:
        image_viewer = 'http://www.cookcountyassessor.com/PropertyImage.aspx?pin={0}'
        image_url = image_viewer.format(pin)
        image = requests.get(image_url)

        print(image.headers)

        if 'image/jpeg' in image.headers['Content-Type']:
            output = BytesIO(image.content)
            s3_key.set_metadata('Content-Type', 'image/jpg')
            s3_key.set_contents_from_file(output)
            s3_key.set_acl('public-read')
        else:
            sentry.captureMessage('Could not find image for PIN %s' % pin)
            abort(404)

    output.seek(0)
    response = make_response(output.read())
    response.headers['Content-Type'] = 'image/jpg'
    return response
Example #19
def _upload_to_s3(file_to_upload, path, name):
    '''
    Upload file to S3 using provided keyname.

    Returns:
        public_url: URL to access uploaded file
    '''
    if settings.S3_HOST is None:
        conn = S3Connection(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)
    else:
        conn = S3Connection(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY,
                            host=settings.S3_HOST,
                            calling_format=OrdinaryCallingFormat())
    bucketname = settings.S3_BUCKET
    bucket = conn.create_bucket(bucketname)

    prefix = getattr(settings, 'S3_PATH_PREFIX')
    path = '{0}/{1}'.format(prefix, path)

    k = Key(bucket)
    k.key = '{path}/{name}'.format(path=path, name=name)
    public_url = k.generate_url(60*60*24*365)  # URL timeout in seconds.
    k.set_metadata('filename', file_to_upload.name)
    k.set_contents_from_file(file_to_upload)

    return public_url
Example #20
def output_s3(fmt, output, opts, version):
    try:
        from boto.s3.connection import S3Connection
        from boto.s3.key import Key
    except ImportError:
        print >> sys.stderr, "Unable to publish to S3: Boto not installed."
        return

    # Verify the S3 configuration
    bucket_name = opts.output_s3
    access_key = opts.aws_access_key or os.environ.get("AWS_ACCESS_KEY_ID")
    secret_key = opts.aws_secret_key or os.environ.get("AWS_SECRET_ACCESS_KEY")

    if not access_key or not secret_key:
        print >> sys.stderr, "We need an AWS access key and AWS secret key"
        return

    conn = S3Connection(access_key, secret_key)
    bucket = conn.get_bucket(bucket_name)
    k = Key(bucket)
    if version == 2:
        file_name = "regions."
    else:
        file_name = "regions-v" + version + "."

    k.key = file_name + fmt

    # Set a content type
    content_types = {"json": "application/json", "xml": "text/xml"}
    if fmt in content_types:
        k.set_metadata("Content-Type", content_types[fmt])

    print "Writing %s/%s" % (bucket_name, k.key)
    k.set_contents_from_string(output)
Example #21
def save_file_in_s3(filename, subdirectory, env, gzipped_file_path):
    if env == 'staging':
        BUCKET_NAME = STAGE_BUCKET_NAME
    else:
        BUCKET_NAME = PROD_BUCKET_NAME


    if subdirectory != '':
        remote_filepath = WP_PATH + subdirectory + '/'
    else:
        remote_filepath = WP_PATH + '/'

    print 'uploading -- %s --  to -- %s --' % (filename, BUCKET_NAME + ' : ' + remote_filepath)

    #set headers
    # css -> content-type: text/css, content-encoding: gzip
    # js -> content-type: application/javascript, content-encoding: gzip

    conn = S3Connection(ACCESS_KEY, SECRET)
    bucket = conn.get_bucket(BUCKET_NAME)
    k = Key(bucket)
    k.key = remote_filepath + filename
    k.set_metadata('Content-Encoding', 'gzip')
    k.set_contents_from_filename(gzipped_file_path)
    k.make_public()

    print '**** Deleting ' + gzipped_file_path + '****'
    os.remove(gzipped_file_path)
Example #22
def s3_create_default_thumbs_for(item):
    """Create copies of the default thumbs for the given item.

    This copies the default files (all named with an id of 'new') to
    use the given item's id. This means there could be lots of duplicate
    copies of the default thumbs, but at least we can always use the
    same url when rendering.

    :param item: A 2-tuple with a subdir name and an ID. If given a
        ORM mapped class with _thumb_dir and id attributes, the info
        can be extracted automatically.
    :type item: ``tuple`` or mapped class instance
    """
    # fallback to normal thumb handling if no S3 engine is enabled.
    storage = get_s3_storage()
    if not storage:
        return None
    bucket = storage.connect_to_bucket()

    image_dir, item_id = normalize_thumb_item(item)

    for key in config['thumb_sizes'][image_dir].iterkeys():
        src_file = os.path.join(config['cache.dir'], 'images', thumb_path((image_dir, 'new'), key))
        dst_file = thumb_path(item, key)
        key = Key(bucket)
        key.key = dst_file

        key.set_metadata('is_default_thumb', '1')
        key.set_contents_from_filename(src_file, {'Content-Type': 'image/jpeg'})
        key.set_acl('public-read')
    return True
Example #23
def s3_upload(filename, label=None, content_disposition=None):
    '''Upload a file to the Amazon S3 bucket configured in local settings.
    (Should be configured to auto-expire after 24 hours.)

    :param filename: full path to file to be uploaded
    :param label: optional file name to use for the download; if not
        specified, will be generated from filename basename
    :param content_disposition: optional content-disposition header value,
        e.g. to prompt download of a type that could be displayed
        in the browser (e.g. xml)
    '''
    s3_conn = S3Connection(settings.AWS_ACCESS_KEY_ID,
                           settings.AWS_SECRET_ACCESS_KEY)
    s3_bucket = s3_conn.get_bucket(settings.AWS_S3_BUCKET)
    key = Key(s3_bucket)
    # use base filename as label, if no label is specified
    if label is None:
        label = basename(filename)
    key.key = label
    if content_disposition is not None:
        key.set_metadata('Content-Disposition', content_disposition)
    # NOTE: if zip file exports get very large (e.g. when including
    # images or deep zoom), this will need to be converted to a
    # multi-part upload
    # for now, log file size so we can check on errors
    filesize = os.stat(filename).st_size
    logger.info('File %s to upload to S3 is %s (%sb)',
                filename, filesizeformat(filesize), filesize)
    key.set_contents_from_filename(filename)
    # make the file publicly readable
    key.set_acl('public-read')

    return 'https://s3.amazonaws.com/%s/%s' % \
        (settings.AWS_S3_BUCKET, label)
Example #24
File: roles.py Project: dlitz/inbox
    def _save_to_s3(self, data):
        assert len(data) > 0, 'Need data to save!'

        access_key = config.get_required('AWS_ACCESS_KEY_ID')
        secret_key = config.get_required('AWS_SECRET_ACCESS_KEY')
        bucket_name = config.get_required('MESSAGE_STORE_BUCKET_NAME')

        # Boto pools connections at the class level
        conn = S3Connection(access_key, secret_key)
        bucket = conn.get_bucket(bucket_name, validate=False)

        # See if it already exists and has the same hash
        data_obj = bucket.get_key(self.data_sha256)
        if data_obj:
            assert data_obj.get_metadata('data_sha256') == self.data_sha256, \
                "Block hash doesn't match what we previously stored on s3!"
            return

        data_obj = Key(bucket)
        # if metadata:
        #     assert type(metadata) is dict
        #     for k, v in metadata.iteritems():
        #         data_obj.set_metadata(k, v)
        data_obj.set_metadata('data_sha256', self.data_sha256)
        # data_obj.content_type = self.content_type  # Experimental
        data_obj.key = self.data_sha256
        # log.info("Writing data to S3 with hash {0}".format(self.data_sha256))
        # def progress(done, total):
        #     log.info("%.2f%% done" % (done/total * 100) )
        # data_obj.set_contents_from_string(data, cb=progress)
        data_obj.set_contents_from_string(data)
Example #25
File: s3.py Project: bdurrow/pghoard
 def store_file_from_memory(self, key, memstring, metadata=None):
     s3key = Key(self.bucket)
     s3key.key = self.format_key_for_backend(key)
     if metadata:
         for k, v in self.sanitize_metadata(metadata).items():
             s3key.set_metadata(k, v)
     s3key.set_contents_from_string(memstring, replace=True)
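sanitize_metadata is a pghoard helper; since S3 user metadata travels as x-amz-meta-* HTTP headers, values generally need to be coerced to header-safe strings first. A rough sketch of the kind of normalization such a helper performs (an assumption, not pghoard's actual implementation):

    def sanitize_metadata(metadata):
        """Coerce metadata keys and values to plain strings safe to send as headers."""
        return {str(k): str(v) for k, v in metadata.items()}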
Example #26
File: core.py Project: gnublade/humus
    def sync(self, source, target_name):
        upload_path = self.compress_data(source, target_name)
        if self.gpg_binary and self.encrypt_command:
            upload_path = self.encrypt_file(upload_path)

        print upload_path
        now = self.now()
        now_str = now.strftime('%Y-%m-%dT%H:%M:%S')
        name_parts = target_name.split('.')
        if len(name_parts) > 1:
            new_name = name_parts[:-1]
            new_name.append(now_str)
            new_name.append(name_parts[-1])
            if self.compress:
                new_name.append('bz2')
        else:
            new_name = name_parts
            new_name.append(now_str)
            if self.compress:
                new_name.append('bz2')


        target_name = u'.'.join(new_name)
        bucket = self.get_bucket()
        key = Key(bucket)
        key.key = os.path.join(self.path, target_name)
        logger.info('Uploading to %s' % key.key)
        key.set_metadata('created', now_str)
        key.set_contents_from_filename(upload_path)
        key.set_acl('private')
Example #27
File: plants.py Project: sebavp/Plants-Web
def add():
    image = request.POST.get("photo")
    name = request.POST.get("name")
    common_name = request.POST.get("common_name")
    plant = Plant.retrieve(request.db, name)
    if plant:
        return "This species is already in the DB"
    if image is not None:
        mime = mimetypes.guess_type(image.filename)[0]
        conn = S3Connection("AKIAIMXIHJX3TFDQFVCA", "Lf7xWpeOB9mnY1zfFzl7WNtxtNhmCZ4ZXOI8Kvrr")
        bucket = conn.get_bucket("db_leaves")
        key = Key(bucket)
        key.key = name
        key.set_metadata("Content-Type", mime)
        key.set_contents_from_string(image.value)
        key.set_acl("public-read")

        descriptors, ax, bx, ay, by = EFD(Threshold(image.file).process(), 50, 100).fourier_coefficients()
        return Plant(
            {
                "name": name,
                "common_name": common_name,
                "wiki": request.POST.get("wiki"),
                "photo": "https://s3.amazonaws.com/db_leaves/%s" % quote(name),
                "descriptors": descriptors,
            }
        ).save(request.db)
    return []
Example #28
def upload_to_s3(file_path, path, name):
    """
    Upload file to S3 using provided keyname.

    Returns:
        public_url: URL to access uploaded file
    """
    try:
        conn = S3Connection(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)
        bucketname = settings.S3_BUCKETNAME
        try:
            bucket = conn.create_bucket(bucketname.lower())
        except Exception:
            bucket = conn.get_bucket(bucketname.lower())
        prefix = getattr(settings, "S3_PATH_PREFIX")
        path = u"{0}/{1}".format(prefix, path)
        key = u"{path}/{name}".format(path=removeNonAscii(path), name=removeNonAscii(name))
        k = Key(bucket)
        k.key = key
        k.set_contents_from_filename(file_path)
        k.set_acl("public-read")
        k.set_metadata("filename", removeNonAscii(name))
        public_url = k.generate_url(60 * 60 * 24 * 365)  # URL timeout in seconds.

        return True, public_url
    except Exception:
        error = "Could not connect to S3."
        log.exception(error)
        return False, error
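removeNonAscii is a project helper used to keep key names and the filename metadata header ASCII-safe. A hypothetical sketch of such a helper (an assumption, not the project's actual code):

    def removeNonAscii(s):
        """Drop characters outside the ASCII range."""
        return "".join(c for c in s if ord(c) < 128)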
Example #29
 def download_jobs(geocoder):
     """
     Download and submit jobs from S3.
     """
     logging.info('Downloading jobs')
     awaiting_folder = 'geocode_awaiting_submission'
     pending_folder = 'geocode_pending_jobs'
     connection = boto.connect_s3()
     bucket = connection.get_bucket(GEO_BUCKET)
     files = bucket.list('%s' % awaiting_folder)
     for f in files:
         try:
             name = f.name.replace('%s/' % awaiting_folder, '')
             fkey = bucket.get_key(f.name)
             email_address = fkey.get_metadata('email')
             if name:
                 logging.info('Uploading %s to Bing' % name)
                 job_id = geocoder.upload_address_batch(fkey.get_contents_as_string())
                 if job_id:
                     logging.info('Moving batch with old id %s to new id %s in %s' % (
                         name, job_id, pending_folder))
                     new_key = Key(bucket)
                     new_key.key = '%s/%s' % (pending_folder, job_id)
                     if email_address:
                         logging.info('Setting metadata to %s' % email_address)
                         new_key.set_metadata('email', email_address)
                         send_email_notification(email_address, {}, name, 'pending')
                     new_key.set_contents_from_string(name)
                     old_key = Key(bucket)
                     old_key.key = '%s/%s' % (awaiting_folder, name)
                     old_key.delete()
                 else:
                     send_email_notification(email_address, {}, name, 'error')
         except Exception as e:
             logging.warning('Error uploading %s to Bing: %s' % (name, e))
Example #30
def upload(request, bucket, key, data):

    if not key or key.endswith('/'):
        raise ValueError('key required for upload')

    perms = request.session['perms']
    (can_read, can_write) = has_permission(request.user, perms, bucket, key)

    if not can_write:
        return HttpResponseForbidden('%s does not have access to %s/%s' % (request.user.email, bucket, key or ''))

    b = request.s3.get_bucket(bucket)
    k = Key(bucket=b, name=key)

    if k.exists() and 'force' not in request.GET:
        raise HttpResponseForbidden('write failed: file exists')

    headers = {
        "x-amz-acl": "public-read",
        "Content-Length": len(data),
    }

    ct = mimetypes.guess_type(key)[0]
    if ct is not None:
        headers["Content-Type"] = ct

    k.set_contents_from_string(data, headers=headers)
    k.set_metadata('uploaded-by', request.user.email)

    if request.is_ajax():
        return HttpResponse('{}')
    else:
        return redirect('/' + '/'.join(path.split('/')[:-1]))
Example #31
def store_in_s3(filename, content):
    conn = S3Connection(    config_value("S3_API_KEY"),
        config_value("S3_API_SECRET"),)
    bucket = conn.create_bucket( config_value('S3_BUCKET') )
    k = Key(bucket) # create key on this bucket
    k.key = filename
    mime = mimetypes.guess_type(filename)[0]
    k.set_metadata('Content-Type', mime)
    k.set_contents_from_filename(content)
    k.set_acl('public-read')
Example #32
 def test_normal_object_metadata(self):
     key_name = str(uuid.uuid4())
     bucket = self.conn.create_bucket(self.bucket_name)
     key = Key(bucket, key_name)
     for k, v in self.metadata.items():
         key.set_metadata(k, v)
     key.set_contents_from_string("test_normal_object_metadata")
     self.assert_metadata(bucket, key_name)
     self.change_metadata(bucket, key_name)
     self.assert_updated_metadata(bucket, key_name)
Example #33
def upload_to_s3(aws_access_key_id,
                 aws_secret_access_key,
                 file,
                 bucket,
                 key,
                 sub_directory,
                 callback=None,
                 md5=None,
                 reduced_redundancy=False,
                 content_type=None):
    """
    Uploads the given file to the AWS S3
    bucket and key specified.

    callback is a function of the form:

    def callback(complete, total)

    The callback should accept two integer parameters,
    the first representing the number of bytes that
    have been successfully transmitted to S3 and the
    second representing the size of the to be transmitted
    object.

    Returns boolean indicating success/failure of upload.
    """
    try:
        size = os.fstat(file.fileno()).st_size
    except:
        # Not all file objects implement fileno(),
        # so we fall back on this
        file.seek(0, os.SEEK_END)
        size = file.tell()

    conn = boto.connect_s3(aws_access_key_id, aws_secret_access_key)
    bucket = conn.get_bucket(bucket, validate=True)
    k = Key(bucket)
    if sub_directory != "":
        k.key = os.path.join(sub_directory, key)
    else:
        k.key = key
    if content_type:
        k.set_metadata('Content-Type', content_type)
    sent = k.set_contents_from_file(file,
                                    cb=callback,
                                    md5=md5,
                                    reduced_redundancy=reduced_redundancy,
                                    rewind=True)

    # Rewind for later use
    file.seek(0)

    if sent == size:
        return True
    return False
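A hedged usage sketch of the callback shape the docstring above describes (the credentials, bucket, and paths are placeholders):

    def print_progress(complete, total):
        # byte counts reported by boto as the transfer proceeds
        print "%d of %d bytes sent" % (complete, total)

    with open('backup.tar.gz', 'rb') as fh:
        ok = upload_to_s3('AKIA-PLACEHOLDER', 'SECRET-PLACEHOLDER', fh,
                          'my-bucket', 'backup.tar.gz', 'backups/2015',
                          callback=print_progress)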
Example #34
def save_s3(bucket,
            filename,
            contents,
            systemfile,
            content_type=None,
            acl='public-read',
            meta=None,
            encode=None):
    from boto.dynamodb2.table import Item
    key = Key(bucket, filename)
    print 'new s3 key:', 'http://s3.amazonaws.com/' + bucket.name + (
        key.name if key.name.startswith('/') else '/' + key.name)
    if isinstance(meta, Item):
        meta = meta._data
    if isinstance(meta, dict):
        trim_meta = fixed.check_entity_size(meta)
        trim_meta = dict([(k, value) for (k, value) in trim_meta.items()
                          if value is not None and value])
        trim_meta = json.loads(json.dumps(trim_meta, cls=fixed.SetEncoder))
        print 'meta key length:', len(trim_meta.keys())
        key.metadata = trim_meta
    if content_type is not None:
        print 'set content type:', content_type
        key.content_type = content_type
    elif systemfile and systemfile.endswith('js.map'):
        print 'js map!'
        key.content_type = 'application/json'
    elif systemfile:
        gt = mimetypes.guess_type(systemfile)
        key.set_metadata('Content-Type', gt[0])
    if encode is not None and encode == 'gzip':
        key.set_metadata('Content-Encoding', 'gzip')
        gzmem = StringIO.StringIO()
        gzip_file = gzip.GzipFile(fileobj=gzmem, mode='w')
        if contents is not None:
            gzip_file.write(contents)
        elif systemfile is not None:
            with open(systemfile, 'rb') as outfile:
                gzip_file.write(outfile.read())
        gzip_file.close()
        key.set_contents_from_string(gzmem.getvalue())
        print 'gzip!'
    elif contents is not None:
        print 'from string'
        key.set_contents_from_string(contents)
    elif systemfile is not None:
        io = StringIO.StringIO(open(systemfile, 'r').read()).getvalue()
        print 'from disk:', systemfile, 'io:', len(io)
        key.set_contents_from_string(io)
    if acl is not None:
        print 'save acl:', acl
        key.set_acl(acl)
    print 'save complete:', key.name
    return key
Example #35
    def save_to_s3(self, image_str):
        expires = datetime.datetime.now() + datetime.timedelta(days=60)
        expires = expires.strftime("%a, %d %b %Y %H:%M:%S GMT")
        k = Key(settings.S3_ICONS_BUCKET)
        k.key = self.feed.s3_icons_key
        k.set_metadata('Content-Type', 'image/png')
        k.set_metadata('Expires', expires)
        k.set_contents_from_string(image_str.decode('base64'))
        k.set_acl('public-read')

        self.feed.s3_icon = True
Example #36
def upload(filename):

    initalize_connection()
    bucket = conn.get_bucket("ninja-kpom-mailgun")

    k = Key(bucket)
    k.key = "{}.csv".format(uuid.uuid4())
    k.set_contents_from_filename(filename)
    k.set_metadata('Content-Type', "application/csv")
    k.set_acl('public-read')
    return k.generate_url(expires_in=0, query_auth=False)
Example #37
def s3_set_contents_from_file(path, file, private=1):
    '''
    `file` is a file descriptor
    '''
    bucket = s3.get_bucket(PRIVATE_BUCKET_NAME) if private else s3.get_bucket(
        PUBLIC_BUCKET_NAME)
    k = Key(bucket)
    k.key = path
    k.set_metadata('Content-Type', file.content_type)
    file.seek(0)
    return k.set_contents_from_file(file)
Example #38
 def test_delete(self):
     """ delete() should remove package from storage """
     key = Key(self.bucket)
     name, version, path = 'mypkg', '1.2', 'path/to/file.tar.gz'
     key.key = path
     key.set_metadata('name', name)
     key.set_metadata('version', version)
     key.set_contents_from_string('foobar')
     self.storage.delete(key.key)
     new_key = self.bucket.get_key(key.key)
     self.assertIsNone(new_key)
Example #39
File: layer.py Project: shahidge4/waposter
 def getDownloadableMediaMessageBody(self, message):
     # save as temp file
     #filename = "%s/%s%s"%(tempfile.gettempdir(),message.getId(),message.getExtension())
     #with open(filename, 'wb') as f:
     #    f.write(message.getMediaContent())
     k = Key(self.b)
     k.key = "%s%s" % (message.getId(), message.getExtension())
     k.set_contents_from_string(str(message.getMediaContent()))
     k.set_metadata('Content-Type', message.getMimeType())
     k.set_acl('public-read')
     return k.generate_url(expires_in=0, query_auth=False)
Example #40
 def storeInS3(recipe_id, filename, content):
     conn = S3Connection(settings.AWS_ACCESS_KEY_ID,
                         settings.AWS_SECRET_ACCESS_KEY)
     b = conn.create_bucket(settings.S3BUCKET)
     mime = mimetypes.guess_type(filename)[0]
     k = Key(b)
     k.key = 'recipe_id_' + str(recipe_id) + '_' + filename
     k.set_metadata("Content_Type", mime)
     content.seek(0)
     k.set_contents_from_file(content)
     k.set_acl("public-read")
Example #41
 def upload_chunk(i):
     chunk = df[i:(i + chunk_size)]
     k = Key(bucket)
     k.key = 'data-{}-{}-{}.csv.gz'.format(name, i, i + chunk_size)
     k.set_metadata('parent', 'db.py')
     out = StringIO()
     with gzip.GzipFile(fileobj=out, mode="w") as f:
         f.write(chunk.to_csv(index=False, encoding='utf-8'))
     k.set_contents_from_string(out.getvalue())
     sys.stdout.write(".")
     return i
Example #42
def upload_to_s3(upload_file,
                 directory,
                 callback=None,
                 md5=None,
                 reduced_redundancy=False,
                 content_type=None):
    """
    Uploads the given file to the AWS S3
    bucket and key specified.

    callback is a function of the form:

    def callback(complete, total)

    The callback should accept two integer parameters,
    the first representing the number of bytes that
    have been successfully transmitted to S3 and the
    second representing the size of the to be transmitted
    object.

    Returns boolean indicating success/failure of upload.
    Borrowed from http://stackabuse.com/example-upload-a-file-to-aws-s3/.
    """
    file = open(upload_file, 'r+')
    key = os.path.join(directory, os.path.split(file.name)[-1])

    try:
        size = os.fstat(file.fileno()).st_size
    except:
        # Not all file objects implement fileno(),
        # so we fall back on this
        file.seek(0, os.SEEK_END)
        size = file.tell()

    conn = boto.connect_s3(SETTINGS['access_id'], SETTINGS['secret_key'])
    bucket = conn.get_bucket(SETTINGS['s3_bucket'], validate=True)
    k = Key(bucket)
    k.key = key

    if content_type:
        k.set_metadata('Content-Type', content_type)

    sent = k.set_contents_from_file(file,
                                    cb=callback,
                                    md5=md5,
                                    reduced_redundancy=reduced_redundancy,
                                    rewind=True)
    k.set_acl('public-read')
    # Rewind for later use
    file.seek(0)

    if sent == size:
        return True
    return False
Example #43
    def put(self, filedata, content_type, remote_path, force=False):

        now = datetime.datetime.utcnow()
        then = now + datetime.timedelta(self.expiration_days)
        expires = then.strftime("%a, %d %b %Y %H:%M:%S GMT")

        if self.aws_prefix:
            remote_path = "%s/%s" % (self.aws_prefix, remote_path)

        (hexdigest, b64digest) = mediasync.checksum(filedata)
        raw_b64digest = b64digest  # store raw b64digest to add as file metadata

        # create initial set of headers
        headers = {
            "x-amz-acl": "public-read",
            "Content-Type": content_type,
            "Expires": expires,
            "Cache-Control": 'max-age=%d' % (self.expiration_days * 24 * 3600),
        }

        key = self._bucket.get_key(remote_path)

        if key is None:
            key = Key(self._bucket, remote_path)

        key_meta = key.get_metadata('mediasync-checksum') or ''
        s3_checksum = key_meta.replace(' ', '+')
        if force or s3_checksum != raw_b64digest:

            key.set_metadata('mediasync-checksum', raw_b64digest)
            key.set_contents_from_string(filedata,
                                         headers=headers,
                                         md5=(hexdigest, b64digest))

            # check to see if file should be gzipped based on content_type
            # also check to see if filesize is greater than 1kb
            if content_type in TYPES_TO_COMPRESS:

                key = Key(self._bucket, "%s.gz" % remote_path)

                filedata = mediasync.compress(filedata)
                (hexdigest, b64digest) = mediasync.checksum(
                    filedata)  # update checksum with compressed data
                headers[
                    "Content-Disposition"] = 'inline; filename="%sgz"' % remote_path.split(
                        '/')[-1]
                headers["Content-Encoding"] = 'gzip'

                key.set_metadata('mediasync-checksum', raw_b64digest)
                key.set_contents_from_string(filedata,
                                             headers=headers,
                                             md5=(hexdigest, b64digest))

            return True
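mediasync.checksum returns the payload's MD5 as both a hex digest and a base64 digest, which is the pair boto accepts via the md5= argument so it does not have to rehash the data. A rough sketch of such a helper (an assumption about mediasync's internals, not its actual code):

    import base64
    import hashlib

    def checksum(filedata):
        digest = hashlib.md5(filedata)
        return digest.hexdigest(), base64.b64encode(digest.digest())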
Example #44
 def test_list(self):
     """ Can construct a package from a S3 Key """
     key = Key(self.bucket)
     name, version, path = 'mypkg', '1.2', 'path/to/file.tar.gz'
     key.key = path
     key.set_metadata('name', name)
     key.set_metadata('version', version)
     key.set_contents_from_string('foobar')
     package = list(self.storage.list(Package))[0]
     self.assertEquals(package.name, name)
     self.assertEquals(package.version, version)
     self.assertEquals(package.path, path)
Example #45
def moni_s3_upload(key, data, bucket_path):
    from boto.s3.key import Key
    from boto.s3.connection import S3Connection
    conn = S3Connection(app_settings.S3_ACCESS_KEY,
                        app_settings.S3_SECRET_KEY)
    bucket = conn.get_bucket(bucket_path)
    k = Key(bucket)
    k.key = key
    k.set_contents_from_file(data, rewind=True)
    k.set_acl('public-read')
    k.set_metadata('Content-Type', 'text/csv')
    return k.generate_url(expires_in=0, query_auth=False)
Example #46
 def upload_chunk(i):
     conn = S3Connection(AWS_ACCESS_KEY, AWS_SECRET_KEY)
     chunk = df[i:(i + chunk_size)]
     k = Key(bucket)
     k.key = 'data-%d-%d.csv.gz' % (i, i + chunk_size)
     k.set_metadata('parent', 'db.py')
     out = StringIO()
     with gzip.GzipFile(fileobj=out, mode="w") as f:
         f.write(chunk.to_csv(index=False, encoding='utf-8'))
     k.set_contents_from_string(out.getvalue())
     sys.stderr.write(".")
     return i
Example #47
    def save_png(self, keyString, valueString):

        key = Key(self.bucket)
        key.key = keyString
        # good until one year from now, and public
        key.set_metadata('Cache-Control', self.cache_control)
        key.set_metadata('Content-Type', 'image/png')
        key.set_contents_from_string(valueString)
        key.make_public()

        # a non-expiring URL
        return key.generate_url(expires_in=0, query_auth=False)
Example #48
def upload_s3_keys(s3conn, source, bucket_name, prefix, meta):
    start = time.time()
    bucket = s3conn.get_bucket(bucket_name, validate=False)
    k = Key(bucket)
    k.key = prefix
    for m in meta:
        k.set_metadata(m, meta[m])

    k.set_contents_from_filename(source)
    k.set_metadata('time', "foo")

    return time.time() - start
Example #49
 def save_schema(self, schema):
     account = Account.objects.get(visualization__query=self.query)
     conn = S3Connection(
         aws_access_key_id=account.aws_access_key_id,
         aws_secret_access_key=account.aws_secret_access_key)
     bucket = conn.get_bucket('lx-pilot')
     key = Key(bucket)
     key.key = self.schema_key()
     key.set_metadata('Content-Type', 'application/json')
     key.set_metadata('Content-Encoding', 'gzip')
     key.set_contents_from_string(
         gzip.compress(bytes(json.dumps(schema), 'utf-8')))
Example #50
    def push_file_to_s3(bucket, local_file, metadata=None):
        if not CloudHelper.ENABLED['s3']:
            return None

        key_name = os.path.basename(local_file)
        k = Key(CloudHelper.connect_s3_bucket(bucket))
        k.key = key_name
        if metadata is not None:
            for meta_name, meta_value in metadata.iteritems():
                k.set_metadata(meta_name, meta_value)
        k.set_contents_from_filename(local_file)
        return k
Example #51
    def upload(self,
               local,
               remote,
               ignoreMissing=False,
               force=False,
               hash=None):
        # maybe upload and download should use trailing slash to indicate directory should be uploaded instead of just a file
        assert not remote.startswith("/")
        # assert not local.startswith("/")
        remote_path = os.path.normpath(self.remote_path + "/" + remote)
        local_path = os.path.normpath(os.path.join(self.local_dir, local))
        # cope when case where local was passed as an abs path
        # local = os.path.relpath(local, self.local_dir)
        # assert not local.startswith("."), "local={}, local_dir={}".format(local, self.local_dir)
        # local_path = local
        uploaded_url = None

        if os.path.exists(local_path):
            if os.path.isfile(local_path):
                # if it's a file, upload it
                uploaded_url = "s3://" + self.bucket.name + "/" + remote_path
                if self.bucket.get_key(remote_path) is None or force:
                    key = Key(self.bucket)
                    key.name = remote_path
                    log.info("Uploading file %s to %s", local, uploaded_url)
                    key.set_contents_from_filename(local_path)
                    if hash is None:
                        hash = calc_hash(local_path)
                    key.set_metadata("sha256", hash)
            else:
                # upload everything in the dir
                assert hash is None
                for fn in os.listdir(local_path):
                    full_fn = os.path.join(local_path, fn)
                    if os.path.isfile(full_fn):
                        r = os.path.join(remote_path, fn)
                        if self.bucket.get_key(r) is None or force:
                            k = Key(self.bucket)
                            k.key = r
                            log.info("Uploading dir %s (%s to %s)", local_path,
                                     fn, fn)
                            k.set_contents_from_filename(full_fn)
                            hash = calc_hash(local_path)
                            k.set_metadata("sha256", hash)
                        else:
                            log.info(
                                "Uploading dir %s (%s to %s), skiping existing file",
                                local_path, fn, fn)
        elif not ignoreMissing:
            raise Exception("Could not find {}".format(local))

        return uploaded_url
Example #52
    def test_write_over_key_with_meta(self):
        """
        test that metadata does not persist when a key is written over
        """
        key_name = "test-key"
        test_string = os.urandom(1024)
        test_string_1 = os.urandom(1024)
        meta_key = "meta_key"
        meta_value = "pork"

        # create the bucket
        bucket = self._s3_connection.create_unique_bucket()
        self.assertTrue(bucket is not None)

        # create an empty key
        write_key = Key(bucket)

        # set the name
        write_key.name = key_name
        # self.assertFalse(write_key.exists())

        # set some metadata
        write_key.set_metadata(meta_key, meta_value)

        # upload some data
        write_key.set_contents_from_string(test_string)
        self.assertTrue(write_key.exists())

        # create another key to write over the first key
        write_key1 = Key(bucket, key_name)

        # upload some data
        write_key1.set_contents_from_string(test_string_1)
        self.assertTrue(write_key.exists())

        # create another key with the same name
        read_key = Key(bucket, key_name)

        # read back the data
        returned_string = read_key.get_contents_as_string()
        self.assertEqual(returned_string, test_string_1)

        # get the metadata
        returned_meta_value = read_key.get_metadata(meta_key)
        self.assertEqual(returned_meta_value, None)

        # delete the key
        read_key.delete()
        self.assertFalse(write_key.exists())

        # delete the bucket
        self._s3_connection.delete_bucket(bucket.name)
Example #53
def upload_file(gallery, bucket, f):
    logger.debug("Uploading file %s" % (f))

    key = Key(bucket)
    key.key = f

    cache_metadata = generate_cache_metadata(gallery, f)
    if cache_metadata:
        key.set_metadata('Cache-Control', cache_metadata)

    key.set_contents_from_filename(
        os.path.join(gallery.settings['destination'], f),
        policy=gallery.settings['upload_s3_options']['policy'])
Example #54
def _upload_s3(datafile, key_id, access_key, bucket_name, key):
    with open(datafile) as f:
        md5 = compute_md5(f)

    conn = boto.connect_s3(key_id, access_key)
    bucket = conn.get_bucket(bucket_name, validate=False)

    k = Key(bucket)
    k.key = key

    k.set_metadata("Content-Type", "application/json")
    k.set_contents_from_filename(datafile, md5=md5, replace=True)
    return "s3://%s/%s" % (bucket_name, k.key)
Example #55
    def uploadEpubS3(self, book_file, file_dir):
        conn = S3Connection(s3access, s3secret)
        bucket = conn.get_bucket(S3BUCKET)

        k = Key(bucket)

        key_name = 'epubs/' + book_file.filename
        k.key = key_name

        k.set_metadata('Content-Type', 'application/epub+zip')
        k.set_contents_from_string(book_file.read())

        return key_name
Example #56
def test_copy_key_replace_metadata():
    conn = boto.connect_s3('the_key', 'the_secret')
    bucket = conn.create_bucket("foobar")
    key = Key(bucket)
    key.key = "the-key"
    key.set_metadata('md', 'Metadatastring')
    key.set_contents_from_string("some value")

    bucket.copy_key('new-key', 'foobar', 'the-key',
                    metadata={'momd': 'Mometadatastring'})

    bucket.get_key("new-key").get_metadata('md').should.be.none
    bucket.get_key("new-key").get_metadata('momd').should.equal('Mometadatastring')
Example #57
    def post(self, key, data):
        # the new object to persist
        obj = Key(self.bucket)

        # get the previous version, if any
        previous = self.get(key, None)

        if previous:
            obj.set_metadata('previous_version_id', previous.version_id)

        # persist the new object
        obj.key = key
        obj.set_contents_from_string(json.dumps(data))
Example #58
 def upload(self, name, version, filename, data):
     key = Key(self.bucket)
     if self.prepend_hash:
         m = md5()
         m.update(name)
         m.update(version)
         prefix = m.digest().encode('hex')[:4]
         filename = prefix + '/' + filename
     key.key = self.bucket_prefix + filename
     key.set_metadata('name', name)
     key.set_metadata('version', version)
     key.set_contents_from_file(data)
     return key.key
Example #59
def uploadMetadata(setting, nodeid, metadatapath, dest):
    '''Upload metadata to cloud.'''
    #S3connection:
    conn = connectCloud(setting, nodeid)
    bucketname = setting.nodeInfo[nodeid].bucketname
    #Upload metadata to bucket:
    bucket = conn.get_bucket(bucketname)
    k = Key(bucket)
    k.key = dest
    fobj = open(metadatapath, 'rb')
    k.set_metadata('0', base64.urlsafe_b64encode(fobj.read()))
    fobj.close()
    return True
Example #60
def set_metadata():
    """Take a list of files to be uploaded to s3 and gzip CSS, JS, and HTML,
    setting metadata for all files including an 'expires' header defined
    at the beginning of the file. HTML expires after 1 hour."""

    s3_list = s3_filename()
    conn = S3Connection(AWS_KEY,
                        AWS_SECRET_KEY,
                        calling_format=OrdinaryCallingFormat())
    mybucket = conn.get_bucket(AWS_BUCKET)
    expires = time.time() + STATIC_EXPIRES
    expires_header = time.strftime("%a, %d-%b-%Y %T GMT", time.gmtime(expires))

    for filename in s3_list:
        k = Key(mybucket)
        ext = os.path.splitext(filename)[1]
        if ext == '':
            ext = '.html'

        if ext == '.html':  # deletes '.html' from s3 key so no ext on url
            local_name = os.path.splitext(filename)[0]
            if local_name == 'index':
                local_name = '/index.html'
            if local_name[0] != '/':  # if file within child dir
                k.key = AWS_DIRECTORY + '/' + local_name
            else:  # if file in top level dir
                k.key = AWS_DIRECTORY + local_name
            k.set_metadata('Expires', time.time() + HTML_EXPIRES)
        else:
            k.key = AWS_DIRECTORY + '/' + filename  # strip leading 0
            k.set_metadata('Expires', expires_header)

        if ext == '.css' or ext == '.js' or ext == '.html':
            build_file = PUSH_FROM + filename
            f_in = open(build_file, 'rb')
            with gzip.open(build_file + '.gz', 'w+') as f:
                f.writelines(f_in)
            f_in.close()
            f = build_file + '.gz'
            k.set_metadata('Content-Encoding', 'gzip')
        else:
            f = PUSH_FROM + filename

        k.set_metadata('Content-Type', content_types[ext])
        etag_hash = hashlib.sha1(f + str(time.time())).hexdigest()
        k.set_metadata('ETag', etag_hash)
        k.set_contents_from_filename(f)
        k.make_public()

    print '\nPage successfully updated'
    print "On " + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')