Example #1
File: s3.py Project: dosiecki/NewsBlur
def save_file_in_s3(filename, name=None):
    conn   = S3Connection(ACCESS_KEY, SECRET, calling_format=OrdinaryCallingFormat())
    bucket = conn.get_bucket(BUCKET_NAME)
    k      = Key(bucket)
    k.key  = name or filename

    k.set_contents_from_filename(filename)
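Like most snippets on this page, Example #1 depends on module-level imports and settings that are not shown. A minimal sketch of that surrounding context, where ACCESS_KEY, SECRET and BUCKET_NAME are placeholder values you would supply yourself:

# Hypothetical module context for Example #1; the constant values are placeholders.
from boto.s3.connection import S3Connection, OrdinaryCallingFormat
from boto.s3.key import Key

ACCESS_KEY = 'your-access-key-id'
SECRET = 'your-secret-access-key'
BUCKET_NAME = 'your-bucket-name'

# Usage: upload a local file, optionally under a different key name.
save_file_in_s3('report.csv', name='backups/report.csv')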
Example #2
def save_file_in_s3(filename):
    conn   = S3Connection(ACCESS_KEY, SECRET)
    bucket = conn.get_bucket(BUCKET_NAME)
    k      = Key(bucket)
    k.key  = filename

    k.set_contents_from_filename(filename)
def push(article_id, img_name, conn, bucket_name):
    img_path = article_id + "/" + img_name
    post_path = "posts/" + img_path

    if not conn.lookup(bucket_name):
        # Create the bucket and connect to it if it doesn't exist.
        bucket = conn.create_bucket(bucket_name, location=boto.s3.connection.Location.DEFAULT)
    else:
        # Connect to the bucket.
        bucket = conn.get_bucket(bucket_name)
    k = Key(bucket)

    # Give the key the same name as the image.
    k.key = img_path

    local_hash = hash_check(post_path)
    # If the image path exists, check if the image has been modified.
    if k.exists():
        # Find local md5.
        local_hash = hash_check(post_path)
        # Access cloudfront md5.
        cloudfront_hash = bucket.get_key(img_path).etag[1:-1]
        if local_hash != cloudfront_hash:
            print 'Updating ' + img_path + ' in Amazon S3 bucket ' + bucket_name
            k.set_contents_from_filename(post_path)
    else:
        # If the image doesn't exist, add it.
        print 'Uploading ' + img_path + ' to Amazon S3 bucket ' + bucket_name
        k.set_contents_from_filename(post_path)
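The push() example above relies on a hash_check() helper that is not part of the snippet. A minimal sketch, assuming it returns the hex MD5 digest of a local file so it can be compared against the S3 ETag:

import hashlib

def hash_check(path):
    # Hypothetical helper assumed by the example above: hex MD5 of a local file.
    md5 = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(8192), b''):
            md5.update(chunk)
    return md5.hexdigest()

Note that an S3 ETag equals the object's MD5 only for non-multipart uploads, so this comparison holds for keys written with set_contents_from_filename but not for large multipart uploads.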
Example #4
def s3_upload(slug, keyname, absolute_path, bucket, tempdir):
    """
    Upload a file to s3
    """
    conn = _s3conn()
    bucket = conn.get_bucket(bucket)

    mimetype = mimetypes.guess_type(absolute_path)
    options = { 'Content-Type' : mimetype[0] }

    # There's a possible race condition if files have the same name
    if mimetype[0] is not None and mimetype[0].startswith('text/'):
        upload = open(absolute_path);
        options['Content-Encoding'] = 'gzip'
        key_parts = keyname.split('/')
        filename = key_parts.pop()
        temp_path = os.path.join(tempdir, filename)
        gzfile = gzip.open(temp_path, 'wb')
        gzfile.write(upload.read())
        gzfile.close()
        absolute_path = temp_path

    k = Key(bucket)
    k.key = '%s/%s' % (slug, keyname)
    k.set_contents_from_filename(absolute_path, options, policy='public-read')
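s3_upload() above obtains its connection from a private _s3conn() helper that is not shown. A minimal sketch, assuming credentials are resolved from the environment or ~/.boto:

import boto

def _s3conn():
    # Hypothetical helper assumed by the example above: returns a boto S3
    # connection using credentials from the environment or ~/.boto.
    return boto.connect_s3()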
def __upload(fname):
    k = Key(bucket)
    k.key = fname[10:]  #strip off the site_root/
    print fname
    k.set_contents_from_filename(fname)
    k.set_acl('public-read')
    return  k
 def handle(self, *args, **options):
     conn = S3Connection(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
     b = conn.get_bucket(AWS_STORAGE_BUCKET_NAME)
     
     for sked in ['e','b', 'a']:
         filename = "sked%s.csv" % sked
         
         local_skedfile = "%s/%s" % (CSV_EXPORT_DIR, filename)
         print "Dumping sked %s to %s" % (sked, local_skedfile)
         dump_all_sked(sked, local_skedfile)
         
         # need to gzip these
         gzip_cmd = "gzip -f %s" % (local_skedfile)
         filename_zipped = filename + ".gz"
         local_skedfile_zipped = local_skedfile + ".gz"
         # old style os.system just works - subprocess sucks. 
         proc = os.system(gzip_cmd)
         
         s3_path = "%s/%s" % (AWS_BULK_EXPORT_PATH,filename_zipped)
         print "pushing %s to S3: bucket=%s path=%s" % (local_skedfile_zipped, AWS_STORAGE_BUCKET_NAME,s3_path)
         start = time.time()
         k = Key(b)
         k.key = s3_path
         k.set_contents_from_filename(local_skedfile_zipped, policy='public-read')
         elapsed_time = time.time() - start
         print "elapsed time for pushing to s3 is %s" % (elapsed_time)
         
     
     # if we didn't die, set the update time
     set_update(BULK_EXPORT_KEY)
         
Example #7
File: sccpy.py Project: StochSS/stochss
    def upload_file(self, filename):
        try:
            lifecycle = Lifecycle()
            lifecycle.add_rule('rulename', prefix='logs/', status='Enabled',
                               expiration=Expiration(days=10))
            conn = boto.connect_s3()

            if conn.lookup(self.bucket_name):  # bucket exists
                bucket = conn.get_bucket(self.bucket_name)
            else:
                # create a bucket
                bucket = conn.create_bucket(self.bucket_name, location=boto.s3.connection.Location.DEFAULT)

            bucket.configure_lifecycle(lifecycle)
            from boto.s3.key import Key

            k = Key(bucket)
            k.key = filename
            k.set_contents_from_filename(filename, cb=self.percent_cb, num_cb=10)
            k.set_acl('public-read-write')

        except Exception, e:
            sys.stdout.write("AmazonS3Agent failed with exception:\n{0}".format(str(e)))
            sys.stdout.flush()
            raise e
Example #8
def addPhoto(photo, setTitle):
    url = flickr.photos_getSizes(photo_id = photo.attrib['id'])
    realUrl = None
    for url in url.find('sizes').findall('size'):
        if url.attrib['label'] == "Original":
            realUrl = url.attrib['source']

    if realUrl:
        keyId = setTitle + "/" + photo.attrib['id'] + ".jpg"
        dataKeyId = keyId + ".metadata"

        # Upload photo
        if bucket.get_key(keyId) is None:
            print "%s not found on S3; uploading" % keyId
            f, h = urllib.urlretrieve(realUrl, reporthook = makeFlickrCallback())
            key = Key(bucket)
            key.key = keyId


            print "Uploading %s to %s/%s" % (photo.attrib['title'], bucket.name, key.key)
            key.set_metadata('flickrInfo', key.key + ".metadata")
            key.set_metadata('inFlickrSet', set.attrib['id'])
            key.set_contents_from_filename(f, cb = makeBotoCallback())
            os.unlink(f)

        # Upload metadata
        if bucket.get_key(dataKeyId) is None:
            print "%s not found on S3, setting metadata" % dataKeyId
            photoInfo = flickr.photos_getInfo(photo_id = photo.attrib['id'], format = "rest")
            key = Key(bucket)
            key.key = dataKeyId
            key.set_contents_from_string(photoInfo) 
Example #9
def deploy_front():

	conn = S3Connection()
	if "page" not in sys.argv:
		deploy_static("www.sentimentron.co.uk")
	bucket = conn.get_bucket('www.sentimentron.co.uk')

	front_page = Key(bucket)
	front_page.key = "index.html"
	front_page.set_contents_from_filename("index.html")

	info_page  = Key(bucket)
	info_page.key  = "info.html"
	info_page.set_contents_from_filename("info.html")

	example_page = Key(bucket)
	example_page.key = "examples.html"
	example_page.set_contents_from_filename("examples.html")

	paths = Key(bucket)
	paths.key = "paths.js"
	paths.set_contents_from_filename("paths.production.js")

	spinner = Key(bucket)
	paths.key = "spinner.gif"
	paths.set_contents_from_filename("spinner.gif")
def upload_to_s3(job, job_vars):
    """
    If s3_dir is specified in arguments, file will be uploaded to S3 using boto.
    WARNING: ~/.boto credentials are necessary for this to succeed!

    job_vars: tuple     Tuple of dictionaries: input_args and ids
    """
    import boto
    from boto.s3.key import Key

    input_args, ids = job_vars
    work_dir = job.fileStore.getLocalTempDir()
    uuid = input_args['uuid']
    # Parse s3_dir
    s3_dir = input_args['s3_dir']
    bucket_name = s3_dir.split('/')[0]
    bucket_dir = '/'.join(s3_dir.split('/')[1:])
    # Upload to S3 via boto
    conn = boto.connect_s3()
    bucket = conn.get_bucket(bucket_name)
    k = Key(bucket)
    if 'error.txt' in ids:
        read_from_filestore(job, work_dir, ids, 'error.txt')
        k.key = os.path.join(bucket_dir, uuid + '.ERROR')
        k.set_contents_from_filename(os.path.join(work_dir, 'error.txt'))
    else:
        read_from_filestore(job, work_dir, ids, 'uuid.tar.gz')
        uuid_tar = os.path.join(work_dir, 'uuid.tar.gz')
        if 'R.fastq' in ids:
            k.key = os.path.join(bucket_dir, uuid + 'single-end' + '.tar.gz')
        else:
            k.key = os.path.join(bucket_dir, uuid + '.tar.gz')
        k.set_contents_from_filename(uuid_tar)
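The Toil job above calls a read_from_filestore() helper that is not shown. A minimal sketch, assuming ids maps file names to Toil file-store IDs:

import os

def read_from_filestore(job, work_dir, ids, filename):
    # Hypothetical helper assumed by the example above: copy a file tracked in
    # the Toil file store into work_dir under the given name.
    job.fileStore.readGlobalFile(ids[filename], os.path.join(work_dir, filename))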
Example #11
def main(stream_url: str, stream_name: str, bucket_name: str, duration: str):
    temp_file = 'temp.m4a'

    print('beginning rip')

    code = subprocess.call(['ffmpeg',
                            '-i', stream_url,
                            '-t', duration,
                            '-acodec', 'copy',
                            '-absf', 'aac_adtstoasc',
                            temp_file])

    assert code == 0, 'stream rip failed with code ' + str(code)

    print('connecting to s3')
    conn = S3Connection(is_secure=False)  # AWS uses invalid certs
    bucket = conn.get_bucket(bucket_name)

    print('writing recorded file to s3')
    m4a = Key(bucket)
    m4a.name = datetime.datetime.utcnow().strftime(stream_name + '--%Y-%m-%d.m4a')
    m4a.content_type = MIME_TYPE
    m4a.metadata = {'Content-Type': MIME_TYPE}
    m4a.storage_class = 'STANDARD_IA'
    m4a.set_contents_from_filename(temp_file)
    m4a.close()

    print('generating new feed.xml from s3 bucket list')
    feed_xml = Key(bucket)
    feed_xml.name = 'feed.xml'
    feed_xml.content_type = 'application/rss+xml'
    feed_xml.set_contents_from_string(
        rss_xml(stream_name, bucket_name, bucket.list()))
    feed_xml.close()
Example #12
def upload_s3_book(release, directory):
    conn = boto.s3.connect_to_region(
        'us-west-1', calling_format=OrdinaryCallingFormat())
    bucket = conn.get_bucket('readiab.org')

    html = {'Content-type': 'text/html; charset=utf-8'}

    key_prefix = 'book/%s/' % release
    root_offset = None

    for root, dirs, files in os.walk(directory):
        if not root_offset:
            root_offset = root

        r = root.replace(root_offset, '').replace('/', '')
        for file in files:
            key = key_prefix
            if r:
                key += r + '/'

            key += file
            if file.startswith('index'):
                key += '.html'

            path = os.path.join(root, file)

            upload = Key(bucket)
            upload.key = key
            if '.zip' in path:
                upload.set_contents_from_filename(path)
            else:
                upload.set_contents_from_filename(path, headers=html)
Example #13
def upload_to_s3(file_path, path, name):
    """
    Upload file to S3 using provided keyname.

    Returns:
        public_url: URL to access uploaded file
    """
    try:
        conn = S3Connection(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)
        bucketname = settings.S3_BUCKETNAME
        try:
            bucket = conn.create_bucket(bucketname.lower())
        except Exception:
            bucket = conn.get_bucket(bucketname.lower())
        prefix = getattr(settings, "S3_PATH_PREFIX")
        path = u"{0}/{1}".format(prefix, path)
        key = u"{path}/{name}".format(path=removeNonAscii(path), name=removeNonAscii(name))
        k = Key(bucket)
        k.key = key
        k.set_contents_from_filename(file_path)
        k.set_acl("public-read")
        k.set_metadata("filename", removeNonAscii(name))
        public_url = k.generate_url(60 * 60 * 24 * 365)  # URL timeout in seconds.

        return True, public_url
    except Exception:
        error = "Could not connect to S3."
        log.exception(error)
        return False, error
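Example #13 sanitises the key and filename with a removeNonAscii() helper that is not part of the snippet. A minimal sketch of one plausible implementation:

def removeNonAscii(s):
    # Hypothetical helper assumed by Example #13: drop characters outside the
    # ASCII range so the resulting S3 key is safe for URLs and headers.
    return ''.join(c for c in s if ord(c) < 128)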
Example #14
    def upload_archive(self, filename, key, auto_create_bucket=True):
        """
        Uploads an application archive version to s3
        """
        try:
            bucket = self.s3.get_bucket(self.aws.bucket)
            if ((
                  self.aws.region != 'us-east-1' and self.aws.region != 'eu-west-1') and bucket.get_location() != self.aws.region) or (
                  self.aws.region == 'us-east-1' and bucket.get_location() != '') or (
                  self.aws.region == 'eu-west-1' and bucket.get_location() != 'eu-west-1'):
                raise Exception("Existing bucket doesn't match region")
        except S3ResponseError:
            bucket = self.s3.create_bucket(self.aws.bucket, location=self.aws.region)

        def __report_upload_progress(sent, total):
            if not sent:
                sent = 0
            if not total:
                total = 0
            out("Uploaded " + str(sent) + " bytes of " + str(total) \
                + " (" + str(int(float(max(1, sent)) / float(total) * 100)) + "%)")

        # upload the new version
        k = Key(bucket)
        k.key = self.aws.bucket_path + key
        k.set_metadata('time', str(time()))
        k.set_contents_from_filename(filename, cb=__report_upload_progress, num_cb=10)
Example #15
def _upload_to_s3(filename):
    if not app.config.get('UPLOAD_SCREENSHOTS_TO_S3', False):
        return

    import boto
    from boto.s3.key import Key
    conn = boto.connect_s3()
    b = conn.get_bucket(app.config['S3_BUCKET'])
    k = Key(b)
    k.key = '{}/{}'.format(
        app.config.get('S3_FILES_PREFIX', 'sleepypuppy'),
        filename
    )
    k.set_contents_from_filename(
        "{}/{}".format(
            app.config['UPLOAD_FOLDER'],
            filename
        )
    )
    os.remove(
        "{}/{}".format(
            app.config['UPLOAD_FOLDER'],
            filename
        )
    )
Example #16
def upload():
    """ Upload controller """

    print 'Starting upload...'
    if request.method == 'POST':
        image = request.files['file']
        print 'File obtained...'

        if allowed_file(image.filename):
            print 'Image allowed.'
            filename = secure_filename(image.filename)
            image.save(os.path.join(UPLOAD_FOLDER, filename))

            print 'Uploading to s3...'
            conn = boto.connect_s3(AWS_KEY, AWS_SECRET)
            b = conn.get_bucket(AWS_BUCKET)
            k = Key(b)

            print 'Setting key...'
            k.key = '%s_%s' % (uuid.uuid4(), filename)
            k.set_contents_from_filename(UPLOAD_FOLDER + filename)

            print 'Making public...'
            k.make_public()
            k.set_acl('public-read')

            print 'Responding to request...'
            return jsonify(status='Success.')
        else:
            print 'File not allowed.'
            return jsonify(status='File not allowed.')
    else:
        print 'Upload failed.'
        return jsonify(status='fail')
Example #17
def Seppuku(why):
    # Get the instance ID
    r = requests.get("http://169.254.169.254/latest/meta-data/instance-id")
    if r.status_code != 200:
        wf.logger.logger.error("Seppuku() unable to get instance ID")
        exit(3)
    instance_id = r.text

    # Declare our intent
    wf.logger.logger.error("Seppuku(%s): Instance is stopping because [%s]" % (instance_id, why))

    # Save a copy of the latest syslog to S3
    s3_conn = boto.connect_s3()
    bucket = s3_conn.get_bucket('wf-instance-logs')
    key = Key(bucket)
    key.key = "%s.txt" % instance_id
    wf.logger.logger.error("Seppuku(%s): copying log to %s" % (instance_id, key.generate_url(0)))
    key.set_contents_from_filename('/var/log/syslog')


    # Now commit Seppuku
    ec2_conn = boto.ec2.connect_to_region("us-west-1")
    # this can throw an exception.  Protect later.
    ec2_conn.terminate_instances(instance_ids=[instance_id])
    time.sleep(60*5)

    # What!  No sleep?  Then halt
    subprocess.check_call(["sudo", "halt"])
    time.sleep(60*5)
    exit(9)
Example #18
def upload(state, filename):
    today = datetime.date.today()

    # build URL
    s3_bucket = 'data.openstates.sunlightlabs.com'
    n = 1
    s3_path = '%s-%02d-%s-r%d.zip' % (today.year, today.month, state, n)
    s3_url = 'http://%s.s3.amazonaws.com/%s' % (s3_bucket, s3_path)

    metadata = db.metadata.find_one({'_id':state})
    old_url = metadata.get('latest_dump_url')

    if s3_url == old_url:
        old_num = re.match('.*?-r(\d*).zip', old_url).groups()[0]
        n = int(old_num)+1
        s3_path = '%s-%02d-%s-r%d.zip' % (today.year, today.month, state, n)
        s3_url = 'http://%s.s3.amazonaws.com/%s' % (s3_bucket, s3_path)

    # S3 upload
    s3conn = boto.connect_s3(settings.AWS_KEY, settings.AWS_SECRET)
    bucket = s3conn.create_bucket(s3_bucket)
    k = Key(bucket)
    k.key = s3_path
    k.set_contents_from_filename(filename)
    k.set_acl('public-read')

    metadata['latest_dump_url'] = s3_url
    metadata['latest_dump_date'] = datetime.datetime.utcnow()
    db.metadata.save(metadata, safe=True)

    print 'uploaded to %s' % s3_url
    def upload_package(self, application_name, package_path):
        bucket = self._connection.get_bucket(application_name)
        key = Key(bucket)
        key.key = os.path.basename(package_path)
        key.set_contents_from_filename(package_path)

        return application_name, key.key
Example #20
def _upload_file(file_path, bucket, key_name, headers={}, do_gzip=False):
    k = Key(bucket=bucket, name=key_name)
    for header, value in headers.items():
        if (header, value) != ('Content-Encoding', 'gzip'):
            k.set_metadata(header, value)
    mimetype = mimetypes.guess_type(file_path)[0]
    if mimetype:
        k.set_metadata('Content-Type', mimetype)
    with open(file_path) as f:
        content = f.read()
        if do_gzip:
            k.set_metadata('Content-Encoding', 'gzip')
            gzipped = StringIO()
            with gzip.GzipFile(fileobj=gzipped, mode='w') as _gzip:
                _gzip.write(content)
            content = gzipped.getvalue()
    try:
        k.set_contents_from_string(content)
    except S3ResponseError:
        if not do_gzip:
            k.set_contents_from_filename(file_path)
        else:
            raise
    k.make_public()
    return k
Example #21
def make_zip(directory):
    if None in [settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY]:
        raise ImproperlyConfigured("AWS configuration not set.")

    conn = S3Connection(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)
    bucket = conn.create_bucket(settings.AWS_BUCKET)

    filename = os.path.basename(directory) + ".zip"
    zip_file = zipfile.ZipFile(filename, "w")

    for root, dirs, files in os.walk(directory):
        for file in files:
            path = os.path.join(root, file)

            arcname = path.replace(directory, "")

            zip_file.write(path, arcname)

    zip_file.close()

    k = Key(bucket)
    k.key = filename
    k.set_contents_from_filename(filename)
    k.set_acl("public-read")

    os.remove(filename)

    return k.generate_url(24 * 60 * 60)
    def _upload_attachments_to_s3(self):
        try:
            if settings.EMAIL_QUEUE['aws'].get('attachment_bucket') is None:
                error_logger.error("Attachments require attachment_bucket under settings. Skipping sending attachments")
                return []
            if self._s3_conn is None:
                self._s3_conn = S3Connection(settings.EMAIL_QUEUE['aws']['key'], settings.EMAIL_QUEUE['aws']['secret'])
            bucket = self._s3_conn.get_bucket(settings.EMAIL_QUEUE['aws']['attachment_bucket'])
            uploaded_attachments = []
            for attachment in self.attachments:
                k = Key(bucket)
                filename =  os.path.basename(urlparse.urlsplit(attachment.get('url')).path)
                k.key = filename
                k.set_contents_from_filename(attachment.get('url'))
                if settings.EMAIL_QUEUE['aws'].get('s3-url-endpoint') is None:
                    s3_url_endpoint = "https://s3-" + settings.EMAIL_QUEUE['aws']['region'] + ".amazonaws.com/" + settings.EMAIL_QUEUE['aws']['attachment_bucket'] + '/'
                else:
                    s3_url_endpoint = settings.EMAIL_QUEUE['aws'].get('s3-url-endpoint')
                s3_uploaded_url = s3_url_endpoint + filename
                uploaded_attachment = copy.deepcopy(attachment)
                uploaded_attachment['url'] = s3_uploaded_url
                uploaded_attachments.append(uploaded_attachment)

            return uploaded_attachments
        except Exception as e:
            raise e
Example #23
File: dump.py Project: hpetru/billy
def upload(abbr, filename, type, s3_prefix='downloads/', use_cname=True):
    today = datetime.date.today()

    # build URL
    s3_bucket = settings.AWS_BUCKET
    s3_path = '%s%s-%02d-%02d-%s-%s.zip' % (s3_prefix, today.year, today.month,
                                            today.day, abbr, type)
    if use_cname:
        s3_url = 'http://%s/%s' % (s3_bucket, s3_path)
    else:
        s3_url = 'http://%s.s3.amazonaws.com/%s' % (s3_bucket, s3_path)

    # S3 upload
    s3conn = boto.connect_s3(settings.AWS_KEY, settings.AWS_SECRET)
    bucket = s3conn.create_bucket(s3_bucket)
    k = Key(bucket)
    k.key = s3_path
    logging.info('beginning upload to %s' % s3_url)
    k.set_contents_from_filename(filename)
    k.set_acl('public-read')

    meta = metadata(abbr)
    meta['latest_%s_url' % type] = s3_url
    meta['latest_%s_date' % type] = datetime.datetime.utcnow()
    db.metadata.save(meta, safe=True)

    logging.info('uploaded to %s' % s3_url)
Example #24
def send_to_s3(items=None, is_binary=False):
    """
    For items in an iterable, send them to your s3 account
    """
    conn, bucket = s3_init(AWS_ACCESS_KEY_ID, AWS_SECRET_KEY, BUCKET_NAME)
    for label, data in items:
        key = Key(bucket)
        key.key = label
        for item in bucket.list():
            local_md5 = hashlib.md5(data).hexdigest()
            if item.name == label:
                key.open()
                key.close()  # loads key.etag
                # remote hash
                remote_md5 = key.etag.replace('\"', '')  # clears quote marks
                # If new backup is different than the last saved one, update it
                if local_md5 != remote_md5:
                    if is_binary:
                        key.set_contents_from_filename(data)
                    else:
                        key.set_contents_from_string(data)
                break
        else:
            # No matching key was found, so upload it for the first time.
            if is_binary:
                key.set_contents_from_filename(data)
            else:
                key.set_contents_from_string(data)
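send_to_s3() above gets its connection and bucket from an s3_init() helper that is not shown. A minimal sketch, assuming it returns a (connection, bucket) pair:

import boto

def s3_init(access_key_id, secret_key, bucket_name):
    # Hypothetical helper assumed by the example above.
    conn = boto.connect_s3(access_key_id, secret_key)
    bucket = conn.get_bucket(bucket_name)
    return conn, bucket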
Example #25
    def upload(self, key, filename, is_public=False, metadata=None):

        k = Key(self.bucket)
        k.key = key

        headers = {'Cache-Control': 'max-age=31536000'}
        content_type, encoding = mimetypes.guess_type(filename)
        if content_type is not None:
            headers['Content-Type'] = content_type
        if encoding == 'gzip':
            headers['Content-Encoding'] = 'gzip'

        if metadata is not None:
            for key in metadata:
                headers['x-amz-meta-' + key] = metadata[key]

        for _ in xrange(5):
            try:
                k.set_contents_from_filename(
                    filename,
                    headers=headers,
                    policy=('public-read' if is_public else 'private')
                )
                logger.info('Upload %s -> %s', filename, k.name)
                break

            except Exception as e:
                logger.exception(e)
                logger.warn('Try upload again')

        else:
            logger.error('Retry more than 5 times, give it up.')
            raise ExceedMaxRetryError()
Example #26
def upload_to_s3(filename, key, bucket='dev.blibb'):
	c = boto.connect_s3()
	b = c.create_bucket(bucket)
	k = Key(b)
	k.key = key
	k.set_metadata('info_test', 'Imported from flask')
	k.set_contents_from_filename(app.config['UPLOAD_FOLDER'] + filename)
Example #27
def upload_build(build_file, bucket_name, project_name):
    '''
    Upload the given build zip file to the specified S3 bucket/project
    directory.
    
    @type build_file: String
    @param build_file: (ZIP) file containing the build that should be uploaded
    
    @type bucket_name: String
    @param bucket_name: Name of the S3 bucket to use
    
    @type project_name: String
    @param project_name: Name of the project folder inside the S3 bucket
    '''

    if not os.path.exists(build_file) or not os.path.isfile(build_file):
        print("Error: Build must be a (zip) file.", file=sys.stderr)
        return

    conn = S3Connection()
    bucket = conn.get_bucket(bucket_name)

    remote_file = "%s/build.zip" % project_name

    remote_key = Key(bucket)
    remote_key.name = remote_file
    print("Uploading file %s -> %s" % (build_file, remote_key.name))
    remote_key.set_contents_from_filename(build_file)
    def __s3_upload__(self):
        """
        upload the file to s3
        see http://boto.cloudhackers.com/en/latest/s3_tut.html
        :return:
        """
        # s3 = boto3.resource('s3')
        s3,_ = self.__s3_connect__()

        aws_tar = self.__get_aws_tar_name__()

        b = s3.get_bucket('zooniverse-static')

        key_str = "panoptes-uploads.zooniverse.org/production/project_aggregations_export/"+aws_tar

        s3_key = Key(b)
        s3_key.key = key_str

        if not os.path.exists("/tmp/"+aws_tar):
            print("warning the tar file does not exist - creating an temporary one.")
            panoptes_file = open("/app/config/aggregation.yml","rb")
            api_details = yaml.load(panoptes_file)

            rollbar_token = api_details[self.environment]["rollbar"]
            rollbar.init(rollbar_token,self.environment)
            rollbar.report_message('the tar file does not exist', 'warning')
            with open("/tmp/"+aws_tar,"w") as f:
                f.write("")

        s3_key.set_contents_from_filename("/tmp/"+aws_tar)
Example #29
 def put_backup_in_s3(self, type, filename):
     """Puts a backup file into s3"""
     k = Key(self.s3bucket)
     k.key = _get_backup_path(self.appid, type)
     _log.info("Putting %s into s3" % k.key)
     k.set_metadata("backup_date", script_start_time.strftime("%s"))
     k.set_contents_from_filename(filename)
Example #30
def _write_files(app, static_url_loc, static_folder, files, bucket,
                 ex_keys=None, hashes=None):
    """ Writes all the files inside a static folder to S3. """
    new_hashes = []
    for file_path in files:
        asset_loc = _path_to_relative_url(file_path)
        key_name = _static_folder_path(static_url_loc, static_folder,
                                       asset_loc)
        msg = "Uploading %s to %s as %s" % (file_path, bucket, key_name)
        logger.debug(msg)

        exclude = False
        if app.config.get('S3_ONLY_MODIFIED', False):
            file_hash = hash_file(file_path)
            new_hashes.append((key_name, file_hash))

            if hashes and hashes.get(key_name, None) == file_hash:
                exclude = True

        if ex_keys and key_name in ex_keys or exclude:
            logger.debug("%s excluded from upload" % key_name)
        else:
            k = Key(bucket=bucket, name=key_name)
            # Set custom headers
            for header, value in app.config['S3_HEADERS'].iteritems():
                k.set_metadata(header, value)
            k.set_contents_from_filename(file_path)
            k.make_public()

    return new_hashes
Example #31
def upload_installer(fn, prefix, include_date, is_dev_build, extension,
                     date_string):

    s3 = boto.connect_s3()

    if extension == '.exe':
        if is_dev_build:
            b = s3.get_bucket('dyn-builds-dev')
        else:
            b = s3.get_bucket('dyn-builds-data')
    elif extension == '.sig':
        if is_dev_build:
            b = s3.get_bucket('dyn-builds-dev-sig')
        else:
            b = s3.get_bucket('dyn-builds-data-sig')

    k = Key(b)

    key = 'daily/' + prefix + '.' + date_string + extension if include_date else 'daily/' + prefix + extension

    k.key = os.path.basename(key)

    k.set_contents_from_filename(fn, cb=report_progress, num_cb=40)
Example #32
def get_image():
	image_b64 = request.values['imageBase64']
	image_encoded = image_b64.split(',')[1]
	image = base64.decodebytes(image_encoded.encode('utf-8'))
	'digit1-O_n1'.split('n')
	drawn_digit = request.values['digit']
	type = 'O'
	filename = 'digit' + str(drawn_digit) + '-' + type + str(uuid.uuid1()) + '.jpg'
	with open('tmp/' + filename, 'wb') as f:
		f.write(image)

	REGION_HOST = 's3-external-1.amazonaws.com'

	conn = S3Connection(os.environ['AWSAccessKeyId'], os.environ['AWSSecretKey'], host=REGION_HOST)
	bucket = conn.get_bucket('nikitinandrews')

	k = Key(bucket)
	key = filename
	fn = 'tmp/' + filename
	k.key = key
	k.set_contents_from_filename(fn)
	print('Done')
	return filename
Example #33
    def handle(self, *args, **options):

        my_logger = fcc_logger()
        my_logger.info("starting backup run...")

        conn = S3Connection(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
        b = conn.get_bucket('politicaladsleuth-assets')
        k = Key(b)

        pdfs_to_backup = PDF_File.objects.filter(
            local_file_path__isnull=False, is_backed_up=False).values('id')

        num_to_process = len(pdfs_to_backup)

        print "Processing %s files" % num_to_process
        count = 0

        for this_pdf_id in pdfs_to_backup:
            this_pdf = PDF_File.objects.get(pk=this_pdf_id['id'])

            if this_pdf.is_backed_up:
                print "already backed up!"
                continue

            count += 1
            if (count % 100 == 0):
                print "Processed %s" % count
            local_file_path = this_pdf.local_file_path
            full_file_path = SCRAPER_LOCAL_DOC_DIR + "/" + local_file_path
            #print "path is: %s" % full_file_path

            local_file_path = local_file_path.replace("%%", "/")
            s3_string = "media/fcc_backup/%s" % local_file_path
            #print "s3 destination is: %s" % s3_string

            k.key = s3_string
            try:
                result = k.set_contents_from_filename(full_file_path,
                                                      policy='public-read')
            except:
                tb = traceback.format_exc()
                message = "*BACKUP ERROR:* Error uploading %s\n%s" % (
                    local_file_path, tb)
                print message
                my_logger.warn(message)

                continue
            this_pdf.is_backed_up = True
            this_pdf.s3_full_url = s3_string
            this_pdf.save()
Example #34
def s3(request):
    AWS_ACCESS_KEY_ID = 'AKIAJX4LF2MP4X2XUANQ'
    AWS_SECRET_ACCESS_KEY = 'bT9VxuFn0FBepAjhvtYnOhnz1IVjmmIJesPhAuOy'

    bucket_name = AWS_ACCESS_KEY_ID.lower() + '-dump'
    conn = boto.connect_s3(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)

    try:
        bucket = conn.get_bucket(bucket_name)
    except:
        bucket = conn.create_bucket(
            bucket_name, location=boto.s3.connection.Location.DEFAULT)

    testfile = "media/data.csv"

    def percent_cb(complete, total):
        sys.stdout.write('.')
        sys.stdout.flush()

    k = Key(bucket)
    k.key = 'my test file'
    k.set_contents_from_filename(testfile, cb=percent_cb, num_cb=10)
    return HttpResponse('Success')
Example #35
def upload(abbr, filename):
    today = datetime.date.today()

    # build URL
    s3_bucket = settings.AWS_BUCKET
    s3_path = '%s-%02d-%02d-%s-csv.zip' % (today.year, today.month, today.day,
                                           abbr)
    s3_url = 'http://%s.s3.amazonaws.com/%s' % (s3_bucket, s3_path)

    # S3 upload
    s3conn = boto.connect_s3(settings.AWS_KEY, settings.AWS_SECRET)
    bucket = s3conn.create_bucket(s3_bucket)
    k = Key(bucket)
    k.key = s3_path
    k.set_contents_from_filename(filename)
    k.set_acl('public-read')

    meta = metadata(abbr)
    meta['latest_csv_url'] = s3_url
    meta['latest_csv_date'] = datetime.datetime.utcnow()
    db.metadata.save(meta, safe=True)

    print('uploaded to %s' % s3_url)
    def upload_image(self, tmp_path, dest_filename, content_type):
        bucket_name = self.config.get('aws', 'bucket_name')
        access_key = self.config.get('aws', 'access_key')
        secret_access_key = self.config.get('aws', 'secret_access_key')
        url_prefix = self.config.get('aws', 'url_prefix')

        dest_path = os.path.join(url_prefix, dest_filename)
        url = 'http://s3.amazonaws.com/%s/%s' % (bucket_name, dest_path)

        conn = S3Connection(access_key, secret_access_key)
        bucket = conn.create_bucket(bucket_name)
        key = Key(bucket)
        key.key = dest_path
        try:
            key.set_contents_from_filename(tmp_path,
                                           policy='public-read',
                                           headers={'Content-Type': content_type,
                                                    'Max-Age': 604800 })
            log.notice("Uploaded image {0} to {1}", tmp_path, url)
            return url
        except IOError as e:
            log.warn("Failed to upload image {0} to {1} because {2}", tmp_path, url, str(e))
            return None
Example #37
	def run(self):
		while True:
			try:
				if self.done_count % 1000 == 0:  # re-init conn to s3 every 1000 copies as we get failures sometimes
					self.__init_s3()
				path = self.queue.get()
				key_name = path.replace('\\', '/')
				filename = os.path.join(self.root_path, path)
				key = self.s3_b.get_key(key_name)

				if not key or not key.exists():
					print '  t%s: uploading: %s' % (self.thread_id, key_name)
					key = Key(self.s3_b)
					key.key = key_name
					key.set_metadata('Content-Type', 'image/jpeg')
					key.set_contents_from_filename(filename, policy='public-read', reduced_redundancy=True)
				else:
					print '  t%s: exists already: %s' % (self.thread_id, key_name)

				self.done_count += 1
			except BaseException:
				print '  t%s: error during upload' % self.thread_id
			self.queue.task_done()
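The worker's run() loop above periodically refreshes its connection through self.__init_s3(), which is not shown. A minimal sketch of such a method, assuming the bucket name is stored on the instance:

    def __init_s3(self):
        # Hypothetical method assumed by the example above: (re)open the S3
        # connection and the bucket handle used as self.s3_b.
        # Assumes self.bucket_name was set in __init__.
        self.s3_conn = boto.connect_s3()  # credentials from the environment or ~/.boto
        self.s3_b = self.s3_conn.get_bucket(self.bucket_name)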
def upload_image_to_s3(image_file_path, bucket_name):
    """Uploads images to Amazon's S3 service.

	Arguments:

	image_file_path: Path to image to upload on local machine.
	bucket_name: Name of the S3 bucket where file should be uploaded.
	key_name: Name of the key for the file on S3 (usually the
			timestamp).

	"""
    print("Entered s3 upload...")
    print(bucket_name)
    print(image_file_path)
    bucket = s3_connection.get_bucket(bucket_name)

    # Create a new key using image_file_path as the key

    key = Key(bucket)
    key.key = image_file_path
    key.set_contents_from_filename(image_file_path)

    return key
    def test_init(self):
        """ Test that TextExtractionS3 correctly initalizes and saves
        document from s3 to local temp repo and vice versa """

        # Create mock connection to s3 and add pdf
        conn = boto.connect_s3()
        conn.create_bucket('testbucket')
        s3_bucket = conn.get_bucket('testbucket')
        k = Key(s3_bucket)
        k.key = 'testfile.pdf'
        k.set_contents_from_filename(
            os.path.join(LOCAL_PATH, 'fixtures/record_no_text.pdf'))
        returned_file = conn.get_bucket('testbucket').get_key('testfile.pdf')
        self.assertEqual('testfile.pdf', returned_file.name)

        # Init TextExtractionS3 and assert that s3 file was downloaded
        self.extractor = TextExtractionS3(file_key='testfile.pdf',
                                          s3_bucket=s3_bucket)
        self.assertTrue(os.path.exists(self.extractor.doc_path))

        self.extractor.extract_metadata()
        item = list(self.extractor.s3_bucket.list('testfile_metadata.json'))
        self.assertEqual(item[0].name, 'testfile_metadata.json')
Example #40
def upload_file(bucket_name,
                dest_path,
                src_path,
                public=False,
                content_type='application/octet-stream',
                download_filename=None):
    bucket = _buckets[bucket_name]
    key = Key(bucket)
    key.key = dest_path

    if public:
        policy = 'public-read'
    else:
        policy = 'private'

    headers = {'Content-Type': content_type}

    if download_filename is not None:
        headers[
            'Content-Disposition'] = 'attachment;filename="%s"' % download_filename.replace(
                ' ', '_')

    key.set_contents_from_filename(src_path, policy=policy, headers=headers)
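upload_file() above looks the bucket up in a module-level _buckets mapping that is not shown. A minimal sketch, assuming the buckets are opened once at start-up (the bucket names are placeholders):

import boto

# Hypothetical module-level cache assumed by the example above: name -> boto Bucket.
_conn = boto.connect_s3()
_buckets = {name: _conn.get_bucket(name) for name in ('uploads', 'exports')}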
Example #41
def send_files(files_to_send, key, bucket, directory):

    output_file = config.get("local_settings", "output_file")
    f = zipfile.ZipFile(output_file, "w")
    for name in files_to_send:
        f.write(name, os.path.basename(name), zipfile.ZIP_DEFLATED)
    f.close()
    setup_headers = {
        "Content-type": "application/json",
        "Accept": "text/plain"
    }
    conn = httplib.HTTPSConnection("px558.o1.gondor.io")
    conn.request("GET", "/api/data/upload-request/", '', setup_headers)
    response = conn.getresponse()
    aws_setup = json.loads(response.read())
    access_val = aws_setup["AWSAccessKeyId"]
    conn = S3Connection(access_val, key)
    b = conn.create_bucket(bucket)
    k = Key(b)
    k.key = directory + output_file
    print "sending files"
    k.set_contents_from_filename(output_file)
    conn.close()
    def test_text_extractor_s3(self):

        # Open mock connection
        conn = boto.connect_s3()
        conn.create_bucket('testbucket')
        s3_bucket = conn.get_bucket('testbucket')

        # Upload files to mock bucket
        k = Key(s3_bucket)
        k.key = 'record_text.pdf'
        k.set_contents_from_filename(os.path.join(LOCAL_PATH,
                                                  'fixtures/record_text.pdf'),
                                     replace=True)
        k.key = 'excel_spreadsheet.xlsx'
        k.set_contents_from_filename(os.path.join(
            LOCAL_PATH, 'fixtures/excel_spreadsheet.xlsx'),
                                     replace=True)

        # Convert
        text_extractor_s3('record_text.pdf', s3_bucket)
        self.assertEqual(len(list(s3_bucket.list('record_text.txt'))), 1)
        text_extractor_s3('excel_spreadsheet.xlsx', s3_bucket)
        self.assertEqual(len(list(s3_bucket.list('excel_spreadsheet.txt'))), 1)
Example #43
def upload_to_s3(bucket_name, key_name, video_file):
    cfg = Config()
    # connect to the bucket
    conn = boto.connect_s3(cfg.get("aws", "access_key_id"),
                           cfg.get("aws", "secret_access_key"))

    ret_val = False

    try:
        print("# S3: Uploading to Bucket: {0} / Video|Key: {1}".format(
            bucket_name, video_file))
        bucket = conn.get_bucket(bucket_name)
        k = Key(bucket)
        if key_name:
            k.key = key_name
        else:
            k.key = os.path.basename(video_file)
        k.set_contents_from_filename(video_file)
        ret_val = True
    except boto.exception.S3ResponseError as err:
        print(err)

    return ret_val
Example #44
def upload_file(source_file, s3_key_name, bucket, nr, count):

    with_gzip = source_file.endswith(gzip_exts)

    print('  [{} of {}] {}{}'.format(nr, count, 'GZIP ' if with_gzip else '',
                                     s3_key_name))

    s3_key = Key(bucket)
    s3_key.key = s3_key_name
    s3_key.set_metadata(
        'Content-Type',
        content_types.get(os.path.splitext(source_file)[1]) or 'text/plain')

    if 'public/libs' in source_file or source_file.endswith(
            cache_max_age_exts):
        s3_key.set_metadata('Cache-Control',
                            'public, max-age={}'.format(cache_max_age))

    if with_gzip:
        s3_key.set_metadata('Content-Encoding', 'gzip')
        source_file = gzip_file(source_file, s3_key_name)

    s3_key.set_contents_from_filename(source_file)
Example #45
    def upload(self, generated_images):
        """
        Upload auto generated images to S3.
        """
        if self.settings['onsite_worker'] is True:
            s3_connection = S3Connection(
                self.settings['edx_access_key_id'],
                self.settings['edx_secret_access_key']
            )
        else:
            s3_connection = S3Connection()

        try:
            bucket = s3_connection.get_bucket(self.settings['aws_video_images_bucket'])
        except S3ResponseError:
            logger.error(': Invalid Storage Bucket for Video Images')
            return

        image_keys = []
        for generated_image in generated_images:
            upload_key = Key(bucket)
            upload_key.key = build_url(
                self.settings['instance_prefix'],
                self.settings['aws_video_images_prefix'],
                os.path.basename(generated_image)
            )
            # image path is stored in edxval without `instance_prefix`
            image_keys.append(
                build_url(
                    self.settings['aws_video_images_prefix'],
                    os.path.basename(generated_image)
                )
            )
            upload_key.set_contents_from_filename(generated_image)
            upload_key.set_acl('public-read')

        return image_keys
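The upload() method above builds keys with a build_url() helper that is not shown. A minimal sketch of one plausible implementation:

def build_url(*parts):
    # Hypothetical helper assumed by the example above: join non-empty key/URL
    # segments with '/', trimming surplus slashes.
    return '/'.join(str(p).strip('/') for p in parts if p)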
def main():
    Model = get_model('cotizador', sys.argv[1])

    if not Model:
        print 'Model not found'
        return

    bucket_name = sys.argv[2]
    dir_name = sys.argv[3]

    if len(sys.argv) >= 5:
        min_id = int(sys.argv[4])
    else:
        min_id = 0

    conn = S3Connection(settings.AWS_ACCESS_KEY_ID,
                        settings.AWS_SECRET_ACCESS_KEY)
    bucket = conn.get_bucket(bucket_name)

    print 'min_id: ' + str(min_id)

    base_path = settings.MEDIA_ROOT

    models = Model.objects.filter(pk__gt=min_id).order_by('id')

    c = models.count()

    for idx, n in enumerate(models):
        print '{0} de {1}: {2}'.format(idx + 1, c, n.id)
        k = Key(bucket)
        k.key = dir_name + '/' + n.picture.name.split('/')[1]
        full_path = base_path + '/' + n.picture.name

        print k.key
        print full_path

        k.set_contents_from_filename(full_path)
Example #47
    def create_upload_session(self, upload_file):
        """
        Creates an upload session from the file.
        """
        upload = UploadedData.objects.create(user=self.request.user, state='UPLOADED', complete=True)
        upload_file.upload = upload
        upload_file.save()
        upload.size = upload_file.file.size
        upload.name = upload_file.name
        upload.file_type = self.get_file_type(upload_file.file.path)
        upload.save()

        bucket_name = getattr(settings, 'OSGEO_STORAGE_BUCKET_NAME', None)
        if bucket_name:
            conn = boto.connect_s3()
            bucket = conn.get_bucket(bucket_name)
            u = uuid.uuid4()
            k = Key(bucket)
            k.key = 'osgeo_importer/{}{}'.format(u, os.path.splitext(upload_file.file.path)[1])
            k.set_contents_from_filename(upload_file.file.path)
            conn.close()

            upload.metadata = '{}:{}'.format(bucket_name, k.key)
            upload.save()

        description = self.get_fields(upload_file.file.path)

        for layer in description:
            configuration_options = DEFAULT_LAYER_CONFIGURATION.copy()
            configuration_options.update({'index': layer.get('index')})
            upload.uploadlayer_set.add(UploadLayer(name=layer.get('name'),
                                                   fields=layer.get('fields', {}),
                                                   index=layer.get('index'),
                                                   feature_count=layer.get('feature_count'),
                                                   configuration_options=configuration_options))
        upload.save()
        return upload
Example #48
def write_plot_to_s3(fig_path, fig_name):
    '''
    Writes the plot at fig_path to the S3 bucket under fig_name and
    returns the resulting S3 URL.
    '''

    import boto
    import sys
    from boto.s3.key import Key
    # from boto.s3.key import Key
    bucket_name = 'mlsquare-pdf'
    credentials_json = json.load(open('../data/aws_credentials.json'))
    AWS_ACCESS_KEY_ID = credentials_json['AWS_ACCESS_KEY_ID']
    AWS_SECRET_ACCESS_KEY = credentials_json['AWS_SECRET_ACCESS_KEY']
    REGION_HOST = 's3.ap-south-1.amazonaws.com'

    # bucket_name = AWS_ACCESS_KEY_ID.lower() + '-dump'
    conn = boto.connect_s3(AWS_ACCESS_KEY_ID,
                           AWS_SECRET_ACCESS_KEY,
                           host=REGION_HOST)
    bucket = conn.get_bucket('mlsquare-pdf', validate=False)

    # bucket = conn.create_bucket(bucket_name,
    #     location=boto.s3.connection.Location.DEFAULT)

    print('Uploading %s to Amazon S3 bucket %s' % (fig_path, bucket_name))

    def percent_cb(complete, total):
        sys.stdout.write('.')
        sys.stdout.flush()

    k = Key(bucket)
    k.key = fig_name
    k.set_contents_from_filename(fig_path, cb=percent_cb,
                                 num_cb=10)  # upload file
    url = k.generate_url(expires_in=0, query_auth=False)
    return url
Example #49
def push_cf_templates_to_s3(template_dir='cloudformation'):
    """ Pushes PD Cloud Formation templates up to AWS S3 Buckets.

     Cloud Formation uses S3 for template storage. Cloud Formation and its backing S3 storage are region-specific.
     This task publishes template updates to the backing S3 buckets. Use this function to "publish" new template
     updates and fixes.

     parameters:
     template_dir -- The local directory where the Cloud Formation templates are stored
    """

    abs_template_dir = os.path.abspath(template_dir)

    templates = []

    for file in os.listdir(abs_template_dir):

        if 'cf-template.json' != file:
            templates.append(os.path.join(abs_template_dir, file))

    print('Uploading templates:')
    print(templates)

    s3_conn = S3Connection(config.AWS_API_KEY, config.AWS_SECRET_KEY)

    for bucket_name in config.AWS_CF_S3_BUCKETS:
        bucket = s3_conn.get_bucket(bucket_name)

        for k in bucket.get_all_keys():
            k.delete()

        for t in templates:
            k = Key(bucket)
            k.key = os.path.basename(t)
            k.set_contents_from_filename(os.path.abspath(t))

    print('Upload complete')
Example #50
def main(argv):

    ## PARAM OVERRIDES
    KurmaAWSTestLib.GLOBAL_DEBUG = 1
    bucket_name = 'readafterwrite003kurmaeu'
    DATAFIL = 'data/data8m'

    ret = KurmaAWSTestLib.fetchArgs(argv)
    if (ret == -1):
        sys.exit(2)

    userObj = boto.s3.connect_to_region(
        'eu-west-1',
        aws_access_key_id=KurmaAWSTestLib.user_profiles[0]['access'],
        aws_secret_access_key=KurmaAWSTestLib.user_profiles[0]['secret'],
        calling_format=boto.s3.connection.OrdinaryCallingFormat())

    bucket = userObj.get_bucket(bucket_name)
    k = Key(bucket)
    for i in range(1, 31):
        k.key = 'testobj' + str(i)
        k.set_contents_from_filename(DATAFIL)
        k.set_acl('public-read')
        print("Wrote testobj" + str(i) + " at: " + str(datetime.now()))

    # Deletion loop
    #for i in range(1, 11):
    #    k.key = 'testobj' + str(i)
    #    k.delete()
    #    print ("Deleted " + str(i))
    #    time.sleep(1)

    #print ("Deleting all objects...")
    #for k in bucket.list():
    #    k.delete()

    return
Example #51
def bucket(link, dest, config):

    #-------- Begin Connection --------#
    conn = boto.connect_s3(aws_access_key_id=config["accessKey"],
                           aws_secret_access_key=config["secretKey"])

    if printBucket:
        print "I have successfully connected to the S3 space where the buckets live"
        print

        #-------- List Active Buckets --------#
        print "Now here is the list of the buckets:"
        print

        for bucket in conn.get_all_buckets():
            print "{name}\t{created}".format(name=bucket.name,
                                             created=bucket.creation_date)

    #-------- List Contents of Buckets --------#

    if printBucket:
        print
        print "This is the list of the stuff in the buckets:"
        print
        for key in bucket.list():
            print bucket.name, ":", key.name, key.size, key.last_modified

    #-------- Save to Bucket --------#
    b = conn.get_bucket(config["testBucketName"])  # Connect to our test bucket
    k = Key(b)  #  Prepare to create a new file - 'key' means 'filename'
    k.key = dest  # new Filename
    k.set_contents_from_filename(link)  # source file

    #-------- Download Bucket Object --------#
    #k = bucket.get_key({NAME OF FILE})
    #k.get_contents_to_filename({SAVE FILE PATH})
    return
    def run(self):
        #To ADD
        """#Taking command line input from user
        argLen=len(sys.argv)
        accessKey=''
        secretAccessKey=''

        for i in range(1,argLen):
            val=sys.argv[i]
            if val.startswith('accessKey='):
                pos=val.index("=")
                accessKey=val[pos+1:len(val)]
                continue
            elif val.startswith('secretKey='):
                pos=val.index("=")
                secretAccessKey=val[pos+1:len(val)]
                continue"""
        
        # Get Access keys from command line 
        accessKey = self.accessKey
        secretAccessKey = self.secretAccessKey

        try:
            #Creating S3 Connection using Access and Secrect access key
            conn = S3Connection(accessKey, secretAccessKey)
            # Connecting to specified bucket
            b = conn.get_bucket('case3')
            #Initializing Key
            k = Key(b)
            #Uploading pickle and model performance files to S3 Bucket
            onlyfiles = pd.read_csv(run_models().output().path)['0'].tolist()
            for i in onlyfiles:
                k.key = i
                k.set_contents_from_filename(i)
                k.set_acl('public-read')
        except:
            print("Amazon credentials or location is invalid")
Example #53
def upload_run_list(run_id, run_filename, scene_list_filename, collection=True, verbose=False):
    run_info_key = _get_key(RUN_INFO_FILE)
    run_info = json.loads(run_info_key.get_contents_as_string())

    if run_info['last_run'] != run_id-1 or run_info['active_run'] is None:
        raise Exception('We are not the active run! ' + str(run_info))
        
    if socket.gethostname() not in run_info['active_run']:
        raise Exception('We are not the active run host! ' + str(run_info))

    if verbose:
        print 'Confirmed we are the active run: ', str(run_info)

    run_info['last_run'] = run_id
    run_info['active_run'] = None

    run_s3_name = 'runs/%s.csv' % run_id

    run_key = Key(_get_bucket(), run_s3_name)
    run_key.set_contents_from_filename(run_filename, policy='public-read')

    if verbose:
        print 'Uploaded run log %s to %s on s3.' % (run_filename, run_s3_name)

    os.system('gzip -f -9 %s' % scene_list_filename)

    keypath = 'c1/L8/scene_list.gz' if collection else 'scene_list.gz'
    key = _get_key(keypath)
    key.set_contents_from_filename(scene_list_filename + '.gz', policy='public-read')

    if verbose:
        print 'Uploaded %s to scene_list.gz' % scene_list_filename

    run_info_key.set_contents_from_string(json.dumps(run_info))

    if verbose:
        print 'last run incremented, active_run over.'
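upload_run_list() above relies on _get_bucket() and _get_key() helpers (and a RUN_INFO_FILE constant) that are not shown. A minimal sketch, with the bucket name as a placeholder:

import boto

BUCKET_NAME = 'your-scene-list-bucket'  # placeholder
RUN_INFO_FILE = 'run_info.json'         # assumption

def _get_bucket():
    # Hypothetical helper assumed by the example above.
    return boto.connect_s3().get_bucket(BUCKET_NAME)

def _get_key(name):
    # Hypothetical helper assumed by the example above: Key handle for an
    # existing object in the bucket.
    return _get_bucket().get_key(name)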
Example #54
def upload_packages(packages, bucket_name=None, nightly=False):
    if debug:
        print("[DEBUG] upload_packags: {}".format(packages))
    try:
        import boto
        from boto.s3.key import Key
    except ImportError:
        print "!! Cannot upload packages without the 'boto' python library."
        return 1
    print("Uploading packages to S3...")
    print("")
    c = boto.connect_s3()
    if bucket_name is None:
        bucket_name = 'get.influxdb.org/telegraf'
    bucket = c.get_bucket(bucket_name.split('/')[0])
    print("\t - Using bucket: {}".format(bucket_name))
    for p in packages:
        if '/' in bucket_name:
            # Allow for nested paths within the bucket name (ex:
            # bucket/telegraf). Assuming forward-slashes as path
            # delimiter.
            name = os.path.join('/'.join(bucket_name.split('/')[1:]),
                                os.path.basename(p))
        else:
            name = os.path.basename(p)
        if bucket.get_key(name) is None or nightly:
            print("\t - Uploading {} to {}...".format(name, bucket_name))
            k = Key(bucket)
            k.key = name
            if nightly:
                n = k.set_contents_from_filename(p, replace=True)
            else:
                n = k.set_contents_from_filename(p, replace=False)
            k.make_public()
        else:
            print("\t - Not uploading {}, already exists.".format(p))
    print("")
Example #55
def thumbnails_2(name, type, prefix, width, bytes):
    bucket = S3Connection(settings.AWS_ACCESS_KEY_ID,
                          settings.AWS_SECRET_ACCESS_KEY).get_bucket(
                              settings.AWS_BUCKET)
    if not name:
        logger.critical('{name:s}: if not name (#1)'.format(name=name))
        return
    name = name.split('/')[-1]
    if not name:
        logger.critical('{name:s}: if not name (#2)'.format(name=name))
        return
    key = bucket.get_key(name)
    if not key:
        logger.critical('{name:s}: if not key'.format(name=name))
        return
    n = '{prefix:s}_{suffix:s}'.format(prefix=prefix, suffix=name)
    k = bucket.get_key(n)
    if k:
        logger.info('{name:s}: Success (#1)'.format(name=n))
        return
    _, source = mkstemp()
    key.get_contents_to_filename(source)
    destination = None
    try:
        destination = get_destination(source, name, type, width, bytes)
    except Exception:
        client.captureException()
    if not destination:
        logger.critical('{name:s}: Failure'.format(name=n))
        return
    k = Key(bucket)
    k.key = n
    k.set_contents_from_filename(destination)
    remove(destination)
    remove(source)
    logger.info('{name:s}: Success (#2)'.format(name=n))
    return
Example #56
def _write_files(app,
                 static_url_loc,
                 static_folder,
                 files,
                 bucket,
                 ex_keys=None,
                 hashes=None):
    """ Writes all the files inside a static folder to S3. """
    new_hashes = []
    static_folder_rel = _path_to_relative_url(static_folder)
    for file_path in files:
        asset_loc = _path_to_relative_url(file_path)
        key_name = _static_folder_path(static_url_loc, static_folder_rel,
                                       asset_loc)
        msg = "Uploading %s to %s as %s" % (file_path, bucket, key_name)
        logger.debug(msg)

        exclude = False
        if app.config.get('S3_ONLY_MODIFIED', False):
            file_hash = hash_file(file_path)
            new_hashes.append((key_name, file_hash))

            if hashes and hashes.get(key_name, None) == file_hash:
                exclude = True

        if ex_keys and key_name in ex_keys or exclude:
            logger.debug("%s excluded from upload" % key_name)
        else:
            k = Key(bucket=bucket, name=key_name)
            # Set custom headers
            for header, value in app.config['S3_HEADERS'].iteritems():
                k.set_metadata(header, value)
            k.set_contents_from_filename(file_path)
            k.make_public()

    return new_hashes
Example #57
File: file.py Project: pedritss/poppet
 def push_file_to_s3(self, full_file_name, rel_filepath):
     try:
         import boto
         from boto.s3.key import Key
         bucket_name = current_app.config['S3_BUCKET']
         # connect to the bucket
         conn = boto.connect_s3(current_app.config['S3_ACCESS_KEY'],
                                current_app.config['S3_SECRET_KEY'],
                                host=current_app.config['S3_ENDPOINT'])
         bucket = conn.get_bucket(bucket_name)
         # go through each version of the file
         key = rel_filepath
         # create a key to keep track of our file in the storage
         k = Key(bucket)
         k.key = key
         k.set_contents_from_filename(full_file_name)
         # we need to make it public so it can be accessed publicly
         # using a URL like http://s3.amazonaws.com/bucket_name/key
         k.make_public()
         # remove the file from the web server
         #os.remove(full_file_name)
     except:
         current_app.logger.exception(
             'exception while trying to upload file')
Example #58
def UploadFiles(bucket, files):
    s3 = boto.connect_s3(aws_access_key_id, aws_secret_access_key)
    b = s3.get_bucket(bucket)

    for s3Key in files:
        upload = False
        k = b.get_key(s3Key)

        if not k:
            k = Key(b)
            k.key = s3Key
            upload = True
        else:
            s3_dt = GetKeyModified(k)
            local_dt = GetFileModified(s3Key)

            #upload only if local file has a timestamp newer than that in s3 storage
            if local_dt > s3_dt:
                upload = True

        #upload only if the key is new or the local file is newer than the copy in s3 storage
        if upload:
            print 'uploading %s' % k
            k.set_contents_from_filename(s3Key)
Example #59
def sync():
    params = {
        "aws_access_key_id": app.config["AWS_ACCESS_KEY_ID"],
        "aws_secret_access_key": app.config["AWS_ACCESS_KEY"],
    }

    if app.config.get('AWS_S3_CALLING_FORMAT'):
        params['calling_format'] = app.config['AWS_S3_CALLING_FORMAT']

    if app.config.get("AWS_REGION"):
        c = boto.s3.connect_to_region(app.config["AWS_REGION"], **params)
    else:
        c = boto.connect_s3(**params)

    b = c.get_bucket(app.config["S3_BUCKET"])
    k = Key(b)

    local_path = app.config["FREEZER_DESTINATION"]
    destination_path = app.config["S3_DESTINATION"]

    local_path = Path(local_path)
    destination_path = Path(destination_path)

    for path, file_dir, files in os.walk(local_path):
        for local_file in files:
            file_path = Path(path, local_file)
            rel_path = Path(destination_path,
                            local_path.rel_path_to(file_path))

            logger.info("- Uploading file %s" % (rel_path,))

            k.key = rel_path
            k.set_contents_from_filename(file_path)
            b.set_acl("public-read", k.key)

    logger.info("Sync complete!")
Example #60
    def _send(self, tempImage):
        # connect to S3 and grab the bucket
        # need to specify  host?
        s3 = S3Connection(self.conf["aws_access_key_id"],
                          self.conf["aws_secret_access_key"],
                          host='s3-us-west-1.amazonaws.com')
        bucket = s3.get_bucket(self.conf["s3_bucket"])

        # upload the file, make it public, and generate a URL for the file
        k = Key(bucket)
        k.key = tempImage.path[tempImage.path.rfind("/") + 1:]
        k.set_contents_from_filename(tempImage.path)
        k.make_public()
        url = k.generate_url(expires_in=300)

        # connect to Twilio and send the file via MMS
        client = Client(self.conf["twilio_sid"], self.conf["twilio_auth"])
        client.messages.create(to=self.conf["twilio_to"],
                               from_=self.conf["twilio_from"],
                               body=self.conf["message_body"],
                               media_url=url)

        # delete the temporary file
        tempImage.cleanup()