Example #1
    def save_gtfs_file(self, contents, user, gtfs_crawler, comments, filename):
        assert '@' in user, 'user must be an email address'
        # shunt_s3: divert uploads to the local filesystem instead of S3
        # (a development/test setting).
        if tornado.options.options.shunt_s3:
            if filename.startswith("queue/"):
                filename = filename[len("queue/"):]
            if not os.path.exists("/tmp/gtfs_s3/queue"):
                os.makedirs("/tmp/gtfs_s3/queue")
            filename = os.path.join("/tmp/gtfs_s3/queue", filename)
            logging.info('writing %s', filename)
            with open(filename, 'wb') as f:
                f.write(contents)
            # Keep the metadata next to the payload in a .meta sidecar file.
            with open(filename + '.meta', 'wb') as f:
                f.write(json.dumps(dict(user=user,
                                        gtfs_crawler=gtfs_crawler,
                                        comments=comments)))
        else:
            # Production path: attach the metadata to the S3 object itself.
            obj = S3.S3Object(contents)
            obj.metadata['user'] = user
            obj.metadata['gtfs_crawler'] = gtfs_crawler
            obj.metadata['comments'] = comments
            logging.info('putting %r', filename)
            self.conn.put("gtfs", filename, obj)
Example #2
import mimetypes

import S3      # the classic Amazon S3 sample library
import config  # application module holding the AWS credentials (assumed)


def S3Upload(upload_name, fileObj, bucket_name=None):
    if not bucket_name:
        raise ValueError('No bucket name given')

    conn = S3.AWSAuthConnection(config.AWS_ACCESS_KEY_ID,
                                config.AWS_SECRET_ACCESS_KEY)

    # Guess the content type from the file name; fall back to text/plain.
    content_type = mimetypes.guess_type(upload_name)[0]
    if not content_type:
        content_type = 'text/plain'

    st = conn.put(bucket_name, upload_name, S3.S3Object(fileObj), {
        'x-amz-acl': 'public-read',
        'Content-Type': content_type
    })
    resp = st.http_response
    if resp.status != 200:
        print 'upload failed:', resp.msg
        return False
    print 'upload succeeded'
    return True
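
A hypothetical usage sketch, assuming config.AWS_ACCESS_KEY_ID and config.AWS_SECRET_ACCESS_KEY are set and the bucket already exists:

    data = open('report.pdf', 'rb').read()
    if S3Upload('report.pdf', data, bucket_name='my-bucket'):
        print 'published'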
Example #3
def upload_s3(fname, mimetype, uname=''):
    # Default the S3 key to the local file's basename.
    if not uname:
        uname = os.path.basename(fname)

    with open(fname, 'rb') as f:
        filedata = f.read()

    conn = S3.AWSAuthConnection(settings.AWS_ACCESS_KEY_ID,
                                settings.AWS_SECRET_ACCESS_KEY)
    conn.put(settings.BUCKET_NAME, uname, S3.S3Object(filedata),
             {'x-amz-acl': 'public-read', 'Content-Type': mimetype})
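
Hypothetical calls (file names and key are made up; assumes settings.AWS_* and settings.BUCKET_NAME are configured):

    upload_s3('static/site.css', 'text/css')                       # key defaults to 'site.css'
    upload_s3('static/site.css', 'text/css', uname='v2/site.css')  # explicit key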
Example #4
def publish(filepath, s3bucket, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY,
            version):
    # `p` (a properties dict) and `_sha` (a SHA-1 constructor) are defined
    # elsewhere in the build script this function is taken from.
    filename = filepath.split("/")[-1]
    s3key = "/".join([p['release.type'], p['project.key'], filename])

    print "Reading in content from %s" % filepath
    filedata = open(filepath, "rb").read()

    # SHA-1 digest of the payload, published alongside it as a .sha1 file.
    filehash = _sha(filedata).hexdigest()

    print "Preparing to upload %s to %s/%s" % (filename, s3bucket, s3key)

    content_type = mimetypes.guess_type(filename)[0]
    if content_type is None:
        content_type = 'text/plain'
    print "File appears to be %s" % content_type

    print "Connecting to S3..."
    conn = S3.AWSAuthConnection(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)

    print "Checking if bucket %s exists..." % s3bucket
    check = conn.check_bucket_exists(s3bucket)
    if check.status == 200:
        print "Uploading %s to %s/%s" % (filename, s3bucket, s3key)
        print conn.put(
            s3bucket, s3key, S3.S3Object(filedata), {
                'Content-Type': content_type,
                'x-amz-acl': 'public-read',
                'x-amz-meta-project.name': 'Spring Python',
                'x-amz-meta-release.type': p['release.type'],
                'x-amz-meta-bundle.version': version,
                'x-amz-meta-package.file.name': filename
            }).message

        print "Uploading SHA1 digest to %s/%s" % (s3bucket, s3key + '.sha1')
        print conn.put(s3bucket, s3key + '.sha1',
                       S3.S3Object(filehash + ' ' + filename + "\n"), {
                           'Content-Type': content_type,
                           'x-amz-acl': 'public-read'
                       }).message
    else:
        print "Error code %s: unable to publish" % check.status
Example #5
    def create(self, content, mimetype, metadata):
        key = self._generate_valid_key()
        obj = S3.S3Object(content, metadata)
        # REDUCED_REDUNDANCY stores the object more cheaply at lower durability.
        self.conn.put(self.bucket, key, obj, {
            'x-amz-storage-class': 'REDUCED_REDUNDANCY',
            'Content-Type': mimetype,
        })

        # Fetch the object's generated ACL document, graft in a public-read
        # grant (_pub_read_grant is a pre-built DOM node defined elsewhere),
        # and write the modified ACL back.
        aclxml = self.conn.get_acl(self.bucket, key).body
        acl = parseString(aclxml)
        acl.getElementsByTagName('AccessControlList')[0].appendChild(_pub_read_grant)
        self.conn.put_acl(self.bucket, key, acl.toxml())
        return self._published_url(key)
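
One plausible way to build the _pub_read_grant node used above; the exact XML is an assumption here, granting READ to the AllUsers group:

    from xml.dom.minidom import parseString

    _pub_read_grant = parseString(
        '<Grant xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">'
        '<Grantee xsi:type="Group">'
        '<URI>http://acs.amazonaws.com/groups/global/AllUsers</URI>'
        '</Grantee>'
        '<Permission>READ</Permission>'
        '</Grant>').documentElement  # assumed shape, not from the original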
Example #6
def update_s3():
    conn = S3.AWSAuthConnection(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
    # Read one path per line from stdin, e.g. piped in from `find`.
    for line in sys.stdin:
        filename = os.path.normpath(line.rstrip('\n'))
        if filename == '.' or not os.path.isfile(filename):
            continue  # skip directories and anything that isn't a regular file
        print "Uploading %s" % filename
        filedata = open(filename, 'rb').read()
        content_type = mimetypes.guess_type(filename)[0]
        if not content_type:
            content_type = 'text/plain'
        conn.put(BUCKET_NAME, filename, S3.S3Object(filedata), {
            'x-amz-acl': 'public-read',
            'Content-Type': content_type
        })
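
A hypothetical driver: run the function as a script with a file list on stdin, e.g. `find . -type f | python update_s3.py` (assumes AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY and BUCKET_NAME are defined at module level):

    if __name__ == '__main__':
        update_s3()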
Example #7
def push_media_to_s3(subpath, content_type):
    """
    Upload a subpath of the media directory to S3.
    """
    if not settings.USE_S3:
        return
    import S3
    conn = S3.AWSAuthConnection(settings.S3_ACCESS_KEY, settings.S3_SECRET_KEY)
    localPath = os.path.join(settings.MEDIA_ROOT, subpath)
    obj = S3.S3Object(open(localPath, 'rb').read())
    # Retry the PUT up to five times before giving up.
    tries = 5
    while True:
        try:
            conn.put(settings.S3_BUCKET, settings.S3_PATH + subpath, obj, {
                'Content-Type': content_type,
                'x-amz-acl': 'public-read'
            })
        except Exception:
            tries -= 1
            if not tries:
                raise
        else:
            return
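
Hypothetical calls from a deploy task (both subpaths are made up; assumes the settings.S3_* values and settings.USE_S3 are configured):

    push_media_to_s3('css/site.css', 'text/css')
    push_media_to_s3('img/logo.png', 'image/png')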
Example #8
def export_to_s3(request):
    """
    Dump the database and upload the dump to Amazon S3

    """
    if request.method == 'POST':
        # Build the dump command for the configured database backend.
        if settings.DATABASE_ENGINE == 'mysql':
            cmd = MYSQLDUMP_CMD % (settings.DATABASE_HOST,
                                   settings.DATABASE_USER,
                                   settings.DATABASE_PASSWORD,
                                   settings.DATABASE_NAME)
        elif settings.DATABASE_ENGINE == 'sqlite3':
            cmd = SQLITE3DUMP_CMD % settings.DATABASE_NAME
        else:
            raise ImproperlyConfigured(
                "Sorry, django-export only supports mysql and sqlite3 "
                "database backends.")
        # Run the dump and stream its bzip2-compressed output to S3.
        stdin, stdout = os.popen2(cmd)
        stdin.close()
        file_name = 'dump_%s.sql.bz2' % time.strftime('%Y%m%d-%H%M')
        conn = S3.AWSAuthConnection(settings.AWS_ACCESS_KEY_ID,
                                    settings.AWS_SECRET_ACCESS_KEY)
        res = conn.put(settings.AWS_BUCKET_NAME, file_name,
                       S3.S3Object(stdout.read()),
                       {'Content-Type': 'application/x-bzip2'})
        if res.http_response.status == 200:
            request.user.message_set.create(
                message="%s" % _(u"%(filename)s saved on Amazon S3")
                % {'filename': file_name})
        else:
            request.user.message_set.create(
                message="%s" % _(u"Upload failed with %(status)s")
                % {'status': res.http_response.status})
        stdout.close()
        return HttpResponseRedirect('/admin/')
    return direct_to_template(request, 'export/export.html',
                              {'what': _(u'Export Database to S3'),
                               's3support': (S3 is not None),
                               's3': True})
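
A hypothetical urlconf entry wiring the view up, using the old-style patterns() that matches the era of message_set and direct_to_template used above:

    from django.conf.urls.defaults import patterns

    urlpatterns = patterns('',
        (r'^admin/export/$', export_to_s3),
    )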
Example #9
print '----- creating bucket -----'
print conn.create_located_bucket(BUCKET_NAME, S3.Location.DEFAULT).message
# to create an EU bucket instead:
#print conn.create_located_bucket(BUCKET_NAME, S3.Location.EU).message

print '----- bucket location -----'
print conn.get_bucket_location(BUCKET_NAME).location

print '----- listing bucket -----'
print map(lambda x: x.key, conn.list_bucket(BUCKET_NAME).entries)

print '----- putting object (with content type) -----'
print conn.put(
        BUCKET_NAME,
        KEY_NAME,
        S3.S3Object('this is a test'),
        { 'Content-Type': 'text/plain' }).message

print '----- listing bucket -----'
print map(lambda x: x.key, conn.list_bucket(BUCKET_NAME).entries)

print '----- getting object -----'
print conn.get(BUCKET_NAME, KEY_NAME).object.data

print '----- query string auth example -----'
print "\nTry this url out in your browser (it will only be valid for 60 seconds).\n"
generator.set_expires_in(60)
url = generator.get(BUCKET_NAME, KEY_NAME)
print url
print '\npress enter> ',
sys.stdin.readline()
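
For reference, the `conn` and `generator` objects this walkthrough uses come from the library's connection and query-string-auth classes; the key and name values below are placeholders:

    import S3

    conn = S3.AWSAuthConnection('ACCESS_KEY', 'SECRET_KEY')
    generator = S3.QueryStringAuthGenerator('ACCESS_KEY', 'SECRET_KEY')
    BUCKET_NAME = 'my-test-bucket'  # hypothetical
    KEY_NAME = 'test-key'           # hypothetical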
Example #10
    def run_tests(self, calling_format, location):
        self.conn.calling_format = calling_format

        response = self.conn.create_located_bucket(BUCKET_NAME, location)
        self.assertEquals(response.http_response.status, 200, 'create bucket')

        response = self.conn.list_bucket(BUCKET_NAME)
        self.assertEquals(response.http_response.status, 200, 'list bucket')
        self.assertEquals(len(response.entries), 0, 'bucket is empty')

        text = 'this is a test'
        key = 'example.txt'

        response = self.conn.put(BUCKET_NAME, key, text)
        self.assertEquals(response.http_response.status, 200, 'put with a string argument')

        response = \
            self.conn.put(
                    BUCKET_NAME,
                    key,
                    S3.S3Object(text, {'title': 'title'}),
                    {'Content-Type': 'text/plain'})

        self.assertEquals(response.http_response.status, 200, 'put with complex argument and headers')

        response = self.conn.get(BUCKET_NAME, key)
        self.assertEquals(response.http_response.status, 200, 'get object')
        self.assertEquals(response.object.data, text, 'got right data')
        self.assertEquals(response.object.metadata, { 'title': 'title' }, 'metadata is correct')
        self.assertEquals(int(response.http_response.getheader('Content-Length')), len(text), 'got content-length header')

        title_with_spaces = " \t  title with leading and trailing spaces     "
        response = \
            self.conn.put(
                    BUCKET_NAME,
                    key,
                    S3.S3Object(text, {'title': title_with_spaces}),
                    {'Content-Type': 'text/plain'})

        self.assertEquals(response.http_response.status, 200, 'put with headers with spaces')

        response = self.conn.get(BUCKET_NAME, key)
        self.assertEquals(response.http_response.status, 200, 'get object')
        self.assertEquals(
                response.object.metadata,
                { 'title': title_with_spaces.strip() },
                'metadata with spaces is correct')

        # delimited list tests
        inner_key = 'test/inner.txt'
        last_key = 'z-last-key.txt'
        response = self.conn.put(BUCKET_NAME, inner_key, text)
        self.assertEquals(response.http_response.status, 200, 'put inner key')

        response = self.conn.put(BUCKET_NAME, last_key, text)
        self.assertEquals(response.http_response.status, 200, 'put last key')

        response = self.do_delimited_list(BUCKET_NAME, False, {'delimiter': '/'}, 2, 1, 'root list')

        response = self.do_delimited_list(BUCKET_NAME, True, {'max-keys': 1, 'delimiter': '/'}, 1, 0, 'root list with max keys of 1', 'example.txt')

        response = self.do_delimited_list(BUCKET_NAME, True, {'max-keys': 2, 'delimiter': '/'}, 1, 1, 'root list with max keys of 2, page 1', 'test/')

        marker = response.next_marker

        response = self.do_delimited_list(BUCKET_NAME, False, {'marker': marker, 'max-keys': 2, 'delimiter': '/'}, 1, 0, 'root list with max keys of 2, page 2')

        response = self.do_delimited_list(BUCKET_NAME, False, {'prefix': 'test/', 'delimiter': '/'}, 1, 0, 'test/ list')

        response = self.conn.delete(BUCKET_NAME, inner_key)
        self.assertEquals(response.http_response.status, 204, 'delete %s' % inner_key)

        response = self.conn.delete(BUCKET_NAME, last_key)
        self.assertEquals(response.http_response.status, 204, 'delete %s' % last_key)


        weird_key = '&=//%# ++++'

        response = self.conn.put(BUCKET_NAME, weird_key, text)
        self.assertEquals(response.http_response.status, 200, 'put weird key')

        response = self.conn.get(BUCKET_NAME, weird_key)
        self.assertEquals(response.http_response.status, 200, 'get weird key')

        response = self.conn.get_acl(BUCKET_NAME, key)
        self.assertEquals(response.http_response.status, 200, 'get acl')

        acl = response.object.data

        response = self.conn.put_acl(BUCKET_NAME, key, acl)
        self.assertEquals(response.http_response.status, 200, 'put acl')

        response = self.conn.get_bucket_acl(BUCKET_NAME)
        self.assertEquals(response.http_response.status, 200, 'get bucket acl')

        bucket_acl = response.object.data

        response = self.conn.put_bucket_acl(BUCKET_NAME, bucket_acl)
        self.assertEquals(response.http_response.status, 200, 'put bucket acl')

        # Round-trip the bucket's logging configuration, mirroring the ACL
        # round-trip above.
        response = self.conn.get_bucket_logging(BUCKET_NAME)
        self.assertEquals(response.http_response.status, 200, 'get bucket logging')

        bucket_logging = response.object.data

        response = self.conn.put_bucket_logging(BUCKET_NAME, bucket_logging)
        self.assertEquals(response.http_response.status, 200, 'put bucket logging')

        response = self.conn.list_bucket(BUCKET_NAME)
        self.assertEquals(response.http_response.status, 200, 'list bucket')
        entries = response.entries
        self.assertEquals(len(entries), 2, 'got back right number of keys')
        # depends on weird_key < key
        self.assertEquals(entries[0].key, weird_key, 'first key is right')
        self.assertEquals(entries[1].key, key, 'second key is right')

        response = self.conn.list_bucket(BUCKET_NAME, {'max-keys': 1})
        self.assertEquals(response.http_response.status, 200, 'list bucket with args')
        self.assertEquals(len(response.entries), 1, 'got back right number of keys')

        for entry in entries:
            response = self.conn.delete(BUCKET_NAME, entry.key)
            self.assertEquals(response.http_response.status, 204, 'delete %s' % entry.key)

        response = self.conn.list_all_my_buckets()
        self.assertEquals(response.http_response.status, 200, 'list all my buckets')
        buckets = response.entries

        response = self.conn.delete_bucket(BUCKET_NAME)
        self.assertEquals(response.http_response.status, 204, 'delete bucket')

        response = self.conn.list_all_my_buckets()
        self.assertEquals(response.http_response.status, 200, 'list all my buckets again')

        self.assertEquals(len(response.entries), len(buckets) - 1, 'bucket count is correct')
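
The do_delimited_list helper is not shown in this snippet; a plausible reconstruction from its call sites (the signature, the last parameter's meaning, and the assertions are assumptions):

    def do_delimited_list(self, bucket_name, is_truncated, parameters,
                          regular_expected, common_expected, test_name,
                          expected_next_marker=None):
        # List the bucket with the given query parameters, then check the
        # truncation flag, entry count, and common-prefix count.
        response = self.conn.list_bucket(bucket_name, parameters)
        self.assertEquals(response.http_response.status, 200, test_name)
        self.assertEquals(response.is_truncated, is_truncated, test_name)
        self.assertEquals(len(response.entries), regular_expected, test_name)
        self.assertEquals(len(response.common_prefixes), common_expected, test_name)
        if expected_next_marker is not None:
            self.assertEquals(response.next_marker, expected_next_marker, test_name)
        return response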
Example #11
# `resp` is the response from a create-bucket call made just above this
# fragment (truncated here).
print resp.message
print resp.http_response
print resp.http_response.status
# to create an EU bucket instead:
#print conn.create_located_bucket(BUCKET_NAME, S3.Location.EU).message

print '----- bucket location -----'
print conn.get_bucket_location(BUCKET_NAME).location

print '----- listing bucket -----'
print map(lambda x: x.key, conn.list_bucket(BUCKET_NAME).entries)

sys.exit(0)  # early exit left in place; remove it to run the remaining steps

print '----- putting object (with content type) -----'
print conn.put(BUCKET_NAME, KEY_NAME, S3.S3Object('this is a test'), {
    'Content-Type': 'text/plain'
}).message

print '----- listing bucket -----'
print map(lambda x: x.key, conn.list_bucket(BUCKET_NAME).entries)

print '----- getting object -----'
print conn.get(BUCKET_NAME, KEY_NAME).object.data

print '----- query string auth example -----'
print "\nTry this url out in your browser (it will only be valid for 60 seconds).\n"
generator.set_expires_in(60)
url = generator.get(BUCKET_NAME, KEY_NAME)
print url
print '\npress enter> ',