コード例 #1
0
def upload_notes_build_directory_to_s3(config):
    """Upload the rendered notes (plus each directory's _pandoc.css) to the
    notes S3 bucket, then invalidate the uploaded paths on CloudFront.

    !!AI TODO need a manifest, and a saner way of tracking files.
    """
    logger = logging.getLogger("%s.upload_notes_build_directory_to_s3" % APP_NAME)
    logger.debug("entry. config.notes_build_directory: %s, config.notes_s3_bucket: %s" % (config.notes_build_directory, config.notes_s3_bucket))

    with contextlib.closing(boto.connect_s3()) as conn:
        with contextlib.closing(boto.connect_cloudfront()) as conn_cloudfront:
            # BUGFIX: the distribution filter previously matched against
            # config.s3_bucket (the main site bucket) even though the files
            # are uploaded to config.notes_s3_bucket, so the invalidation
            # was issued against the wrong distribution.
            cloudfront_distribution = [elem for elem in conn_cloudfront.get_all_distributions() if config.notes_s3_bucket in elem.origin.dns_name][0]
            cloudfront_distribution = cloudfront_distribution.get_distribution()
            bucket = conn.get_bucket(config.notes_s3_bucket)

            # Collect bucket subpaths: every output file plus the
            # _pandoc.css stylesheet sitting next to it (POSIX-form keys).
            output_subpaths = []
            for subpath in sorted(config.notes_input_to_output.values()):
                posix_subpath = posixpath.join(*subpath.split(os.sep))
                posix_subdirectory = posixpath.split(posix_subpath)[0]
                output_subpaths.append(subpath)
                output_subpaths.append(posixpath.join(posix_subdirectory, "_pandoc.css"))
            output_filepaths = [os.path.normpath(os.path.join(config.notes_build_directory, subpath)) for subpath in output_subpaths]

            for (subpath, filepath) in zip(output_subpaths, output_filepaths):
                logger.debug("uploading subpath: '%s'" % subpath)
                # Delete any existing key first so stale metadata (e.g. a
                # Content-Encoding header) does not survive the re-upload.
                bucket.delete_key(subpath)
                key = bucket.new_key(subpath)
                if is_gzip_file(filepath):
                    logger.debug("mark as a gzipped file")
                    key.set_metadata("Content-Encoding", "gzip")
                key.set_contents_from_filename(filepath)
                key.make_public()

            logger.debug("creating cloudfront invalidation request")
            conn_cloudfront.create_invalidation_request(cloudfront_distribution.id, output_subpaths)
コード例 #2
0
  def do_activity(self, data = None):
    """
    Do the work: build the list of CDN paths to invalidate and submit
    CloudFront invalidation requests in batches of at most 1000 URLs.
    Returns True on success, False if the batch count could not be computed.
    """
    if(self.logger):
      self.logger.info('data: %s' % json.dumps(data, sort_keys=True, indent=4))

    self.db.connect()

    # cdn.elifesciences.org CDN ID
    distribution_id = self.settings.cdn_distribution_id

    invalidation_list = self.get_invalidation_list()

    # Connect to CloudFront
    c_conn = boto.connect_cloudfront(self.settings.aws_access_key_id, self.settings.aws_secret_access_key)

    # Limit of 1000 URLs to invalidate at one time; compute batch count.
    try:
      count = int(len(invalidation_list) / 1000) + 1
    except Exception:
      # invalidation_list had no usable length (or similar failure)
      return False

    array_of_invalidation_list = self.split_array(invalidation_list, count)

    for i_list in array_of_invalidation_list:
      # BUGFIX: previously passed the full invalidation_list on every
      # iteration, which defeated the 1000-URL batching and repeated the
      # same oversized request once per chunk.
      inval_req = c_conn.create_invalidation_request(distribution_id, i_list)

    if(self.logger):
      self.logger.info('LensCDNInvalidation: %s' % "")

    return True
コード例 #3
0
ファイル: main.py プロジェクト: kabisote/course-video-site
    def __init__(self):
        """Wire up URL routes, Tornado settings, the MySQL connection and
        the CloudFront connection for the application."""
        # Route table: path regex -> handler class.
        handlers = [
            (r"/", HomeHandler),
            (r"/login", LoginHandler),
            (r"/logout", LogoutHandler),
            (r"/register", RegisterHandler),
            (r"/video/([0-9]+)", VideoHandler),
            (r"/purchase/([a-zA-Z0-9-_]+)", PurchaseHandler),
            (r"/bucket", BucketHandler),
        ]
        settings = dict(
            site_title=u"Site Title",
            template_path=os.path.join(os.path.dirname(__file__), "templates"),
            static_path=os.path.join(os.path.dirname(__file__), "static"),
            # Static assets are served from S3, not the local static path.
            static_url_prefix="https://s3-us-west-2.amazonaws.com/assets/",
            xsrf_cookies=True,
            # NOTE(review): placeholder secret — replace with a real random
            # value before deployment.
            cookie_secret="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
            login_url="/login",
            debug=False,
        )
        tornado.web.Application.__init__(self, handlers, **settings)

        # MySQL connection configured via tornado.options values.
        self.db = torndb.Connection(
            host=options.mysql_host,
            database=options.mysql_database,
            user=options.mysql_user,
            password=options.mysql_password
        )
        
        # CloudFront connection using boto's default credential chain.
        self.cf = boto.connect_cloudfront()
コード例 #4
0
ファイル: cloudfront.py プロジェクト: chrisspen/burlap
    def get_or_create_distribution(self, s3_bucket_name):
        """Return the CloudFront distribution fronting *s3_bucket_name*,
        creating it if none exists yet.

        In dryrun mode nothing is contacted; the call that would be made
        is printed instead and None is returned.
        """
        assert isinstance(s3_bucket_name, six.string_types)
        boto = get_boto()
        origin_dns = '%s.s3.amazonaws.com' % s3_bucket_name
        if not self.dryrun:
            conn = boto.connect_cloudfront(
                self.genv.aws_access_key_id,
                self.genv.aws_secret_access_key
            )
            origin = boto.cloudfront.origin.S3Origin(origin_dns)

            distro = None
            # BUGFIX: the existence check used to run only when self.verbose
            # was set, so non-verbose runs always created a duplicate
            # distribution.  The lookup now always runs; verbosity only
            # controls the progress output.
            dists = conn.get_all_distributions()
            for d in dists:
                # Hoist the repeated get_distribution() round-trip.
                existing_dns = d.get_distribution().config.origin.dns_name
                if self.verbose:
                    print('Checking existing Cloudfront distribution %s...' % existing_dns)
                if origin_dns == existing_dns:
                    if self.verbose:
                        print('Found existing distribution!')
                    distro = d
                    break

                # Necessary to avoid "Rate exceeded" errors.
                time.sleep(0.4)

            if not distro:
                print('Creating new distribution from %s...' % origin)
                distro = conn.create_distribution(origin=origin, enabled=True)

            return distro
        else:
            print('boto.connect_cloudfront().create_distribution(%s)' % repr(origin_dns))
コード例 #5
0
ファイル: models.py プロジェクト: yosida95/YVIDEOS
    def generate_url(self, _object):
        """Build a signed CloudFront URL (valid for six hours) for the
        given stored object.

        Raises SignerNotFound when the distribution has no 'Self' signer.
        """
        assert isinstance(_object, Object)

        bucket = _object.s3_bucket
        resource = u'%s%s' % (bucket.origin, _object.s3_key)

        connection = boto.connect_cloudfront()
        distribution = connection.get_distribution_info(bucket.distribution_id)

        # Only the distribution owner's ("Self") key pairs may sign URLs.
        self_signers = [s for s in distribution.active_signers
                        if s.id == u'Self']
        if len(self_signers) < 1:
            raise SignerNotFound()

        candidates = [
            self._get_key_pair_by_id(kp_id, False)
            for kp_id in self_signers[0].key_pair_ids
        ]
        key_pair = random.choice([kp for kp in candidates if kp is not None])

        return distribution.create_signed_url(
            resource,
            key_pair.id,
            expire_time=int(time.time() + 60 * 60 * 6),  # 6hours
            private_key_string=key_pair.private
        )
コード例 #6
0
ファイル: fabfile.py プロジェクト: simonluijk/demarrer
def setup_s3(bucket_name=None):
    """Create a public-read S3 bucket (defaulting to '<project>-media') and,
    when CLOUDFRONT_ENABLED, front it with a new CloudFront distribution.

    Prints the resulting settings values (bucket name, media domain) for the
    caller to copy into configuration.
    """
    if not bucket_name:
        bucket_name = '{0}-media'.format(PROJECT_NAME)
    conn = boto.connect_s3()
    try:
        conn.create_bucket(bucket_name, location=_Location.EU,
                           policy='public-read')
    except _S3CreateError:
        # Bucket already exists; reuse it as-is.
        pass

    print('AWS_STORAGE_BUCKET_NAME={0}'.format(bucket_name))

    if CLOUDFRONT_ENABLED:
        # CLOUDFRONT_CUSTOM_ORIGIN is an optional module-level setting;
        # fall back to the bucket's own S3 endpoint when it is absent.
        try:
            origin = CLOUDFRONT_CUSTOM_ORIGIN
        except NameError:
            origin = None
        if not origin:
            origin = '{0}.s3.amazonaws.com'.format(bucket_name)

        origin = _CustomOrigin(origin, origin_protocol_policy='http-only')
        conn = boto.connect_cloudfront()
        distro = conn.create_distribution(origin=origin, enabled=True)
        print('MEDIA_DOMAIN={0}'.format(distro.domain_name))
    else:
        bucket_url = '{0}.s3.amazonaws.com'.format(bucket_name)
        print('MEDIA_DOMAIN={0}'.format(bucket_url))
    def run(self, terms, variables=None, **kwargs):
        """Lookup plugin: return the ETag of the CloudFront distribution
        config whose ID is the first lookup term, or None when the config
        cannot be fetched."""
        distribution_id = terms[0]
        connection = boto.connect_cloudfront(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
        distribution_config = connection.get_distribution_config(distribution_id)
        if not distribution_config:
            return None
        return [distribution_config.etag]
コード例 #8
0
ファイル: aws.py プロジェクト: ALSEDLAH/flocker
def perform_create_cloudfront_invalidation(dispatcher, intent):
    """
    See :class:`CreateCloudFrontInvalidation`.
    """
    connection = boto.connect_cloudfront()
    # Pick the first distribution serving the requested CNAME.
    matching = [d for d in connection.get_all_distributions()
                if intent.cname in d.cnames]
    target = matching[0]
    connection.create_invalidation_request(target.id, intent.paths)
コード例 #9
0
ファイル: driver.py プロジェクト: obulpathi/cdn
    def __init__(self, conf):
        """Initialize the CloudFront CDN provider from the given config."""
        super(CDNProvider, self).__init__(conf)

        # Register this driver's options under its own config group and
        # keep a handle to that group for later lookups.
        self._conf.register_opts(CLOUDFRONT_OPTIONS, group=CLOUDFRONT_GROUP)
        self.cloudfront_conf = self._conf[CLOUDFRONT_GROUP]
        # Authenticated boto CloudFront client built from the group's
        # credentials.
        self.cloudfront_client = boto.connect_cloudfront(
            aws_access_key_id=self.cloudfront_conf.aws_access_key_id,
            aws_secret_access_key=self.cloudfront_conf.aws_secret_access_key)
コード例 #10
0
def main():
        distid = 'xxxxxxxxxx'
        invalidationfilepath = '/home/ec2-user/invalid.txt'
        paths  = open(invalidationfilepath,"r+")
        conn = boto.connect_cloudfront()
        inval_req = conn.create_invalidation_request(distid, paths)
        print inval_req
        touch = open(invalidationfilepath,"w")
        touch.write("")
コード例 #11
0
ファイル: storage.py プロジェクト: fieliapm/python_util
    def __init__(self, server_name, distribution_id, key_pair_id, private_key_string, aws_access_key_id=None, aws_secret_access_key=None):
        """Storage backend backed by S3 with CloudFront in front.

        :param distribution_id: CloudFront distribution serving the content.
        :param key_pair_id: CloudFront key pair ID used for URL signing.
        :param private_key_string: PEM private key matching key_pair_id.
        :param aws_access_key_id: optional explicit credential; boto falls
            back to its usual credential chain when None.
        :param aws_secret_access_key: see aws_access_key_id.
        """
        super(AmazonCloudFrontS3Storage, self).__init__(server_name)

        # Two separate connections: CloudFront for distribution metadata,
        # S3 for the underlying objects.
        self.cloudfront_connection = boto.connect_cloudfront(aws_access_key_id, aws_secret_access_key)
        self.s3_connection = boto.connect_s3(aws_access_key_id, aws_secret_access_key)

        self.distribution_id = distribution_id
        # Fetched eagerly; presumably used by signing helpers elsewhere in
        # the class — not visible here.
        self.distribution = self.cloudfront_connection.get_distribution_info(self.distribution_id)
        self.key_pair_id = key_pair_id
        self.private_key_string = private_key_string
コード例 #12
0
ファイル: bucketier.py プロジェクト: dleavitt/bhuket
    def __init__(self, bucket_name, aws_key, aws_secret):
        """Set up AWS clients for provisioning a bucket and its IAM user.

        A random job id tags this run; the IAM user name is the bucket
        name with a "bhuket-" prefix.
        """
        self.job_id = str(uuid.uuid1())
        self.bucket_name = bucket_name
        self.aws_key = aws_key
        self.aws_secret = aws_secret

        # TODO: clean up bucket name string? What's a valid AWS username?
        self.user_name = "bhuket-" + bucket_name

        # One connection per service: IAM (user), S3 (bucket), CloudFront.
        self.iam = boto.connect_iam(aws_key, aws_secret)
        self.s3 = boto.connect_s3(aws_key, aws_secret)
        self.cloudfront = boto.connect_cloudfront(aws_key, aws_secret)
コード例 #13
0
def invalidate_cloudfront_objects(profile, distribution, objects):
    """Invalidate *objects* on a CloudFront distribution in chunks.

    CloudFront accepts up to 3000 objects per invalidation request, so the
    object list is split into chunks of that size.  (The previous docstring
    said 1000, contradicting the chunk_size actually used below.)
    """
    chunk_size = 3000  # upto 3000 according to this guide:
    # http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Invalidation.html
    conn = boto.connect_cloudfront(profile_name=profile)
    print('distribution info: %s' % conn.get_distribution_info(distribution))

    # Splitting the object list into chunks
    chunks = [objects[i:i + chunk_size] for i in range(0, len(objects), chunk_size)]

    # Create invalidation requests
    for paths in chunks:
        req = conn.create_invalidation_request(distribution, paths)
        print('invalidation request: %s' % req)
コード例 #14
0
ファイル: helpers.py プロジェクト: ypcs/gallery
def create_signed_url(url, **kwargs):
    """Return a new signed CloudFront URL for the given URL.

    Extra keyword arguments are forwarded to
    ``Distribution.create_signed_url``.

    See <http://boto.cloudhackers.com/en/latest/ref/cloudfront.html>
    """
    connection = boto.connect_cloudfront(settings.AWS_ACCESS_KEY_ID,
                                         settings.AWS_SECRET_ACCESS_KEY)
    distribution = connection.get_distribution_info(settings.AWS_CF_ID)

    return distribution.create_signed_url(
        url,
        settings.AWS_KEYPAIR_ID,
        private_key_string=settings.AWS_KEYPAIR_PRIVATE_KEY,
        **kwargs
    )
コード例 #15
0
ファイル: cloudfront.py プロジェクト: peterbe/kl
def _upload_to_cloudfront(filepath):
    """Upload *filepath* to the (lazily created) CloudFront distribution,
    first removing any older timestamped version of the same file.

    Returns the public URL of the uploaded (or already-present) object.
    The connection and distribution are cached in module-level globals so
    repeated calls reuse them.
    """
    global _cf_connection
    global _cf_distribution

    if _cf_connection is None:
        _cf_connection = boto.connect_cloudfront(settings.AWS_ACCESS_KEY,
                                                 settings.AWS_ACCESS_SECRET)

    if _cf_distribution is None:
        _cf_distribution = _cf_connection.create_distribution(
            origin='%s.s3.amazonaws.com' % settings.AWS_STORAGE_BUCKET_NAME,
            enabled=True,
            comment=settings.AWS_CLOUDFRONT_DISTRIBUTION_COMMENT)

    # Filenames look like "name.<timestamp>.ext"; build a regex matching the
    # same name/ext with any timestamp so older copies can be located.
    basename = os.path.basename(filepath)
    object_regex = re.compile(r'%s\.(\d+)\.%s' % \
        (re.escape('.'.join(basename.split('.')[:-2])),
         re.escape(basename.split('.')[-1])))
    for obj in _cf_distribution.get_objects():
        match = object_regex.findall(obj.name)
        if match:
            old_timestamp = int(match[0])
            new_timestamp = int(object_regex.findall(basename)[0])
            if new_timestamp == old_timestamp:
                # an exact copy already exists
                return obj.url()
            elif new_timestamp > old_timestamp:
                # same file with an older timestamp: remove the stale copy
                obj.delete()
                break

    # Still here? That means that the file wasn't already in the distribution

    # Because the name will always contain a timestamp we set faaar future
    # caching headers. Doesn't matter exactly as long as it's really far future.
    headers = {'Cache-Control': 'max-age=315360000, public',
               'Expires': 'Thu, 31 Dec 2037 23:55:55 GMT',
               }

    # BUGFIX: the file handle was previously opened and never closed.
    with open(filepath) as fp:
        obj = _cf_distribution.add_object(basename, fp, headers=headers)
    return obj.url()
コード例 #16
0
ファイル: invalidate.py プロジェクト: kmcintyre/Snapyelp
def do_invalidate():
    """Invalidate the publish list's paths on the app-bucket distribution.

    Exits the process (status 1) if a previous invalidation is still in
    flight, to avoid stacking requests.
    """
    c = boto.connect_cloudfront()
    for d in c.get_all_distributions():
        if d.origin.dns_name == app_util.app_bucket:
            print 'domain id:', d.id, 'domain name:', d.domain_name, 'domain status:', d.status, 'comment:', d.comment
            # Refuse to run while any earlier invalidation is still pending.
            for ir in c.get_invalidation_requests(d.id):
                if ir.status != 'Completed':
                    print 'invalidate request:', ir.id, ir.status
                    exit(1)
            # Strip the local build-directory prefix to get CDN-relative paths.
            paths = [res[len(publish.build_dir):] for res in publish.get_publish_list()]
            if paths:
                print 'invalidate paths:', paths
                c.create_invalidation_request(d.id, paths)
            else:
                print 'invalidate skipped'
コード例 #17
0
ファイル: cdn.py プロジェクト: paulvisen/flask-todo
def get_cache(force_rebuild=False):
    """Return a mapping of CloudFront origin DNS name -> distribution
    domain name, memoized on the function and persisted in CACHE_FILE.

    Returns {} immediately when no AWS credentials are configured.
    """
    if not settings.AWS_ACCESS_KEY_ID:
        return {}
    if force_rebuild or not hasattr(get_cache, "cache"):
        if force_rebuild or not CACHE_FILE.exists():
            # (Re)build the cache from the live account and persist it.
            CACHE_FILE.dirname().makedirs_p()
            conn = connect_cloudfront(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)
            mapping = {}
            for dist in conn.get_all_distributions():
                mapping[dist.origin.dns_name] = dist.domain_name
            with open(CACHE_FILE, "w") as handle:
                json.dump(mapping, handle)
        else:
            # Reuse the on-disk copy.
            with open(CACHE_FILE) as handle:
                mapping = json.load(handle)
        get_cache.cache = mapping
    return get_cache.cache
コード例 #18
0
ファイル: s3.py プロジェクト: noeldvictor/burlap
def invalidate(*paths):
    """
    Issues invalidation requests to a Cloudfront distribution
    for the current static media bucket, triggering it to reload the specified
    paths from the origin.

    Note, only 1000 paths can be issued in a request at any one time, so the
    paths are submitted in batches of 1000.
    """
    from burlap.dj import get_settings
    if not paths:
        return
    # http://boto.readthedocs.org/en/latest/cloudfront_tut.html
    _settings = get_settings()
    if not _settings.AWS_STATIC_BUCKET_NAME:
        print('No static media bucket set.')
        return
    if isinstance(paths, basestring):
        paths = paths.split(',')
    all_paths = map(str.strip, paths)
    i = 0
    while 1:
        # Take at most 1000 paths per CloudFront invalidation request.
        paths = all_paths[i:i+1000]
        if not paths:
            break

        c = boto.connect_cloudfront()
        rs = c.get_all_distributions()
        target_dist = None
        for dist in rs:
            print(dist.domain_name, dir(dist), dist.__dict__)
            bucket_name = dist.origin.dns_name.replace('.s3.amazonaws.com', '')
            if bucket_name == _settings.AWS_STATIC_BUCKET_NAME:
                target_dist = dist
                break
        if not target_dist:
            # BUGFIX: this message previously referenced the undefined name
            # `settings`, raising a NameError instead of this exception.
            raise Exception(('Target distribution %s could not be found '
                'in the AWS account.') \
                    % (_settings.AWS_STATIC_BUCKET_NAME,))
        print('Using distribution %s associated with origin %s.' \
            % (target_dist.id, _settings.AWS_STATIC_BUCKET_NAME))
        inval_req = c.create_invalidation_request(target_dist.id, paths)
        print('Issue invalidation request %s.' % (inval_req,))

        i += 1000
0
ファイル: simpleaws.py プロジェクト: Miserlou/simpleaws
def connect():
    """Lazily open the module-global IAM, S3 and CloudFront connections.

    Subsequent calls are no-ops; returns the module-level `connected` flag.
    """
    global connected
    global AWS_ACCESS_KEY
    global AWS_SECRET_ACCESS_KEY

    # Connections are stored as module globals so other functions in this
    # module can use them after a single connect() call.
    global iam
    global s3
    global cloudfront

    if not connected:
        iam = boto.connect_iam(AWS_ACCESS_KEY, AWS_SECRET_ACCESS_KEY)
        s3 = boto.connect_s3(AWS_ACCESS_KEY, AWS_SECRET_ACCESS_KEY)
        cloudfront = boto.connect_cloudfront(AWS_ACCESS_KEY, AWS_SECRET_ACCESS_KEY)
        connected = True

    return connected
コード例 #20
0
def upload_build_directory_to_s3(config):
    """Upload changed files from the build directory to S3, skipping files
    whose hash matches the stored manifest, then invalidate the uploaded
    paths on the matching CloudFront distribution.

    NOTE(review): the manifest is read but never rewritten here, so hashes
    of newly uploaded files are not recorded — confirm whether another step
    updates config.manifest_filename.
    """
    logger = logging.getLogger("%s.upload_build_directory_to_s3" % APP_NAME)
    logger.debug("entry. config.build_directory: %s, config.s3_bucket: %s" % (config.build_directory, config.s3_bucket))

    with contextlib.closing(boto.s3.connect_to_region('eu-west-1')) as conn:
        with contextlib.closing(boto.connect_cloudfront()) as conn_cloudfront:
            # Pick the distribution whose origin mentions the target bucket.
            cloudfront_distribution = [elem for elem in conn_cloudfront.get_all_distributions() if config.s3_bucket in elem.origin.dns_name][0]
            cloudfront_distribution = cloudfront_distribution.get_distribution()
            all_subpaths = []

            bucket = conn.get_bucket(config.s3_bucket)
            # Manifest format: one "subpath digest" pair per line.
            manifest_key = bucket.get_key(config.manifest_filename)
            existing_manifest_hashes = {}
            if not manifest_key:
                logger.debug("manifest does not exist, upload everything.")
            else:
                logger.debug("manifest exists, may be duplicate files.")
                manifest_contents = manifest_key.get_contents_as_string()
                for line in manifest_contents.splitlines():
                    (subpath, digest) = line.strip().split()
                    existing_manifest_hashes[subpath] = digest

            # Number of path components in the build directory itself, used
            # to derive each file's bucket-relative (POSIX) subpath below.
            build_path_length = len(config.build_directory.split(os.sep))
            for root, dirs, files in os.walk(config.build_directory):
                for filename in files:
                    filepath = os.path.join(root, filename)
                    subpath = posixpath.join(*filepath.split(os.sep)[build_path_length:])
                    digest = calculate_hash(filepath)
                    if existing_manifest_hashes.get(subpath, None) == digest:
                        logger.debug("file '%s' already exists and is identical, skipping." % subpath)
                        continue
                    logger.debug("uploading subpath '%s'" % subpath)
                    all_subpaths.append(subpath)
                    # Delete first so stale metadata does not survive.
                    key = bucket.delete_key(subpath)
                    key = bucket.new_key(subpath)
                    if is_gzip_file(filepath):
                        logger.debug("mark as a gzipped file")
                        key.set_metadata("Content-Encoding", "gzip")
                    key.set_contents_from_filename(filepath)
                    key.make_public()

            logger.debug("creating cloudfront invalidation request")
            conn_cloudfront.create_invalidation_request(cloudfront_distribution.id, all_subpaths)
コード例 #21
0
ファイル: s3cf.py プロジェクト: cotsog/drupan
    def setup(self):
        """Open the S3 connection (and CloudFront, when configured).

        Bucket names containing a dot break boto's default subdomain
        calling format, so those connect with OrdinaryCallingFormat and an
        explicit host instead.
        """
        extra_kwargs = {}
        if "." in self.bucket_name:  # work around for a boto bug
            extra_kwargs["calling_format"] = OrdinaryCallingFormat()
            extra_kwargs["host"] = self.s3_host
        self.s3_connection = S3Connection(
            self.aws_access_key,
            self.aws_secret_key,
            **extra_kwargs
        )
        self.bucket = self.s3_connection.get_bucket(self.bucket_name)

        if self.cloudfront_id:
            self.cf_connection = boto.connect_cloudfront(
                self.aws_access_key,
                self.aws_secret_key,
            )
コード例 #22
0
def push_json_to_s3(lineage, resolution, directory = '../auspice/data/', bucket = 'nextflu-dev', cloudfront = 'E1XKGZG0ZTX4YN'):
	"""Upload JSON files to S3 bucket"""
	"""Boto expects environmental variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY"""
	directory = directory.rstrip('/')+'/'

	import boto
	conn = boto.connect_s3()
	b = conn.get_bucket(bucket)
	k = boto.s3.key.Key(b)

	paths = []	

	print "Uploading JSONs for", lineage, resolution
	for postfix in ['tree.json', 'sequences.json', 'frequencies.json', 'meta.json']:
		json = lineage + '_' + resolution + '_' + postfix
		k.key = 'data/'+json
		k.set_contents_from_filename(directory+json)
		print json,"uploaded"
		paths.append('data/'+json)

	c = boto.connect_cloudfront()
	c.create_invalidation_request(cloudfront, paths)
コード例 #23
0
ファイル: cloudfront.py プロジェクト: noeldvictor/burlap
def get_or_create_distribution(s3_bucket):
    """Return the CloudFront distribution whose origin is *s3_bucket*,
    creating one if it does not exist yet.

    In dryrun mode, only prints the call that would be made.
    """
    # Origin DNS name for the bucket; needed by both branches.
    origin_dns = '%s.s3.amazonaws.com' % s3_bucket.name
    if not get_dryrun():
        conn = boto.connect_cloudfront()
        origin = boto.cloudfront.origin\
            .S3Origin(origin_dns)

        # Reuse an existing distribution if one already fronts this origin.
        distro = None
        dists = conn.get_all_distributions()
        for d in dists:
            if origin_dns == d.get_distribution().config.origin.dns_name:
                distro = d
                break

        if not distro:
            distro = conn.create_distribution(origin=origin, enabled=True)

        return distro
    else:
        # BUGFIX: previously printed repr(name), but `name` was undefined
        # here and raised a NameError in dryrun mode.
        print('boto.connect_cloudfront().create_distribution(%s)' % repr(origin_dns))
コード例 #24
0
ファイル: reset-cache.py プロジェクト: Sn0rkY/dockerfiles
access_key = os.getenv("AWS_ACCESS_KEY")
access_secret = os.getenv("AWS_SECRET_KEY")
cloudfront_dist = os.getenv("AWS_CF_DISTRIBUTION_ID")
bucket = os.getenv("AWS_S3_BUCKET")

if access_key == "" or access_key is None:
    print "Please set AWS_ACCESS_KEY env variable."
    sys.exit(1)
elif access_secret == "" or access_secret is None:
    print "Please set AWS_SECRET_KEY env variable."
    sys.exit(1)
elif cloudfront_dist == "" or cloudfront_dist is None:
    print "Please set AWS_CF_DISTRIBUTION_ID env variable."
    sys.exit(1)
elif bucket == "" or bucket is None:
    print "Please set AWS_S3_BUCKET env variable."
    sys.exit(1)

# get the paths from s3
s3_conn = boto.connect_s3(access_key, access_secret)
docs = s3_conn.get_bucket(bucket)
items = []

for key in docs.list():
    items.append(key.name)

cf_conn = boto.connect_cloudfront(access_key, access_secret)
inval_req = cf_conn.create_invalidation_request(cloudfront_dist, items)

print inval_req
sys.exit(0)
コード例 #25
0
ファイル: shortcuts.py プロジェクト: mirskytech/mirskutils
def sign_s3_url(url, timeout=None):
    """Create a signed CloudFront URL for Amazon S3 authentication.

    Requires that ``boto`` be installed.  *timeout* is the validity window
    in seconds; falls back to settings.CLOUDFRONT_URL_TIMEOUT (default 10).
    """
    c = boto.connect_cloudfront(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)
    d = c.get_streaming_distribution_info(settings.CLOUDFRONT_DISTRIBUTION_ID)
    # BUGFIX: operator precedence previously made the expiry equal to the
    # raw CLOUDFRONT_URL_TIMEOUT setting (an epoch time in 1970) whenever
    # `timeout` was None; the window must always be added to "now".
    lifetime = timeout if timeout else getattr(settings, 'CLOUDFRONT_URL_TIMEOUT', 10)
    e = int(time.time() + lifetime)
    # BUGFIX: the computed expiry was never passed to create_signed_url.
    return d.create_signed_url(url, settings.CLOUDFRONT_KEY_PAIR_ID, expire_time=e, private_key_file=settings.CLOUDFRONT_PEM)
コード例 #26
0
ファイル: sync_s3.py プロジェクト: 18600597055/hue
 def open_cf(self):
     """
     Returns an open connection to CloudFront
     """
     connection = boto.connect_cloudfront(
         self.AWS_ACCESS_KEY_ID, self.AWS_SECRET_ACCESS_KEY)
     return connection
コード例 #27
0
ファイル: s3utils.py プロジェクト: pombredanne/s3utils
 def connect_cloudfront(self):
     """Connect to Cloud Front. This is done automatically for you when needed."""
     self.conn_cloudfront = connect_cloudfront(
         self.AWS_ACCESS_KEY_ID,
         self.AWS_SECRET_ACCESS_KEY,
         debug=self.S3UTILS_DEBUG_LEVEL,
     )
コード例 #28
0
ファイル: AWS.py プロジェクト: zhangsquared/Django
 def __init__(self):
     """Open S3 and CloudFront connections using the configured keys."""
     # S3 uses keyword credentials; CloudFront takes them positionally.
     self.conn = boto.connect_s3(aws_access_key_id=config.my_access_key,
                                               aws_secret_access_key =config.my_secret_key)
     self.c = boto.connect_cloudfront(config.my_access_key, config.my_secret_key)
     print 'establish connection for s3'
コード例 #29
0
import boto

# List every CloudFront distribution on the account and print a one-line
# summary for each (Python 2 print statements).
c = boto.connect_cloudfront()
all_distributions = c.get_all_distributions()
print all_distributions
for distribution in all_distributions:
	print distribution.domain_name, "-->", distribution.comment, "-->", distribution.origin, "-->", distribution.status
#print distribution_id.domain_name
コード例 #30
0
ファイル: cloudfront.py プロジェクト: kmcintyre/Snapyelp
def create_distro():
    """Create the (initially disabled) Snapyelp CloudFront distribution
    fronting the app bucket, and return it."""
    origin = boto.cloudfront.origin.S3Origin(app_util.app_bucket)
    connection = boto.connect_cloudfront()
    distro = connection.create_distribution(
        cnames=[app_util.app_name],
        origin=origin,
        enabled=False,
        comment='Snapyelp Distribution',
    )
    return distro