Example #1
def upload_notes_build_directory_to_s3(config):
    """ !!AI TODO need a manifest, and a saner way of tracking files."""

    logger = logging.getLogger("%s.upload_notes_build_directory_to_s3" % APP_NAME)
    logger.debug("entry. config.notes_build_directory: %s, config.notes_s3_bucket: %s" % (config.notes_build_directory, config.notes_s3_bucket))

    with contextlib.closing(boto.connect_s3()) as conn:
        with contextlib.closing(boto.connect_cloudfront()) as conn_cloudfront:
            cloudfront_distribution = [elem for elem in conn_cloudfront.get_all_distributions() if config.notes_s3_bucket in elem.origin.dns_name][0]
            cloudfront_distribution = cloudfront_distribution.get_distribution()
            bucket = conn.get_bucket(config.notes_s3_bucket)

            output_subpaths = []
            for subpath in sorted(config.notes_input_to_output.values()):
                posix_subpath = posixpath.join(*subpath.split(os.sep))
                posix_subdirectory = posixpath.split(posix_subpath)[0]
                output_subpaths.append(posix_subpath)
                output_subpaths.append(posixpath.join(posix_subdirectory, "_pandoc.css"))
            output_filepaths = [os.path.normpath(os.path.join(config.notes_build_directory, subpath)) for subpath in output_subpaths]

            for (subpath, filepath) in zip(output_subpaths, output_filepaths):
                logger.debug("uploading subpath: '%s'" % subpath)
                key = bucket.delete_key(subpath)
                key = bucket.new_key(subpath)
                if is_gzip_file(filepath):
                    logger.debug("mark as a gzipped file")
                    key.set_metadata("Content-Encoding", "gzip")
                key.set_contents_from_filename(filepath)
                key.make_public()

            logger.debug("creating cloudfront invalidation request")
            conn_cloudfront.create_invalidation_request(cloudfront_distribution.id, output_subpaths)
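The `is_gzip_file` helper used above (and again in Example #40) is not shown; a minimal sketch, assuming it only needs to detect the gzip container, is:

def is_gzip_file(filepath):
    # gzip streams begin with the two magic bytes 0x1f 0x8b
    with open(filepath, "rb") as f:
        return f.read(2) == b"\x1f\x8b"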
Example #2
    def generate_url(self, _object):
        assert isinstance(_object, Object)

        bucket = _object.s3_bucket
        resource = u'%s%s' % (
            bucket.origin,
            _object.s3_key
        )

        conn = boto.connect_cloudfront()
        dist = conn.get_distribution_info(bucket.distribution_id)
        signers = filter(lambda signer: signer.id == u'Self',
                         dist.active_signers)
        if len(signers) < 1:
            raise SignerNotFound()

        key_pair = random.choice(filter(
            lambda key_pair: key_pair is not None,
            [
                self._get_key_pair_by_id(key_pair_id, False)
                for key_pair_id in signers[0].key_pair_ids
            ]
        ))
        signed_url = dist.create_signed_url(
            resource,
            key_pair.id,
            expire_time=int(time.time() + 60 * 60 * 6),  # 6 hours
            private_key_string=key_pair.private
        )
        return signed_url
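`SignerNotFound` is the surrounding project's own exception class; presumably little more than:

class SignerNotFound(Exception):
    """Raised when the distribution has no usable trusted signer."""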
Example #3
def setup_s3(bucket_name=None):
    """ Setup s3 instance with cloudfront distribution. """
    if not bucket_name:
        bucket_name = '{0}-media'.format(PROJECT_NAME)
    conn = boto.connect_s3()
    try:
        conn.create_bucket(bucket_name,
                           location=_Location.EU,
                           policy='public-read')
    except _S3CreateError:
        pass

    print('AWS_STORAGE_BUCKET_NAME={0}'.format(bucket_name))

    if CLOUDFRONT_ENABLED:
        try:
            origin = CLOUDFRONT_CUSTOM_ORIGIN
        except NameError:
            origin = None
        if not origin:
            origin = '{0}.s3.amazonaws.com'.format(bucket_name)

        origin = _CustomOrigin(origin, origin_protocol_policy='http-only')
        conn = boto.connect_cloudfront()
        distro = conn.create_distribution(origin=origin, enabled=True)
        print('MEDIA_DOMAIN={0}'.format(distro.domain_name))
    else:
        bucket_url = '{0}.s3.amazonaws.com'.format(bucket_name)
        print('MEDIA_DOMAIN={0}'.format(bucket_url))
Example #4
def main():
    paths = [line.strip() for line in open(invalidation_path)]
    conn = boto.connect_cloudfront()
    inval_req = conn.create_invalidation_request(dist_id, paths)
    print inval_req
    touch = open(invalidation_path, "w")
    touch.write("")
Example #5
    def __init__(self):
        handlers = [
            (r"/", HomeHandler),
            (r"/login", LoginHandler),
            (r"/logout", LogoutHandler),
            (r"/register", RegisterHandler),
            (r"/video/([0-9]+)", VideoHandler),
            (r"/purchase/([a-zA-Z0-9-_]+)", PurchaseHandler),
            (r"/bucket", BucketHandler),
        ]
        settings = dict(
            site_title=u"Site Title",
            template_path=os.path.join(os.path.dirname(__file__), "templates"),
            static_path=os.path.join(os.path.dirname(__file__), "static"),
            static_url_prefix="https://s3-us-west-2.amazonaws.com/assets/",
            xsrf_cookies=True,
            cookie_secret="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
            login_url="/login",
            debug=False,
        )
        tornado.web.Application.__init__(self, handlers, **settings)

        self.db = torndb.Connection(
            host=options.mysql_host,
            database=options.mysql_database,
            user=options.mysql_user,
            password=options.mysql_password
        )
        
        self.cf = boto.connect_cloudfront()
Example #6
def invalidate_paths(dry_run=False):
    paths = _get_paths()
    num_paths = len(paths)
    if num_paths > 0:
        cloudfront = boto.connect_cloudfront(AWS_KEY, AWS_SECRET)
        # Can only do 1000 invalidates at a time, so send them in chunks
        # Note: 1st 1000 invalidations per month are free, anything over
        # that costs $$$
        chunk_begin = 0
        chunk_end = min(num_paths, 1000)
        while chunk_begin < num_paths:
            paths_chunk = paths[chunk_begin:chunk_end]
            if dry_run:
                print "Would've created an invalidation request."
            else:
                req = cloudfront.create_invalidation_request(DISTRIBUTION_ID, paths_chunk)
                print "Created invalidation request."

            chunk_begin = chunk_end
            chunk_end = min(chunk_end + 1000, num_paths)

    if dry_run:
        print "Without the dry_run arg, would've invalidated {0} paths.".format(num_paths)
    else:
        print "Invalidated {0} paths.".format(num_paths)
Example #7
    def get_or_create_distribution(self, s3_bucket_name):
        assert isinstance(s3_bucket_name, six.string_types)
        boto = get_boto()
        origin_dns = '%s.s3.amazonaws.com' % s3_bucket_name
        if not self.dryrun:
            conn = boto.connect_cloudfront(
                self.genv.aws_access_key_id,
                self.genv.aws_secret_access_key
            )
            origin = boto.cloudfront.origin.S3Origin(origin_dns)

            distro = None
            if self.verbose:
                # Loop over all distributions to determine whether this one exists already.
                dists = conn.get_all_distributions()
                for d in dists:
                    print('Checking existing Cloudfront distribution %s...' % d.get_distribution().config.origin.dns_name)
                    if origin_dns == d.get_distribution().config.origin.dns_name:
                        print('Found existing distribution!')
                        distro = d
                        break

                    # Necessary to avoid "Rate exceeded" errors.
                    time.sleep(0.4)

            if not distro:
                print('Creating new distribution from %s...' % origin)
                distro = conn.create_distribution(origin=origin, enabled=True)

            return distro
        else:
            print('boto.connect_cloudfront().create_distribution(%s)' % repr(origin_dns))
Example #8
def push_json_to_s3(lineage,
                    resolution,
                    directory='../auspice/data/',
                    bucket='nextflu-dev',
                    cloudfront='E1XKGZG0ZTX4YN'):
    """Upload JSON files to S3 bucket"""
    """Boto expects environmental variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY"""
    directory = directory.rstrip('/') + '/'

    import boto
    conn = boto.connect_s3()
    b = conn.get_bucket(bucket)
    k = boto.s3.key.Key(b)

    paths = []

    print "Uploading JSONs for", lineage, resolution
    for postfix in [
            'tree.json', 'sequences.json', 'frequencies.json', 'meta.json'
    ]:
        json = lineage + '_' + resolution + '_' + postfix
        k.key = 'data/' + json
        k.set_contents_from_filename(directory + json)
        print json, "uploaded"
        paths.append('data/' + json)

    c = boto.connect_cloudfront()
    c.create_invalidation_request(cloudfront, paths)
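A hypothetical invocation with placeholder lineage/resolution values; this would upload h3n2_3y_tree.json, h3n2_3y_sequences.json, etc. from ../auspice/data/ and invalidate the matching data/ paths:

push_json_to_s3('h3n2', '3y')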
Example #9
def foia_file_delete_s3(sender, **kwargs):
    """Delete file from S3 after the model is deleted"""
    # pylint: disable=unused-argument

    if settings.CLEAN_S3_ON_FOIA_DELETE:
        # only delete if we are using s3
        foia_file = kwargs['instance']

        conn = S3Connection(
            settings.AWS_ACCESS_KEY_ID,
            settings.AWS_SECRET_ACCESS_KEY,
        )
        bucket = conn.get_bucket(settings.AWS_STORAGE_BUCKET_NAME)
        key = bucket.get_key(foia_file.ffile.name)
        if key:
            key.delete()

        # also clear the cloudfront cache
        cloudfront = boto.connect_cloudfront(
            settings.AWS_ACCESS_KEY_ID,
            settings.AWS_SECRET_ACCESS_KEY,
        )
        # find the current distribution
        distributions = [
            d for d in cloudfront.get_all_distributions()
            if settings.AWS_S3_CUSTOM_DOMAIN in d.cnames
        ]
        if distributions:
            distribution = distributions[0]
            cloudfront.create_invalidation_request(
                distribution.id,
                [foia_file.ffile.name],
            )
Example #10
    def get_or_create_distribution(self, s3_bucket_name):
        assert isinstance(s3_bucket_name, six.string_types)
        boto = get_boto()
        origin_dns = '%s.s3.amazonaws.com' % s3_bucket_name
        if not self.dryrun:
            conn = boto.connect_cloudfront(self.genv.aws_access_key_id,
                                           self.genv.aws_secret_access_key)
            origin = boto.cloudfront.origin.S3Origin(origin_dns)

            distro = None
            if self.verbose:
                # Loop over all distributions to determine whether this one exists already.
                dists = conn.get_all_distributions()
                for d in dists:
                    print('Checking existing Cloudfront distribution %s...' %
                          d.get_distribution().config.origin.dns_name)
                    if origin_dns == d.get_distribution(
                    ).config.origin.dns_name:
                        print('Found existing distribution!')
                        distro = d
                        break

                    # Necessary to avoid "Rate exceeded" errors.
                    time.sleep(0.4)

            if not distro:
                print('Creating new distribution from %s...' % origin)
                distro = conn.create_distribution(origin=origin, enabled=True)

            return distro
        else:
            print('boto.connect_cloudfront().create_distribution(%s)' %
                  repr(origin_dns))
Example #11
def purge_cloudfront_caches(page, request):
    try:
        distribution = CloudfrontDistribution.objects.all()[0]
        cf = boto.connect_cloudfront()
        cf.create_invalidation_request(distribution.distribution_id, [])
    except:  #TODO: This is a broad exception - but we are having issues with the cache. Will tighten down once we start using it.
        pass
Example #12
  def do_activity(self, data = None):
    """
    Do the work
    """
    if(self.logger):
      self.logger.info('data: %s' % json.dumps(data, sort_keys=True, indent=4))
    
    self.db.connect()
    
    # cdn.elifesciences.org CDN ID
    distribution_id = self.settings.cdn_distribution_id
    
    invalidation_list = self.get_invalidation_list()
    
    # Connect to CloudFront
    c_conn = boto.connect_cloudfront(self.settings.aws_access_key_id, self.settings.aws_secret_access_key)
    
    # Limit of 1000 URLs to invalidate at one time
    try:
      count = int(len(invalidation_list) / 1000) + 1
    except:
      # Divide by zero or something else
      return False

    array_of_invalidation_list = self.split_array(invalidation_list, count)

    for i_list in array_of_invalidation_list:
      inval_req = c_conn.create_invalidation_request(distribution_id, i_list)

    if(self.logger):
      self.logger.info('LensCDNInvalidation: %s' % "")
    
    return True
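`split_array` is a helper from the surrounding project; a plausible implementation, splitting a list into `count` contiguous near-equal pieces so each stays within the 1000-path limit:

def split_array(lst, count):
    # Cut lst into `count` pieces of near-equal length.
    size = max(1, -(-len(lst) // count))  # ceiling division
    return [lst[i:i + size] for i in range(0, len(lst), size)]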
Example #13
def setup_s3(bucket_name=None):
    """ Setup s3 instance with cloudfront distribution. """
    if not bucket_name:
        bucket_name = '{0}-media'.format(PROJECT_NAME)
    conn = boto.connect_s3()
    try:
        conn.create_bucket(bucket_name, location=_Location.EU,
                           policy='public-read')
    except _S3CreateError:
        pass

    print('AWS_STORAGE_BUCKET_NAME={0}'.format(bucket_name))

    if CLOUDFRONT_ENABLED:
        try:
            origin = CLOUDFRONT_CUSTOM_ORIGIN
        except NameError:
            origin = None
        if not origin:
            origin = '{0}.s3.amazonaws.com'.format(bucket_name)

        origin = _CustomOrigin(origin, origin_protocol_policy='http-only')
        conn = boto.connect_cloudfront()
        distro = conn.create_distribution(origin=origin, enabled=True)
        print('MEDIA_DOMAIN={0}'.format(distro.domain_name))
    else:
        bucket_url = '{0}.s3.amazonaws.com'.format(bucket_name)
        print('MEDIA_DOMAIN={0}'.format(bucket_url))
Example #14
    def s3_export(data, name):
        raw_data = json.dumps(data)
        script_content = 'window.EVENTS_DATA=' + raw_data

        with gzip.open(name + '.js.gz', 'wb') as f:
            f.write(str(script_content).encode('utf-8'))

        with open(name + '.json', 'w') as f:
            f.write(raw_data)

        aws_host = os.environ.get('AWS_HOST')
        aws_bucket = os.environ.get('S3_BUCKET')
        cloudfront_id = os.environ.get('CLOUDFRONT_ID')
        aws_region = os.environ.get('AWS_REGION')
        access_key_id = os.environ.get('AWS_ACCESS_KEY_ID')
        secret_access_key = os.environ.get('AWS_SECRET_ACCESS_KEY')

        conn = boto.s3.connect_to_region(
            aws_region,
            aws_access_key_id=access_key_id,
            aws_secret_access_key=secret_access_key,
            is_secure=True,  # set to False if not using SSL
            calling_format=boto.s3.connection.OrdinaryCallingFormat(),
        )

        bucket = conn.get_bucket(aws_bucket)

        key = bucket.get_key('output/' + name + '.js.gz')
        key_raw = bucket.get_key('raw/' + name + '.json')

        if key is None:
            print("Creating New Bucket")
            key = bucket.new_key('output/' + name + '.js.gz')

        if key_raw is None:
            print("Creating New Raw File")
            key_raw = bucket.new_key('raw/' + name + '.json')

        # Upload data to S3
        print("Uploading RAW to S3")
        key_raw.set_contents_from_filename(name + '.json')
        key_raw.set_acl('public-read')

        print("Uploading GZIP to S3")
        key.set_metadata('Content-Type', 'text/plain')
        key.set_metadata('Content-Encoding', 'gzip')
        key.set_contents_from_filename(name + '.js.gz')
        key.set_acl('public-read')

        # Cloudfront Invalidation requests
        print("Invalidating Output")
        cloudfront = boto.connect_cloudfront()
        paths = ['/output/*']
        inval_req = cloudfront.create_invalidation_request(
            cloudfront_id, paths)

        #Delete all files
        os.remove(name + ".js.gz")
        os.remove(name + ".json")
Example #15
    def __init__(self, conf):
        super(CDNProvider, self).__init__(conf)

        self._conf.register_opts(CLOUDFRONT_OPTIONS, group=CLOUDFRONT_GROUP)
        self.cloudfront_conf = self._conf[CLOUDFRONT_GROUP]
        self.cloudfront_client = boto.connect_cloudfront(
            aws_access_key_id=self.cloudfront_conf.aws_access_key_id,
            aws_secret_access_key=self.cloudfront_conf.aws_secret_access_key)
Example #16
    def run(self, terms, variables=None, **kwargs):
        distributionId = terms[0]
        cloudfront_conn = boto.connect_cloudfront(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
        distributionConfig = cloudfront_conn.get_distribution_config(distributionId)
        if distributionConfig:
#            zz = dir(distributionConfig)
            return [distributionConfig.etag]
        return None
Example #17
    def __init__(self, conf):
        super(CDNProvider, self).__init__(conf)

        self._conf.register_opts(CLOUDFRONT_OPTIONS, group=CLOUDFRONT_GROUP)
        self.cloudfront_conf = self._conf[CLOUDFRONT_GROUP]
        self.cloudfront_client = boto.connect_cloudfront(
            aws_access_key_id=self.cloudfront_conf.aws_access_key_id,
            aws_secret_access_key=self.cloudfront_conf.aws_secret_access_key)
Example #18
def publish_program(host, program):
    import boto
    c = boto.connect_cloudfront()
    for d in c.get_all_distributions():
        if d.origin.dns_name == host:
            print 'good'
        else:
            print 'bad', d.origin.dns_name
Example #19
def perform_create_cloudfront_invalidation(dispatcher, intent):
    """
    See :class:`CreateCloudFrontInvalidation`.
    """
    cf = boto.connect_cloudfront()
    distribution = [dist for dist in cf.get_all_distributions()
                    if intent.cname in dist.cnames][0]
    cf.create_invalidation_request(distribution.id, intent.paths)
Example #20
def main():
        distid = 'xxxxxxxxxx'
        invalidationfilepath = '/home/ec2-user/invalid.txt'
        paths = [line.strip() for line in open(invalidationfilepath)]
        conn = boto.connect_cloudfront()
        inval_req = conn.create_invalidation_request(distid, paths)
        print inval_req
        touch = open(invalidationfilepath,"w")
        touch.write("")
Example #21
    def __init__(self, server_name, distribution_id, key_pair_id, private_key_string, aws_access_key_id=None, aws_secret_access_key=None):
        super(AmazonCloudFrontS3Storage, self).__init__(server_name)

        self.cloudfront_connection = boto.connect_cloudfront(aws_access_key_id, aws_secret_access_key)
        self.s3_connection = boto.connect_s3(aws_access_key_id, aws_secret_access_key)

        self.distribution_id = distribution_id
        self.distribution = self.cloudfront_connection.get_distribution_info(self.distribution_id)
        self.key_pair_id = key_pair_id
        self.private_key_string = private_key_string
Example #22
 def run(self, terms, variables=None, **kwargs):
     distributionId = terms[0]
     cloudfront_conn = boto.connect_cloudfront(AWS_ACCESS_KEY_ID,
                                               AWS_SECRET_ACCESS_KEY)
     distributionConfig = cloudfront_conn.get_distribution_config(
         distributionId)
     if distributionConfig:
         #            zz = dir(distributionConfig)
         return [distributionConfig.etag]
     return None
Example #23
def perform_create_cloudfront_invalidation(dispatcher, intent):
    """
    See :class:`CreateCloudFrontInvalidation`.
    """
    cf = boto.connect_cloudfront()
    distribution = [
        dist for dist in cf.get_all_distributions()
        if intent.cname in dist.cnames
    ][0]
    cf.create_invalidation_request(distribution.id, intent.paths)
Example #24
    def __init__(self, bucket_name, aws_key, aws_secret):
        self.job_id = str(uuid.uuid1())
        self.bucket_name = bucket_name
        self.aws_key = aws_key
        self.aws_secret = aws_secret

        # TODO: clean up bucket name string? What's a valid AWS username?
        self.user_name = "bhuket-" + bucket_name

        self.iam = boto.connect_iam(aws_key, aws_secret)
        self.s3 = boto.connect_s3(aws_key, aws_secret)
        self.cloudfront = boto.connect_cloudfront(aws_key, aws_secret)
Example #25
def run():
    indiv_groups = indivisible_group.grab_data()
    indiv_groupmtg = indivisible_groupmtg.grab_data()
    indiv_action = indivisible_action.grab_data()

    data = indiv_groupmtg + indiv_action + indiv_groups
    content = 'window.INDIVISIBLE_EVENTS=' + json.dumps(data)

    # Locally Store Data
    with gzip.open('data/indivisible-data.js.gz', 'wb') as f:
        f.write(str(content).encode('utf-8'))

    with open('data/indivisible.json', 'w') as f:
        f.write(content)

    # START
    aws_host = os.environ.get('AWS_HOST')
    conn = S3Connection(host=aws_host)

    bucket = conn.get_bucket('pplsmap-data')
    key = bucket.get_key('output/indivisible.js.gz')
    key_raw = bucket.get_key('raw/indivisible.json')

    # Retrieve Keys
    if key is None:
        print("Creating New Bucket")
        key = bucket.new_key('output/indivisible.js.gz')

    if key_raw is None:
        print("Creating New Raw File")
        key_raw = bucket.new_key('raw/indivisible.json')

    # Upload data to S3
    print("Uploading RAW to S3")
    key_raw.set_contents_from_filename('data/indivisible.json')
    key_raw.set_acl('public-read')

    print("Uploading GZIP to S3")
    key.set_metadata('Content-Type', 'text/plain')
    key.set_metadata('Content-Encoding', 'gzip')
    key.set_contents_from_filename('data/indivisible-data.js.gz')
    key.set_acl('public-read')

    # Cloudfront Invalidation requests
    print("Invalidating Indivisible Output")
    cloudfront = boto.connect_cloudfront()
    paths = ['/output/*']
    inval_req = cloudfront.create_invalidation_request(u'EXFHJXIFH495H', paths)

    #Delete all files
    os.remove("data/indivisible-data.js.gz")
    os.remove("data/indivisible.json")
    os.remove("data/indivisible.csv")
Example #26
 def connect_cloudfront(self,
                        aws_access_key_id=None,
                        aws_secret_access_key=None,
                        **kwargs):
     if aws_access_key_id == None:
         aws_access_key_id = self.config.get("Credentials",
                                             "aws_access_key_id")
     if aws_secret_access_key == None:
         aws_secret_access_key = self.config.get("Credentials",
                                                 "aws_secret_access_key")
     return boto.connect_cloudfront(aws_access_key_id,
                                    aws_secret_access_key, **kwargs)
Example #27
    def s3_export(data, name):
        raw_data = json.dumps(data)
        script_content = 'window.EVENTS_DATA=' + raw_data

        with gzip.open(name + '.js.gz', 'wb') as f:
            f.write(str(script_content).encode('utf-8'))

        with open(name + '.json', 'w') as f:
            f.write(raw_data)

        aws_host = os.environ.get('AWS_HOST')
        aws_bucket = os.environ.get('S3_BUCKET')
        cloudfront_id = os.environ.get('CLOUDFRONT_ID')

        conn = S3Connection(host=aws_host)

        bucket = conn.get_bucket(aws_bucket)

        key = bucket.get_key('output/' + name + '.js.gz')
        key_raw = bucket.get_key('raw/' + name + '.json')

        if key is None:
            print("Creating New Bucket")
            key = bucket.new_key('output/' + name + '.js.gz')

        if key_raw is None:
            print("Creating New Raw File")
            key_raw = bucket.new_key('raw/' + name + '.json')

        # Upload data to S3
        print("Uploading RAW to S3")
        key_raw.set_contents_from_filename(name + '.json')
        key_raw.set_acl('public-read')

        print("Uploading GZIP to S3")
        key.set_metadata('Content-Type', 'text/plain')
        key.set_metadata('Content-Encoding', 'gzip')
        key.set_contents_from_filename(name + '.js.gz')
        key.set_acl('public-read')

        # Cloudfront Invalidation requests
        print("Invalidating Output")
        cloudfront = boto.connect_cloudfront()
        paths = ['/output/*']
        inval_req = cloudfront.create_invalidation_request(
            cloudfront_id, paths)

        #Delete all files
        os.remove(name + ".js.gz")
        os.remove(name + ".json")
Example #28
def create_signed_url(url, **kwargs):
    """Create new signed CloudFront URL for
    given URL

    See <http://boto.cloudhackers.com/en/latest/ref/cloudfront.html>
    """
    conn = boto.connect_cloudfront(settings.AWS_ACCESS_KEY_ID,
                                   settings.AWS_SECRET_ACCESS_KEY)
    dist = conn.get_distribution_info(settings.AWS_CF_ID)

    signed = dist.create_signed_url(url, settings.AWS_KEYPAIR_ID,
                                    private_key_string=settings.AWS_KEYPAIR_PRIVATE_KEY,
                                    **kwargs)
    return signed
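A hypothetical call, with a placeholder CloudFront URL; boto's `expire_time` keyword passes through `**kwargs`:

import time

signed = create_signed_url('http://d111111abcdef8.cloudfront.net/private/report.pdf',
                           expire_time=int(time.time() + 15 * 60))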
Example #29
def invalidate_cloudfront_objects(profile, distribution, objects):
    """Chunk the invalidation request into 1000 objects"""
    chunk_size = 3000  # upto 3000 according to this guide:
    # http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Invalidation.html
    conn = boto.connect_cloudfront(profile_name=profile)
    print('distribution info: %s' % conn.get_distribution_info(distribution))

    # Splitting the object list into chunks
    chunks = [objects[i:i + chunk_size] for i in range(0, len(objects), chunk_size)]

    # Create invalidation requests
    for paths in chunks:
        req = conn.create_invalidation_request(distribution, paths)
        print('invalidation request: %s' % req)
Example #30
def do_invalidate(paths, origin):
    print 'do invalidate'
    import boto
    c = boto.connect_cloudfront()
    for d in c.get_all_distributions():
        print 'distribution:', d.origin.dns_name, d.cnames, origin
        if d.cnames[0] == str(sys.argv[1]):
            print d.id, d.domain_name, d.status, d.comment
            for ir in c.get_invalidation_requests(d.id):
                if ir.status != 'Completed':
                    print 'invalidate request:', ir.id, ir.status
                    return 'no can do!!!'
            print 'create invalidation'
            c.create_invalidation_request(d.id, paths)
Example #31
def run():
    data = peoplepower_action.grab_data()
    raw = json.dumps(data)
    content = 'window.PEOPLEPOWER_EVENTS=' + json.dumps(data)

    # Locally Store Data
    with gzip.open('data/peoplepower.js.gz', 'wb') as f:
        f.write(str(content).encode('utf-8'))

    with open('data/peoplepower.json', 'w') as f:
        f.write(raw)

    # START
    aws_host = os.environ.get('AWS_HOST')
    conn = S3Connection(host=aws_host)

    bucket = conn.get_bucket('pplsmap-data')
    key = bucket.get_key('output/peoplepower.js.gz')
    key_raw = bucket.get_key('raw/peoplepower.json')

    # Retrieve Keys
    if key is None:
        print("Creating New Bucket")
        key = bucket.new_key('output/peoplepower.js.gz')

    if key_raw is None:
        print("Creating New Raw File")
        key_raw = bucket.new_key('raw/peoplepower.json')

    # Upload data to S3
    print("Uploading RAW to S3")
    key_raw.set_contents_from_filename('data/peoplepower.json')
    key_raw.set_acl('public-read')

    print("Uploading GZIP to S3")
    key.set_metadata('Content-Type', 'text/plain')
    key.set_metadata('Content-Encoding', 'gzip')
    key.set_contents_from_filename('data/peoplepower.js.gz')
    key.set_acl('public-read')

    # Cloudfront Invalidation requests
    print("Invalidating ACLU Output")
    cloudfront = boto.connect_cloudfront()
    paths = ['/output/*']
    inval_req = cloudfront.create_invalidation_request(u'EXFHJXIFH495H', paths)

    os.remove("data/peoplepower.js.gz")
    os.remove("data/peoplepower.json")
Example #32
def _upload_to_cloudfront(filepath):
    global _cf_connection
    global _cf_distribution
    
    if _cf_connection is None:
        _cf_connection = boto.connect_cloudfront(settings.AWS_ACCESS_KEY, 
                                                 settings.AWS_ACCESS_SECRET)
                                                         
    if _cf_distribution is None:
        _cf_distribution = _cf_connection.create_distribution(
            origin='%s.s3.amazonaws.com' % settings.AWS_STORAGE_BUCKET_NAME,
            enabled=True,
            comment=settings.AWS_CLOUDFRONT_DISTRIBUTION_COMMENT)
                              
    
    # now we can delete any old versions of the same file that have the 
    # same name but a different timestamp
    basename = os.path.basename(filepath)
    object_regex = re.compile('%s\.(\d+)\.%s' % \
        (re.escape('.'.join(basename.split('.')[:-2])),
         re.escape(basename.split('.')[-1])))
    for obj in _cf_distribution.get_objects():
        match = object_regex.findall(obj.name)
        if match:
            old_timestamp = int(match[0])
            new_timestamp = int(object_regex.findall(basename)[0])
            if new_timestamp == old_timestamp:
                # an exact copy already exists
                return obj.url()
            elif new_timestamp > old_timestamp:
                # we've come across the same file but with an older timestamp
                #print "DELETE!", obj_.name
                obj.delete()
                break
    
    # Still here? That means that the file wasn't already in the distribution
    
    fp = open(filepath)
    
    # Because the name will always contain a timestamp we set faaar future 
    # caching headers. Doesn't matter exactly as long as it's really far future.
    headers = {'Cache-Control':'max-age=315360000, public',
               'Expires': 'Thu, 31 Dec 2037 23:55:55 GMT',
               }
               
    #print "\t\t\tAWS upload(%s)" % basename
    obj = _cf_distribution.add_object(basename, fp, headers=headers)
    return obj.url()
Example #33
def get_cache(force_rebuild=False):
    if not settings.AWS_ACCESS_KEY_ID:
        return {}
    if force_rebuild or not hasattr(get_cache, "cache"):
        if force_rebuild or not CACHE_FILE.exists():
            CACHE_FILE.dirname().makedirs_p()
            connection = connect_cloudfront(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)
            distributions = connection.get_all_distributions()
            cache = {distribution.origin.dns_name: distribution.domain_name for distribution in distributions}
            with open(CACHE_FILE, "w") as handle:
                json.dump(cache, handle)
        else:
            with open(CACHE_FILE) as handle:
                cache = json.load(handle)
        get_cache.cache = cache
    return get_cache.cache
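A hypothetical lookup against the resulting cache (the bucket name is a placeholder):

cdn_domain = get_cache().get('example-bucket.s3.amazonaws.com')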
Example #34
def do_invalidate():
    
    c = boto.connect_cloudfront()
    for d in c.get_all_distributions():
        if d.origin.dns_name == app_util.app_bucket:
            print 'domain id:', d.id, 'domain name:', d.domain_name, 'domain status:', d.status, 'comment:', d.comment            
            for ir in c.get_invalidation_requests(d.id):
                if ir.status != 'Completed':
                    print 'invalidate request:', ir.id, ir.status
                    exit(1)
            paths = [res[len(publish.build_dir):] for res in publish.get_publish_list()]            
            if paths:
                print 'invalidate paths:', paths
                c.create_invalidation_request(d.id, paths)
            else:
                print 'invalidate skipped'                
Example #35
def get_cache(force_rebuild=False):
    if not settings.AWS_ACCESS_KEY_ID:
        return {}
    if force_rebuild or not hasattr(get_cache, 'cache'):
        if force_rebuild or not CACHE_FILE.exists():
            CACHE_FILE.dirname().makedirs_p()
            connection = connect_cloudfront(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)
            distributions = connection.get_all_distributions()
            cache = {distribution.origin.dns_name: distribution.domain_name for distribution in distributions}
            with open(CACHE_FILE, 'w') as handle:
                json.dump(cache, handle)
        else:
            with open(CACHE_FILE) as handle:
                cache = json.load(handle)
        get_cache.cache = cache
    return get_cache.cache
Example #36
def invalidate(*paths):
    """
    Issues invalidation requests to a Cloudfront distribution
    for the current static media bucket, triggering it to reload the specified
    paths from the origin.
    
    Note, only 1000 paths can be issued in a request at any one time.
    """
    from burlap.dj import get_settings
    if not paths:
        return
    # http://boto.readthedocs.org/en/latest/cloudfront_tut.html
    _settings = get_settings()
    if not _settings.AWS_STATIC_BUCKET_NAME:
        print('No static media bucket set.')
        return
    if isinstance(paths, basestring):
        paths = paths.split(',')
    all_paths = map(str.strip, paths)
#    assert len(paths) <= 1000, \
#        'Cloudfront invalidation request limited to 1000 paths or less.'
    i = 0
    while 1:
        paths = all_paths[i:i+1000]
        if not paths:
            break
        
        #print 'paths:',paths
        c = boto.connect_cloudfront()
        rs = c.get_all_distributions()
        target_dist = None
        for dist in rs:
            print(dist.domain_name, dir(dist), dist.__dict__)
            bucket_name = dist.origin.dns_name.replace('.s3.amazonaws.com', '')
            if bucket_name == _settings.AWS_STATIC_BUCKET_NAME:
                target_dist = dist
                break
        if not target_dist:
            raise Exception(('Target distribution %s could not be found '
                'in the AWS account.') \
                    % (_settings.AWS_STATIC_BUCKET_NAME,))
        print('Using distribution %s associated with origin %s.' \
            % (target_dist.id, _settings.AWS_STATIC_BUCKET_NAME))
        inval_req = c.create_invalidation_request(target_dist.id, paths)
        print('Issue invalidation request %s.' % (inval_req,))
        
        i += 1000
Example #37
def invalidate(*paths):
    """
    Issues invalidation requests to a Cloudfront distribution
    for the current static media bucket, triggering it to reload the specified
    paths from the origin.
    
    Note, only 1000 paths can be issued in a request at any one time.
    """
    from burlap.dj import get_settings
    if not paths:
        return
    # http://boto.readthedocs.org/en/latest/cloudfront_tut.html
    _settings = get_settings()
    if not _settings.AWS_STATIC_BUCKET_NAME:
        print('No static media bucket set.')
        return
    if isinstance(paths, basestring):
        paths = paths.split(',')
    all_paths = map(str.strip, paths)
    #    assert len(paths) <= 1000, \
    #        'Cloudfront invalidation request limited to 1000 paths or less.'
    i = 0
    while 1:
        paths = all_paths[i:i + 1000]
        if not paths:
            break

        #print 'paths:',paths
        c = boto.connect_cloudfront()
        rs = c.get_all_distributions()
        target_dist = None
        for dist in rs:
            print(dist.domain_name, dir(dist), dist.__dict__)
            bucket_name = dist.origin.dns_name.replace('.s3.amazonaws.com', '')
            if bucket_name == _settings.AWS_STATIC_BUCKET_NAME:
                target_dist = dist
                break
        if not target_dist:
            raise Exception(('Target distribution %s could not be found '
                'in the AWS account.') \
                    % (_settings.AWS_STATIC_BUCKET_NAME,))
        print('Using distribution %s associated with origin %s.' \
            % (target_dist.id, _settings.AWS_STATIC_BUCKET_NAME))
        inval_req = c.create_invalidation_request(target_dist.id, paths)
        print('Issue invalidation request %s.' % (inval_req, ))

        i += 1000
Example #38
def connect():

    global connected
    global AWS_ACCESS_KEY
    global AWS_SECRET_ACCESS_KEY

    global iam
    global s3
    global cloudfront

    if not connected:
        iam = boto.connect_iam(AWS_ACCESS_KEY, AWS_SECRET_ACCESS_KEY)
        s3 = boto.connect_s3(AWS_ACCESS_KEY, AWS_SECRET_ACCESS_KEY)
        cloudfront = boto.connect_cloudfront(AWS_ACCESS_KEY, AWS_SECRET_ACCESS_KEY)
        connected = True

    return connected
Example #39
def connect():

    global connected
    global AWS_ACCESS_KEY
    global AWS_SECRET_ACCESS_KEY

    global iam
    global s3
    global cloudfront

    if not connected:
        iam = boto.connect_iam(AWS_ACCESS_KEY, AWS_SECRET_ACCESS_KEY)
        s3 = boto.connect_s3(AWS_ACCESS_KEY, AWS_SECRET_ACCESS_KEY)
        cloudfront = boto.connect_cloudfront(AWS_ACCESS_KEY,
                                             AWS_SECRET_ACCESS_KEY)
        connected = True

    return connected
Example #40
def upload_build_directory_to_s3(config):
    logger = logging.getLogger("%s.upload_build_directory_to_s3" % APP_NAME)
    logger.debug("entry. config.build_directory: %s, config.s3_bucket: %s" % (config.build_directory, config.s3_bucket))

    with contextlib.closing(boto.s3.connect_to_region('eu-west-1')) as conn:
        with contextlib.closing(boto.connect_cloudfront()) as conn_cloudfront:
            cloudfront_distribution = [elem for elem in conn_cloudfront.get_all_distributions() if config.s3_bucket in elem.origin.dns_name][0]
            cloudfront_distribution = cloudfront_distribution.get_distribution()
            all_subpaths = []

            bucket = conn.get_bucket(config.s3_bucket)
            manifest_key = bucket.get_key(config.manifest_filename)
            existing_manifest_hashes = {}
            if not manifest_key:
                logger.debug("manifest does not exist, upload everything.")
            else:
                logger.debug("manifest exists, may be duplicate files.")
                manifest_contents = manifest_key.get_contents_as_string()
                for line in manifest_contents.splitlines():
                    (subpath, digest) = line.strip().split()
                    existing_manifest_hashes[subpath] = digest

            build_path_length = len(config.build_directory.split(os.sep))
            for root, dirs, files in os.walk(config.build_directory):
                for filename in files:
                    filepath = os.path.join(root, filename)
                    subpath = posixpath.join(*filepath.split(os.sep)[build_path_length:])
                    digest = calculate_hash(filepath)
                    if existing_manifest_hashes.get(subpath, None) == digest:
                        logger.debug("file '%s' already exists and is identical, skipping." % subpath)
                        continue
                    logger.debug("uploading subpath '%s'" % subpath)
                    all_subpaths.append(subpath)
                    key = bucket.delete_key(subpath)
                    key = bucket.new_key(subpath)
                    if is_gzip_file(filepath):
                        logger.debug("mark as a gzipped file")
                        key.set_metadata("Content-Encoding", "gzip")
                    key.set_contents_from_filename(filepath)
                    key.make_public()

            logger.debug("creating cloudfront invalidation request")
            conn_cloudfront.create_invalidation_request(cloudfront_distribution.id, all_subpaths)
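`calculate_hash` is another project helper that is not shown; a sketch, assuming any stable content digest satisfies the manifest format:

import hashlib

def calculate_hash(filepath):
    # Hex-encoded MD5 of the file contents, read in 64 KiB blocks.
    md5 = hashlib.md5()
    with open(filepath, "rb") as f:
        for block in iter(lambda: f.read(65536), b""):
            md5.update(block)
    return md5.hexdigest()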
Example #41
    def s3_simple_file_export(dataStr, bucketKey):
        """
        dataStr = "Is a string"
        bucketKey = 'folder/subfolder/filename.ext'
        """

        aws_host = os.environ.get('AWS_HOST')
        aws_bucket = os.environ.get('S3_BUCKET')
        cloudfront_id = os.environ.get('CLOUDFRONT_ID')
        aws_region = os.environ.get('AWS_REGION')
        access_key_id = os.environ.get('AWS_ACCESS_KEY_ID')
        secret_access_key = os.environ.get('AWS_SECRET_ACCESS_KEY')

        tempFile = "__" + bucketKey.split('/')[-1]

        with open(tempFile, 'w') as f:
            f.write(dataStr)

        conn = boto.s3.connect_to_region(
            aws_region,
            aws_access_key_id=access_key_id,
            aws_secret_access_key=secret_access_key,
            is_secure=True,  # set to False if not using SSL
            calling_format=boto.s3.connection.OrdinaryCallingFormat(),
        )

        bucket = conn.get_bucket(aws_bucket)
        key = bucket.get_key(bucketKey)

        if key is None:
            print("Creating New Bucket")
            key = bucket.new_key(bucketKey)

        key.set_contents_from_filename(tempFile)
        key.set_acl('public-read')

        print("Refreshing Export")
        cloudfront = boto.connect_cloudfront()
        paths = [bucketKey]
        inval_req = cloudfront.create_invalidation_request(
            cloudfront_id, paths)

        os.remove(tempFile)
Example #42
    def setup(self):
        """setup AWS connection"""
        if "." in self.bucket_name:  # work around for a boto bug
            self.s3_connection = S3Connection(
                self.aws_access_key,
                self.aws_secret_key,
                calling_format=OrdinaryCallingFormat(),
                host=self.s3_host,
            )
        else:
            self.s3_connection = S3Connection(
                self.aws_access_key,
                self.aws_secret_key,
            )
        self.bucket = self.s3_connection.get_bucket(self.bucket_name)

        if self.cloudfront_id:
            self.cf_connection = boto.connect_cloudfront(
                self.aws_access_key,
                self.aws_secret_key,
            )
Example #43
    def setup(self):
        """setup AWS connection"""
        if "." in self.bucket_name:  # work around for a boto bug
            self.s3_connection = S3Connection(
                self.aws_access_key,
                self.aws_secret_key,
                calling_format=OrdinaryCallingFormat(),
                host=self.s3_host,
            )
        else:
            self.s3_connection = S3Connection(
                self.aws_access_key,
                self.aws_secret_key,
            )
        self.bucket = self.s3_connection.get_bucket(self.bucket_name)

        if self.cloudfront_id:
            self.cf_connection = boto.connect_cloudfront(
                self.aws_access_key,
                self.aws_secret_key,
            )
Example #44
def push_json_to_s3(lineage, resolution, directory = '../auspice/data/', bucket = 'nextflu-dev', cloudfront = 'E1XKGZG0ZTX4YN'):
	"""Upload JSON files to S3 bucket"""
	"""Boto expects environmental variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY"""
	directory = directory.rstrip('/')+'/'

	import boto
	conn = boto.connect_s3()
	b = conn.get_bucket(bucket)
	k = boto.s3.key.Key(b)

	paths = []	

	print "Uploading JSONs for", lineage, resolution
	for postfix in ['tree.json', 'sequences.json', 'frequencies.json', 'meta.json']:
		json = lineage + '_' + resolution + '_' + postfix
		k.key = 'data/'+json
		k.set_contents_from_filename(directory+json)
		print json,"uploaded"
		paths.append('data/'+json)

	c = boto.connect_cloudfront()
	c.create_invalidation_request(cloudfront, paths)
Example #45
def get_or_create_distribution(s3_bucket):
    if not get_dryrun():
        conn = boto.connect_cloudfront()
        origin_dns = '%s.s3.amazonaws.com' % s3_bucket.name
        origin = boto.cloudfront.origin\
            .S3Origin(origin_dns)
        #        origin = boto.cloudfront.origin\
        #            .S3Origin(s3_bucket.get_website_endpoint())

        distro = None
        dists = conn.get_all_distributions()
        for d in dists:
            if origin_dns == d.get_distribution().config.origin.dns_name:
                distro = d
                break

        if not distro:
            distro = conn.create_distribution(origin=origin, enabled=True)

        return distro
    else:
        print('boto.connect_cloudfront().create_distribution(%s)' % repr(s3_bucket.name))
Example #46
def get_or_create_distribution(s3_bucket):
    if not get_dryrun():
        conn = boto.connect_cloudfront()
        origin_dns = '%s.s3.amazonaws.com' % s3_bucket.name
        origin = boto.cloudfront.origin\
            .S3Origin(origin_dns)
#        origin = boto.cloudfront.origin\
#            .S3Origin(s3_bucket.get_website_endpoint())
        
        distro = None
        dists = conn.get_all_distributions()
        for d in dists:
            if origin_dns == d.get_distribution().config.origin.dns_name:
                distro = d
                break
        
        if not distro:
            distro = conn.create_distribution(origin=origin, enabled=True)
            
        return distro
    else:
        print('boto.connect_cloudfront().create_distribution(%s)' % repr(s3_bucket.name))
Example #47
    def do_activity(self, data=None):
        """
    Do the work
    """
        if (self.logger):
            self.logger.info('data: %s' %
                             json.dumps(data, sort_keys=True, indent=4))

        self.db.connect()

        # cdn.elifesciences.org CDN ID
        distribution_id = self.settings.cdn_distribution_id

        invalidation_list = self.get_invalidation_list()

        # Connect to CloudFront
        c_conn = boto.connect_cloudfront(self.settings.aws_access_key_id,
                                         self.settings.aws_secret_access_key)

        # Limit of 1000 URLs to invalidate at one time
        try:
            count = int(len(invalidation_list) / 1000) + 1
        except:
            # Divide by zero or something else
            return False

        array_of_invalidation_list = self.split_array(invalidation_list, count)

        for i_list in array_of_invalidation_list:
            inval_req = c_conn.create_invalidation_request(
                distribution_id, i_list)

        if (self.logger):
            self.logger.info('LensCDNInvalidation: %s' % "")

        return True
Example #48
 def open_cf(self):
     """
     Returns an open connection to CloudFront
     """
     return boto.connect_cloudfront(
         self.AWS_ACCESS_KEY_ID, self.AWS_SECRET_ACCESS_KEY)
Example #49
access_key = os.getenv("AWS_ACCESS_KEY")
access_secret = os.getenv("AWS_SECRET_KEY")
cloudfront_dist = os.getenv("AWS_CF_DISTRIBUTION_ID")
bucket = os.getenv("AWS_S3_BUCKET")

if access_key == "" or access_key is None:
    print "Please set AWS_ACCESS_KEY env variable."
    sys.exit(1)
elif access_secret == "" or access_secret is None:
    print "Please set AWS_SECRET_KEY env variable."
    sys.exit(1)
elif cloudfront_dist == "" or cloudfront_dist is None:
    print "Please set AWS_CF_DISTRIBUTION_ID env variable."
    sys.exit(1)
elif bucket == "" or bucket is None:
    print "Please set AWS_S3_BUCKET env variable."
    sys.exit(1)

# get the paths from s3
s3_conn = boto.connect_s3(access_key, access_secret)
docs = s3_conn.get_bucket(bucket)
items = []

for key in docs.list():
    items.append(key.name)

cf_conn = boto.connect_cloudfront(access_key, access_secret)
inval_req = cf_conn.create_invalidation_request(cloudfront_dist, items)

print inval_req
sys.exit(0)
Example #50
def create_distro():
    origin = boto.cloudfront.origin.S3Origin(app_util.app_bucket)
    distro = boto.connect_cloudfront().create_distribution(cnames=[app_util.app_name], origin=origin, enabled=False, comment='Snapyelp Distribution')
    return distro
Example #51
def sign_s3_url(url, timeout=None):
    """create a signed url for amazon s3 authentication, requires that ``boto`` be installed"""
    c = boto.connect_cloudfront(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)
    d = c.get_streaming_distribution_info(settings.CLOUDFRONT_DISTRIBUTION_ID)
    e = int(time.time() + (timeout if timeout else getattr(settings, 'CLOUDFRONT_URL_TIMEOUT', 10)))
    return d.create_signed_url(url, settings.CLOUDFRONT_KEY_PAIR_ID, expire_time=e,
                               private_key_file=settings.CLOUDFRONT_PEM)
Example #52
 def __init__(self):
     self.conn = boto.connect_s3(aws_access_key_id=config.my_access_key,
                                 aws_secret_access_key=config.my_secret_key)
     self.c = boto.connect_cloudfront(config.my_access_key, config.my_secret_key)
     print 'establish connection for s3'
Example #53
elif key.endswith('exe'):
    latest = 'latest.exe'
elif key.endswith('32-bit.deb'):
    latest = 'latest-32.deb'
elif key.endswith('64-bit.deb'):
    latest = 'latest-64.deb'
else:
    print 'File name with full version required. .deb files should end in 32-bit.deb or 64-bit.deb'
    sys.exit(1)

conn = boto.connect_s3()
b = conn.get_bucket('lantern')

k = Key(b)
k.key = key
k.copy('lantern', latest, preserve_acl=True)

# Since we've just updated the fixed-name 'latest.x' file in our bucket,
# we need to make sure to invalidate it on cloudfront in case anyone's
# using it.
print 'Invalidating latest installers on CloudFront...'
c = boto.connect_cloudfront()
#rs = c.get_all_distributions()
#ds = rs[1]
#distro = ds.get_distribution()
#print distro.domain_name
#print distro.id
paths = [latest]
inval_req = c.create_invalidation_request(u'E1D7VOTZEUYRZT', paths)
status = c.invalidation_request_status(u'E1D7VOTZEUYRZT', inval_req.id)
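If the script needed to block until the invalidation finishes, the polling pattern from Examples #30 and #34 applies; a sketch:

import time

def wait_for_invalidation(conn, dist_id, request_id, interval=30):
    # Poll the distribution's invalidation requests until ours completes.
    while not any(ir.id == request_id and ir.status == 'Completed'
                  for ir in conn.get_invalidation_requests(dist_id)):
        time.sleep(interval)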
Example #54
 def connect_cloudfront(self):
     "Connect to Cloud Front. This is done automatically for you when needed."
     self.conn_cloudfront = connect_cloudfront(self.AWS_ACCESS_KEY_ID, self.AWS_SECRET_ACCESS_KEY, debug=self.S3UTILS_DEBUG_LEVEL)
Example #55
def main():
    logging.basicConfig(level=logging.INFO)

    parser = argparse.ArgumentParser(
        description='AWS S3 website deployment tool')
    parser.add_argument('-f',
                        '--force',
                        action='store_true',
                        dest='force',
                        help='force upload of all files')
    parser.add_argument('-n',
                        '--dry-run',
                        action='store_true',
                        dest='dry',
                        help='run without uploading any files')
    parser.add_argument(
        'path',
        help='the .s3_website.yaml configuration file or directory',
        default='.',
        nargs='?')
    args = parser.parse_args()

    # Open configuration file
    conf, base_path = config.load_config_file(args.path)

    bucket_name = conf['s3_bucket']
    cache_rules = conf.get('cache_rules', [])
    if 's3_reduced_redundancy' in conf.keys():
        reduced_redundancy = conf['s3_reduced_redundancy']
    else:
        reduced_redundancy = False

    logger.info('Connecting to bucket {}...'.format(bucket_name))

    conn = S3Connection(calling_format=OrdinaryCallingFormat())
    bucket = conn.get_bucket(bucket_name, validate=False)

    site_dir = os.path.join(base_path, conf['site'])

    logger.info('Site: {}'.format(site_dir))

    processed_keys = set()
    updated_keys = set()

    for key in bucket:
        processed_keys.add(key.key)
        path = os.path.join(site_dir, key.key)

        # Delete keys that have been deleted locally
        if not os.path.isfile(path):
            logger.info('Deleting {}...'.format(key.key))
            if not args.dry:
                key.delete()
            updated_keys.add(key.key)
            continue

        # Skip keys that have not been updated
        mtime = int(os.path.getmtime(path))
        if not args.force:
            # Update key metadata if not available.
            # The bucket list() call that is executed through the bucket
            # iteration above actually does obtain the last modified date
            # from the server, but boto currently does not update the key
            # variables based on that. We need to do an additional get_key()
            # request to get the field populated.
            key = bucket.get_key(key.key)
            key_mtime = mktime_tz(parsedate_tz(key.last_modified))
            if mtime <= key_mtime:
                logger.info('Not modified, skipping {}.'.format(key.key))
                continue

        upload_key(key,
                   path,
                   cache_rules,
                   args.dry,
                   replace=True,
                   reduced_redundancy=reduced_redundancy)
        updated_keys.add(key.key)

    for dirpath, dirnames, filenames in os.walk(site_dir):
        key_base = os.path.relpath(dirpath, site_dir)
        for name in filenames:
            path = os.path.join(dirpath, name)
            key_name = key_name_from_path(os.path.join(key_base, name))
            if key_name in processed_keys:
                continue

            # Create new key
            key = Key(bucket)
            key.key = key_name

            logger.info('Creating key {}...'.format(key_name))

            upload_key(key,
                       path,
                       cache_rules,
                       args.dry,
                       replace=False,
                       reduced_redundancy=reduced_redundancy)
            updated_keys.add(key_name)

    logger.info('Bucket update done.')

    # Invalidate files in cloudfront distribution
    if 'cloudfront_distribution_id' in conf:
        logger.info('Connecting to Cloudfront distribution {}...'.format(
            conf['cloudfront_distribution_id']))

        index_pattern = None
        if 'index_document' in conf:
            index_doc = conf['index_document']
            index_pattern = r'(^(?:.*/)?)' + re.escape(index_doc) + '$'

        def path_from_key_name(key_name):
            if index_pattern is not None:
                m = re.match(index_pattern, key_name)
                if m:
                    return m.group(1)
            return key_name

        t = PrefixCoverTree()
        for key_name in updated_keys:
            t.include(path_from_key_name(key_name))
        for key_name in processed_keys - updated_keys:
            t.exclude(path_from_key_name(key_name))

        paths = []
        for prefix, exact in t.matches():
            path = '/' + prefix + ('' if exact else '*')
            logger.info('Preparing to invalidate {}...'.format(path))
            paths.append(path)

        conn = boto.connect_cloudfront()

        if len(paths) > 0:
            dist_id = conf['cloudfront_distribution_id']
            if not args.dry:
                logger.info('Creating invalidation request...')
                conn.create_invalidation_request(dist_id, paths)
        else:
            logger.info('Nothing updated, skipping invalidation...')

        logger.info('Cloudfront invalidation done.')
Example #56
import boto, os
conn = boto.connect_cloudfront(os.environ['AWS_ACCESS_KEY_ID'],
                               os.environ['AWS_SECRET_ACCESS_KEY'])
folder_name = os.path.split(os.getcwd())[-1]  # name of the current directory
print "Invalidating files: "
paths = [
    'en/' + folder_name + '/index.html',
    'en/' + folder_name + '/game.js',
    'en/' + folder_name + '/api.js',
    'en/' + folder_name + '/promo.zip',
    # Add more files
]
for path in paths:
    print path
inval_req = conn.create_invalidation_request(u'E1OS1XR1ELDPOJ', paths)

print 'Cloudfront invalidation done ... please check again after 5 minutes'
Example #57
 def open_cf(self):
     """
     Returns an open connection to CloudFront
     """
     return boto.connect_cloudfront(self.AWS_ACCESS_KEY_ID,
                                    self.AWS_SECRET_ACCESS_KEY)
Example #58
import boto
c = boto.connect_cloudfront()
all_distributions = c.get_all_distributions()
print all_distributions
for distribution in all_distributions:
	print distribution.domain_name, "-->", distribution.comment, "-->", distribution.origin, "-->", distribution.status
#print distribution_id.domain_name
Example #59
def main():
    cloudfront_connection = boto.connect_cloudfront()
    s3_connection = connect_to_s3()
    bucket = Bucket(s3_connection, S3_BUCKET)
    publish(bucket, cloudfront_connection)