Example #1
def test_s3join(self):
    """
    Verify that s3 pathing is assembled correctly
    """
    self.assertEqual(s3join('a', 'b', 'c'), 'a/b/c')
    # Double slashes should become single slashes:
    self.assertEqual(s3join('a/', '/b/', '/c'), 'a/b/c')
    # Verify that trailing / is preserved:
    self.assertEqual(s3join('a', 'b', 'c/'), 'a/b/c/')
    return
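The s3join() helper itself is not shown on this page. A minimal sketch consistent with the assertions above might look like the following; the implementation is an assumption, only the behaviour is taken from the test:

def s3join(*parts):
    """
    Hypothetical sketch of s3join(): join key components with '/',
    collapsing the doubled slashes produced by components that carry
    their own leading or trailing '/', while preserving a trailing '/'
    on the final component.
    """
    joined = "/".join(part.strip("/") for part in parts)
    if parts and parts[-1].endswith("/"):
        joined += "/"
    return joined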
Example #2
def upload_repodata(context):
    """
    Upload repodata to the specified bucket.
    """
    upload_directory(context, context.working_dir, context.opts.path,
                     context.s3_rpm_items)

    # ALWAYS delete the existing s3 metadata items and upload the new ones.
    # We NEVER use check_items here:
    # Delete old metadata:
    for item in context.s3_repodata_items:
        verbose("Deleting old metadata file: %s", item.name)
        if not context.opts.dry_run:
            item.delete()

    # Delete any RPMs matched by --remove patterns:
    for item in context.s3_rpm_items:
        for remove_rpm in context.opts.remove:
            if fnmatch.fnmatch(item.name, remove_rpm):
                verbose("Deleting: %s", item.name)
                if not context.opts.dry_run:
                    item.delete()

    # Upload new metadata:
    repo_dest = s3join(context.opts.path, REPODATA)
    upload_directory(context, context.working_dir_repodata, repo_dest)
    return
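upload_repodata() pulls everything it needs off a single context object. Purely for illustration, and assuming nothing beyond the attributes referenced on this page, a stand-in context for a dry run could be assembled like this (every value below is hypothetical):

from argparse import Namespace

# Hypothetical stand-in for the real context object; the attribute names
# come from the functions on this page, the values are illustrative only.
context = Namespace(
    working_dir="/tmp/repo",                     # local dir holding the RPMs
    working_dir_repodata="/tmp/repo/repodata",   # freshly generated metadata
    opts=Namespace(
        path="centos/7/x86_64",                  # key prefix inside the bucket
        bucket="my-yum-repo-bucket",             # bucket name (illustrative)
        remove=["old-package-*.rpm"],            # --remove patterns
        dry_run=True,                            # log actions, touch nothing
        force_upload=False,
        verbose=True),
    s3_bucket=None,                              # a boto Bucket in real use
    s3_rpm_items=[],                             # existing Keys under 'path'
    s3_repodata_items=[])                        # existing Keys under repodata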
Example #3
def upload_directory(context, dir_path, upload_prefix, check_items=[]):
    """
    Upload all the files in the directory 'dir_path' into the s3 bucket.
    The variable 'upload_prefix' is the path relative to the s3 bucket.
    The list 'check_items' contains existing s3 items at this path.
    If an item to be uploaded is found in check_items, it is skipped.
    """

    items_by_name = {os.path.basename(item.name): item for item in check_items}

    # Upload RPMs:
    for filename in os.listdir(dir_path):
        filepath = os.path.join(dir_path, filename)
        remote_item = items_by_name.get(filename, None)

        # Skip anything that is not a regular file:
        if not os.path.isfile(filepath):
            continue

        # Skip anything that doesn't need to be uploaded:
        if not should_upload(filepath, remote_item, context.opts.force_upload):
            verbose('File "%s" already exists in S3 location "%s"; skipping upload',
                    filename, upload_prefix)
            continue

        # Perform the upload:
        dest_path = s3join(upload_prefix, filename)
        item_key = boto.s3.key.Key(context.s3_bucket)
        item_key.key = dest_path
        if not context.opts.dry_run:
            item_key.set_contents_from_filename(
                filepath, cb=get_progress_fn(context.opts.verbose, "Uploading: %s" % dest_path)
            )
        else:
            verbose("Uploading: %s" % dest_path)
    return
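should_upload() is referenced but not defined on this page. A plausible sketch, assuming the decision compares the local file against the remote boto Key (the real check may differ), is:

import hashlib
import os


def should_upload(filepath, remote_item, force_upload):
    """
    Hypothetical sketch of should_upload(): upload when forced, when no
    matching key exists in S3, or when the local file appears to differ
    from the remote copy (size first, then MD5 against the key's etag).
    """
    if force_upload or remote_item is None:
        return True
    if os.path.getsize(filepath) != remote_item.size:
        return True
    with open(filepath, "rb") as handle:
        local_md5 = hashlib.md5(handle.read()).hexdigest()
    return local_md5 != remote_item.etag.strip('"')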
Example #4
def list_metadata(context):
    """
    List the current repo metadata items in s3, storing in s3_repodata_items.
    """
    context.s3_repodata_path = s3join(context.opts.path, REPODATA)
    key_list = context.s3_bucket.list(prefix=context.s3_repodata_path)

    context.s3_repodata_items = []
    for item in key_list:
        if FOLDER_SUFFIX in item.name:
            continue

        context.s3_repodata_items.append(item)

    return
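For context, the bucket handle stored on context.s3_bucket is an ordinary boto Bucket. One hedged example of obtaining it, with an illustrative bucket name and credentials resolved by boto's usual mechanisms:

import boto

# Illustrative only: open the bucket that list_metadata() and the upload
# helpers operate on.  Credentials come from the environment, ~/.boto or
# an IAM role; "my-yum-repo-bucket" is a placeholder name.
connection = boto.connect_s3()
s3_bucket = connection.get_bucket("my-yum-repo-bucket")   # -> context.s3_bucket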
Example #5
def print_lists(context):
    """
    Print repo info and bail.
    """
    def list_item(item):
        print("\t%s - %ib - %s" % (item.name, item.size, item.last_modified))

    print("Repo info for %s:" % s3join(context.opts.bucket,
                                       context.opts.path))
    for metadata_item in context.s3_repodata_items:
        list_item(metadata_item)

    for rpm_item in context.s3_rpm_items:
        list_item(rpm_item)
    return