Example #1
import json
import os
from argparse import ArgumentParser

# json_dump is a project-local helper that writes JSON data to a file
# (a sketch of what it might look like follows this example).


def main():
    """Rewrite simplestreams stanzas so they target a new agent stream."""
    parser = ArgumentParser()
    parser.add_argument('in_file', metavar='IN-FILE', help='The file to read.')
    parser.add_argument('out_file',
                        metavar='OUT-FILE',
                        help='The file to write.')
    parser.add_argument(
        'stream_id',
        metavar="STREAM-ID",
        help='The new stream for the items.  By default, a revision-build'
        ' stream.')
    parser.add_argument(
        '--update-path',
        action='store_true',
        help='Update the path to put the agent in "agent/STREAM"')
    parser.add_argument(
        '--agent-stream',
        action='store_true',
        help='Interpret STREAM-ID as an agent-stream value, not a'
        ' revision build.')
    args = parser.parse_args()
    with open(args.in_file) as in_file:
        stanzas = json.load(in_file)
    if args.agent_stream:
        stream = args.stream_id
    else:
        stream = 'revision-build-{}'.format(args.stream_id)
    content_id = 'com.ubuntu.juju:{}:tools'.format(stream)
    for stanza in stanzas:
        stanza['content_id'] = content_id
        if not args.update_path:
            continue
        path = os.path.join('agent', stream, os.path.basename(stanza['path']))
        stanza['path'] = path
    json_dump(stanzas, args.out_file)
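This example (and the ones below) call a json_dump helper that is not shown. Here is a minimal sketch of what such a helper might look like, assuming it simply serializes the data to a file with stable, human-readable formatting; the project's real helper may differ in details.

# Hypothetical stand-in for the project's json_dump helper.
import json


def json_dump(data, filename):
    # Write JSON with indentation and sorted keys so regenerated files
    # diff cleanly.
    with open(filename, 'w') as out_file:
        json.dump(data, out_file, indent=2, sort_keys=True)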
Example #2
import os
from copy import deepcopy

# generate_index, WindowsFriendlyNamer, json_dump and the util module used
# below (products_condense, mkdir_p) come from the surrounding project and
# simplestreams.


def write_juju_streams(out_d, trees, updated, sticky):
    # Based on simplestreams.json2streams.write_juju_streams +
    # simplestreams.generate_simplestreams.write_streams,
    # but allows sticky to be specified.
    namer = WindowsFriendlyNamer
    index = generate_index(trees, updated, namer)
    to_write = [(
        namer.get_index_path(),
        index,
    )]
    # Don't let products_condense modify the input
    trees = deepcopy(trees)
    for content_id in trees:
        util.products_condense(trees[content_id], sticky=sticky)
        content = trees[content_id]
        to_write.append((
            index['index'][content_id]['path'],
            content,
        ))
    out_filenames = []
    for (outfile, data) in to_write:
        filef = os.path.join(out_d, outfile)
        util.mkdir_p(os.path.dirname(filef))
        json_dump(data, filef)
        out_filenames.append(filef)
    return out_filenames
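write_juju_streams above and write_release_index below only need the namer classes (WindowsFriendlyNamer, JujuFileNamer, FileNamer) to answer where the index file lives. A minimal sketch of that interface, with a hypothetical class name and an illustrative directory layout rather than the project's actual values:

# Hypothetical namer sketch; the class name and path below are illustrative
# assumptions, not the project's real values.
class ExampleNamer:

    stream_dir = 'streams/v1'

    @classmethod
    def get_index_path(cls):
        # Relative path (under the output directory) of the index file.
        return '{}/index.json'.format(cls.stream_dir)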
Example #3
import json
import os

# FileNamer, JujuFileNamer and json_dump are project/simplestreams helpers.


def write_release_index(out_d):
    in_path = os.path.join(out_d, JujuFileNamer.get_index_path())
    with open(in_path) as in_file:
        full_index = json.load(in_file)
    # Keep only the released tools entry in the public index.
    full_index['index'] = {
        k: v for k, v in full_index['index'].items()
        if k == 'com.ubuntu.juju:released:tools'}
    out_path = os.path.join(out_d, FileNamer.get_index_path())
    json_dump(full_index, out_path)
    return out_path
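For orientation, this is the kind of filtering write_release_index performs on the 'index' mapping; the content ids match the ones used in these examples, but the paths and the revision-build number are illustrative only:

# Illustrative index data; only the released tools entry survives filtering.
full_index = {
    'index': {
        'com.ubuntu.juju:released:tools': {
            'path': 'streams/v1/released.json'},
        'com.ubuntu.juju:revision-build-1234:tools': {
            'path': 'streams/v1/revision-build-1234.json'},
    },
}
filtered = {k: v for k, v in full_index['index'].items()
            if k == 'com.ubuntu.juju:released:tools'}
print(sorted(filtered))  # ['com.ubuntu.juju:released:tools']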
Example #4
    def write_stanzas(self):
        """Write stanzas about the file to the filename.

        This calculates the hashes as part of the procedure.
        """
        # Open in binary mode so the hash objects receive bytes.
        with open(self.tarfile, 'rb') as tarfile_fp:
            content = tarfile_fp.read()
        hashes = {}
        for hash_algorithm in self.hash_algorithms:
            hash_obj = hashlib.new(hash_algorithm)
            hash_obj.update(content)
            hashes[hash_algorithm] = hash_obj.hexdigest()
        stanzas = list(self.make_stanzas(hashes, len(content)))
        json_dump(stanzas, self.filename)
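The hashing loop in write_stanzas works with any algorithm name hashlib recognizes. The same pattern in a standalone form, where the byte string and the ('md5', 'sha256') pair merely stand in for the tarball contents and self.hash_algorithms:

import hashlib

content = b'example payload'  # stands in for the tarball's bytes
hashes = {}
for hash_algorithm in ('md5', 'sha256'):
    hash_obj = hashlib.new(hash_algorithm)
    hash_obj.update(content)
    hashes[hash_algorithm] = hash_obj.hexdigest()
print(hashes['sha256'])  # hex digest of the payload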
Example #5
import json
import re
from argparse import ArgumentParser

# json_dump is the same project-local JSON writer used in the other examples.


def main():
    # Rewrite agent stanza paths to a normalized 'agent/VERSION/FILE' layout
    # and record where each (deduplicated) agent can be downloaded from.
    parser = ArgumentParser()
    parser.add_argument('input')
    parser.add_argument('output')
    args = parser.parse_args()
    paths_hashes = {}
    with open(args.input) as input_file:
        stanzas = json.load(input_file)
    hashes = {}
    old_hash_urls = {}
    for stanza in stanzas:
        path_hash = stanza['sha256']
        old_hash_urls[path_hash] = stanza['item_url']
        agent_filename = stanza['path'].split('/')[-1]
        path = 'agent/{}/{}'.format(stanza['version'], agent_filename)
        # Collapse the per-series Windows names (win2012, win7, win81, ...)
        # into a single 'windows' label.
        path = re.sub('-win(2012(hv)?(r2)?|2016(nano)?|7|8|81|10)-',
                      '-windows-', path)
        paths_hashes.setdefault(path, stanza['sha256'])
        if paths_hashes[path] != path_hash:
            raise ValueError('Conflicting hash')
        stanza['path'] = path
        hashes[path] = path_hash
    # Group paths by hash so any hash that now maps to more than one path
    # after the rewrite gets reported.
    ph_list = {}
    for path, path_hash in hashes.items():
        ph_list.setdefault(path_hash, set()).add(path)
    for path_hash, paths in ph_list.items():
        if len(paths) > 1:
            print(paths)
    json_dump(stanzas, args.output)
    agent_downloads = []
    for stanza in stanzas:
        agent_downloads.append({
            'path': stanza['path'],
            'sha256': stanza['sha256'],
            'url': old_hash_urls[stanza['sha256']],
        })
    json_dump(agent_downloads, 'downloads-' + args.output)
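A quick check of the Windows path normalization performed above, using the same pattern on a made-up filename (the version and filename are illustrative, not taken from real stream data):

import re

path = 'agent/2.0.1/juju-2.0.1-win2012r2-amd64.tgz'
path = re.sub('-win(2012(hv)?(r2)?|2016(nano)?|7|8|81|10)-',
              '-windows-', path)
print(path)  # agent/2.0.1/juju-2.0.1-windows-amd64.tgz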