Example 1
def sync_rpm_repository(repo_path, yum_config, repos):
    lock_path = os.path.join(repo_path, 'repolock')

    if not os.path.exists(repo_path):
        os.makedirs(repo_path)

    # Avoid cache issues due to repo injection to reposync-config.repo
    tmp_dir = tempfile.mkdtemp(prefix='reposync_')
    tmp_cache_dir = os.path.join(tmp_dir, 'cache')
    os.mkdir(tmp_cache_dir)

    LOGGER.debug('Using {} as cache dir for reposync'.format(tmp_cache_dir))

    reposync_base_cmd = [
        'reposync', '--config', yum_config, '--download_path', repo_path,
        '--newest-only', '--delete', '--cachedir', tmp_cache_dir
    ]
    with LogTask('Running reposync'):
        try:
            for repo in repos:
                with LockFile(lock_path, timeout=180):
                    reposync_cmd = reposync_base_cmd + ['--repoid', repo]
                    ret, out, _ = run_command(reposync_cmd)
                    if not ret:
                        LOGGER.debug(
                            'reposync on repo {}: success.'.format(repo))
                        continue

                    LOGGER.info('repo: {}: failed, re-running.'.format(repo))
                    _fix_reposync_issues(reposync_out=out,
                                         repo_path=os.path.join(
                                             repo_path, repo))
                    ret, _, _ = run_command(reposync_cmd)
                    if not ret:
                        continue

                    LOGGER.info('repo: {}: failed. clearing cache '
                                'and re-running.'.format(repo))
                    shutil.rmtree(tmp_cache_dir)
                    os.mkdir(tmp_cache_dir)

                    ret, out, err = run_command(reposync_cmd)
                    if ret:
                        LOGGER.error(
                            'reposync command failed for repoid: {repo_id}\n'
                            'stdout:\n{stdout}\n'
                            'stderr:\n{stderr}'.format(repo_id=repo,
                                                       stdout=out,
                                                       stderr=err))

                        raise RuntimeError(
                            ('Failed to run reposync 3 times '
                             'for repoid: {}, aborting.').format(repo))
        finally:
            LOGGER.debug('Removing temp cache dir {}'.format(tmp_dir))
            shutil.rmtree(tmp_dir)
Example 2
File: vm.py Project: bellle/lago
    def _reclaim_disk(self, path):
        qemu_uid = None
        try:
            qemu_uid = pwd.getpwnam('qemu').pw_uid
        except KeyError:
            pass
        if qemu_uid is not None and os.stat(path).st_uid == qemu_uid:
            utils.run_command(['sudo', '-u', 'qemu', 'chmod', 'a+rw', path])
        else:
            os.chmod(path, 0o0666)
Example 3
File: vm.py Project: nirs/lago
    def _reclaim_disk(self, path):
        qemu_uid = None
        try:
            qemu_uid = pwd.getpwnam('qemu').pw_uid
        except KeyError:
            pass
        if qemu_uid is not None and os.stat(path).st_uid == qemu_uid:
            utils.run_command(['sudo', '-u', 'qemu', 'chmod', 'a+rw', path])
        else:
            os.chmod(path, 0o0666)
Example 4
def sync_rpm_repository(repo_path, yum_config, repos):
    lock_path = os.path.join(repo_path, 'repolock')

    if not os.path.exists(repo_path):
        os.makedirs(repo_path)

    reposync_base_cmd = [
        'reposync', '--config=%s' % yum_config,
        '--download_path=%s' % repo_path, '--newest-only', '--delete',
        '--cachedir=%s/cache' % repo_path
    ]
    with LogTask('Running reposync'):
        for repo in repos:
            with LockFile(lock_path, timeout=180):
                reposync_cmd = reposync_base_cmd + ['--repoid=%s' % repo]
                ret, out, _ = run_command(reposync_cmd)
                if not ret:
                    LOGGER.debug('reposync on repo %s: success.' % repo)
                    continue

                LOGGER.info('repo: %s: failed, re-running.', repo)
                _fix_reposync_issues(
                    reposync_out=out, repo_path=os.path.join(repo_path, repo)
                )
                ret, _, _ = run_command(reposync_cmd)
                if not ret:
                    continue

                LOGGER.info(
                    'repo: %s: failed. clearing cache and re-running.', repo
                )
                shutil.rmtree('%s/cache' % repo_path)

                ret, out, err = run_command(reposync_cmd)
                if ret:
                    LOGGER.error(
                        'reposync command failed for repoid: %s', repo
                    )
                    LOGGER.error(
                        'reposync stdout for repoid: %s: \n%s', repo, out
                    )
                    LOGGER.error(
                        'reposync stderr for repoid: %s: \n%s', repo, err
                    )

                    raise RuntimeError(
                        (
                            'Failed to run reposync 3 times '
                            'for repoid: %s, aborting.'
                        ) % repo
                    )
Example 5
def test_ansible_inventory(monkeypatch, env, test_results, vms):

    # ansible returns the results in bulk to stdout. Ideally we would test
    # for the hostname of each machine, but that is broken on debian.
    # Instead, we let it compute something and count the unique occurrences.

    cmd = 'echo __abcd$(( 24 + 12 ))efgh___'
    expected = '__abcd36efgh__'
    results = []

    with env.ansible_inventory_temp_file(keys=['groups']) as inv:
        for group in ['group0', 'group1']:
            logfile = os.path.join(
                test_results, 'ansible-{0}.log'.format(group)
            )
            monkeypatch.setenv('ANSIBLE_LOG_PATH', logfile)
            monkeypatch.setenv('ANSIBLE_HOST_KEY_CHECKING', 'False')
            res = run_command(
                [
                    'ansible', 'groups={0}'.format(group), '-v', '-u', 'root',
                    '-i', inv.name, '-m', 'raw', '-a', cmd
                ]
            )

            assert res.code == 0
            assert res.out is not None
            results.append(res)

    occurrences = sum([result.out.count(expected) for result in results])

    assert occurrences == len(vms.keys())
Example 6
def find_repo_by_name(name, repo_dir=None):
    """
    Searches for the given repo name inside repo_dir (will use the config value
    'template_repos' if no repo dir is passed), will raise an exception if not
    found

    Args:
        name (str): Name of the repo to search
        repo_dir (str): Directory where to search the repo

    Return:
        TemplateRepository: repo matching the given name

    Raises:
        RuntimeError: if not found
    """
    if repo_dir is None:
        repo_dir = config.get('template_repos')

    ret, out, _ = utils.run_command([
        'find',
        repo_dir,
        '-name',
        '*.json',
    ], )

    repos = [
        TemplateRepository.from_url(line.strip()) for line in out.split('\n')
        if len(line.strip())
    ]

    for repo in repos:
        if repo.name == name:
            return repo
    raise RuntimeError('Could not find repo %s' % name)
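A hedged usage sketch for find_repo_by_name(); the repo name and directory below are hypothetical, and the return value is the matching TemplateRepository object rather than a plain path:

    # Hypothetical usage: resolve a repo definition stored as a *.json file.
    # With repo_dir omitted, the configured 'template_repos' dir is scanned.
    repo = find_repo_by_name('office-repo', repo_dir='/usr/share/lago/repos')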
Example 7
def test_ansible_inventory(monkeypatch, env, test_results, vms):

    # ansible returns the results in bulk to stdout. Ideally we would test
    # for the hostname of each machine, but that is broken on debian.
    # Instead, we let it compute something and count the unique occurrences.

    cmd = 'echo __abcd$(( 24 + 12 ))efgh___'
    expected = '__abcd36efgh__'
    results = []

    with env.ansible_inventory_temp_file(keys=['groups']) as inv:
        for group in ['group0', 'group1']:
            logfile = os.path.join(test_results,
                                   'ansible-{0}.log'.format(group))
            monkeypatch.setenv('ANSIBLE_LOG_PATH', logfile)
            monkeypatch.setenv('ANSIBLE_HOST_KEY_CHECKING', 'False')
            res = run_command([
                'ansible', 'groups={0}'.format(group), '-v', '-u', 'root',
                '-i', inv.name, '-m', 'raw', '-a', cmd
            ])

            assert res.code == 0
            assert res.out is not None
            results.append(res)

    occurrences = sum([result.out.count(expected) for result in results])

    assert occurrences == len(vms.keys())
Example 8
File: vm.py Project: bellle/lago
    def revert_snapshot(self, name):
        try:
            snap_info = self.vm._spec['snapshots'][name]
        except KeyError:
            raise RuntimeError('No snapshot %s for %s' %
                               (name, self.vm.name()))

        with LogTask('Reverting %s to snapshot %s' % (self.vm.name(), name)):

            was_alive = self.alive()
            if was_alive:
                self.stop()
            for disk, disk_template in zip(self.vm._spec['disks'], snap_info):
                os.unlink(os.path.expandvars(disk['path']))
                ret, _, _ = utils.run_command(
                    [
                        'qemu-img',
                        'create',
                        '-f',
                        'qcow2',
                        '-b',
                        disk_template['path'],
                        disk['path'],
                    ],
                    cwd=os.path.dirname(os.path.expandvars(disk['path'])),
                )
                if ret != 0:
                    raise RuntimeError('Failed to revert disk')

            self._reclaim_disks()
            if was_alive:
                self.start()
Example 9
def _fix_reposync_issues(reposync_out, repo_path):
    """
    Fix for the issue described at::
        https://bugzilla.redhat.com/show_bug.cgi?id=1332441
    """
    LOGGER.warn(
        'Due to bug https://bugzilla.redhat.com/show_bug.cgi?id=1332441 '
        'sometimes reposync fails to update some packages that have older '
        'versions already downloaded, will remove those if any and retry')
    package_regex = re.compile(r'(?P<package_name>[^:\r\s]+): \[Errno 256\]')
    for match in package_regex.findall(reposync_out):
        find_command = [
            'find',
            repo_path,
            '-name',
            match + '*',
        ]
        ret, out, _ = run_command(find_command)

        if ret:
            raise RuntimeError('Failed to execute %s' % find_command)

        for to_remove in out.splitlines():
            if not to_remove.startswith(repo_path):
                LOGGER.warn('Skipping out-of-repo file %s', to_remove)
                continue

            LOGGER.info('Removing: %s', to_remove)
            os.unlink(to_remove)
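To make the regex above concrete, here is a small self-contained sketch with a made-up reposync error line shaped like the yum "[Errno 256]" failure the function targets:

    import re

    # Illustrative only: one failing package line as reposync would print it.
    sample_out = 'foo-1.0-1.el7.x86_64: [Errno 256] No more mirrors to try.'
    package_regex = re.compile(r'(?P<package_name>[^:\r\s]+): \[Errno 256\]')
    # findall() yields the captured package names, which the function then
    # passes to `find` so the stale copies under repo_path can be removed.
    assert package_regex.findall(sample_out) == ['foo-1.0-1.el7.x86_64']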
Example 10
def _fix_reposync_issues(reposync_out, repo_path):
    """
    Fix for the issue described at::
        https://bugzilla.redhat.com/show_bug.cgi?id=1332441
    """
    LOGGER.warn(
        'Due to bug https://bugzilla.redhat.com/show_bug.cgi?id=1332441 '
        'sometimes reposync fails to update some packages that have older '
        'versions already downloaded, will remove those if any and retry'
    )
    package_regex = re.compile(r'(?P<package_name>[^:\r\s]+): \[Errno 256\]')
    for match in package_regex.findall(reposync_out):
        find_command = ['find', repo_path, '-name', match + '*', ]
        ret, out, _ = run_command(find_command)

        if ret:
            raise RuntimeError('Failed to execute %s' % find_command)

        for to_remove in out.splitlines():
            if not to_remove.startswith(repo_path):
                LOGGER.warn('Skipping out-of-repo file %s', to_remove)
                continue

            LOGGER.info('Removing: %s', to_remove)
            os.unlink(to_remove)
Example 11
File: vm.py Project: nirs/lago
    def revert_snapshot(self, name):
        try:
            snap_info = self.vm._spec['snapshots'][name]
        except KeyError:
            raise RuntimeError(
                'No snapshot %s for %s' % (name, self.vm.name())
            )

        with LogTask('Reverting %s to snapshot %s' % (self.vm.name(), name)):

            was_alive = self.alive()
            if was_alive:
                self.stop()
            for disk, disk_template in zip(self.vm._spec['disks'], snap_info):
                os.unlink(os.path.expandvars(disk['path']))
                ret, _, _ = utils.run_command(
                    [
                        'qemu-img',
                        'create',
                        '-f',
                        'qcow2',
                        '-b',
                        disk_template['path'],
                        disk['path'],
                    ],
                    cwd=os.path.dirname(os.path.expandvars(disk['path'])),
                )
                if ret != 0:
                    raise RuntimeError('Failed to revert disk')

            self._reclaim_disks()
            if was_alive:
                self.start()
Example 12
def sync_rpm_repository(repo_path, yum_config, repos):
    lock_path = os.path.join(repo_path, 'repolock')

    if not os.path.exists(repo_path):
        os.makedirs(repo_path)

    reposync_command = [
        'reposync',
        '--config=%s' % yum_config,
        '--download_path=%s' % repo_path,
        '--newest-only',
        '--delete',
        '--cachedir=%s/cache' % repo_path,
    ] + [
        '--repoid=%s' % repo for repo in repos
    ]

    with LockFile(lock_path, timeout=180):
        with LogTask('Running reposync'):
            ret, out, _ = run_command(reposync_command)
        if not ret:
            return

        _fix_reposync_issues(reposync_out=out, repo_path=repo_path)
        with LogTask('Rerunning reposync'):
            ret, _, _ = run_command(reposync_command)
        if not ret:
            return

        LOGGER.warn(
            'Failed to run reposync again, that usually means that '
            'some of the local rpms might be corrupted or the metadata '
            'invalid, cleaning caches and retrying a second time'
        )
        shutil.rmtree('%s/cache' % repo_path)
        with LogTask('Rerunning reposync a last time'):
            ret, out, err = run_command(reposync_command)
        if ret:
            LOGGER.error(
                'reposync command failed with following output: %s\n'
                'and following error: %s', out, err
            )
            raise RuntimeError(
                'Failed to run reposync a second time, aborting'
            )

        return
Example 13
def do_update(args):
    repos_dir = config.get('template_repos')
    ret, out, _ = utils.run_command(
        ['find', repos_dir, '-type', 'd', '-name', '.git'], )

    for line in [l.strip() for l in out.split('\n') if len(l)]:
        repo_path = os.path.dirname(line)
        print 'Updating %s' % repo_path

        for command in [
            ['git', 'fetch'],
            ['git', 'reset', '--hard'],
            ['git', 'checkout', 'origin/master'],
        ]:
            ret, _, _ = utils.run_command(command, cwd=repo_path)
            if ret:
                raise RuntimeError('Error running: %s' % (' '.join(command)))
Example 14
def merge(output_dir, sources, repoman_config=None):
    """
    Run repoman on ``sources``, creating a new RPM repository in
    ``output_dir``

    Args:
        output_dir(str): Path to create new repository
        sources(list of str): repoman sources
        repoman_config(str): repoman configuration file, if not passed it will
            use default repoman configurations, equivalent to:

            |  [main]
            |  on_empty_source=warn
            |  [store.RPMStore]
            |  on_wrong_distro=copy_to_all
            |  with_srcrpms=false
            |  with_sources=false

    Raises:
        :exc:`RepositoryMergeError`: If repoman command failed.
        :exc:`IOError`: If ``repoman_config`` is passed but does not exist.

    Returns:
        None
    """
    cmd = []
    cmd_suffix = [
        '--option=store.RPMStore.rpm_dir=', output_dir, 'add'
    ] + sources
    if repoman_config is None:
        repoman_params = [
            '--option=main.on_empty_source=warn',
            '--option=store.RPMStore.on_wrong_distro=copy_to_all',
            '--option=store.RPMStore.with_srcrpms=false',
            '--option=store.RPMStore.with_sources=false',
        ]
        cmd = ['repoman'] + repoman_params + cmd_suffix
    else:
        if os.path.isfile(repoman_config):
            cmd = ['repoman', '--config={0}'.format(repoman_config)
                   ] + cmd_suffix
        else:
            raise IOError(
                ('error running repoman, {0} not '
                 'found').format(repoman_config)
            )

    with LogTask('Running repoman'):
        res = run_command(cmd)
        if res.code:
            raise RepositoryMergeError(
                (
                    'Failed merging repoman sources: {0} into directory: {1}, '
                    'check lago.log for repoman output '
                ).format(sources, output_dir)
            )
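A minimal calling sketch for this merge() variant; the output directory and source paths are placeholders, and leaving repoman_config unset keeps the default options listed in the docstring:

    # Hypothetical usage: merge two repoman sources into a new repository.
    merge(
        output_dir='/tmp/merged-repo',
        sources=['/tmp/repo-a', '/tmp/repo-b'],
    )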
Example 15
def merge(output_dir, sources):
    with LogTask('Running repoman'):
        res = run_command([
            'repoman', '--option=main.on_empty_source=warn',
            '--option=store.RPMStore.on_wrong_distro=copy_to_all',
            '--option=store.RPMStore.rpm_dir=', output_dir, 'add'
        ] + sources)
        if res.code:
            raise RepositoryMergeError('Failed to merge repos %s into %s' %
                                       (sources, output_dir))
Example 16
def sync_rpm_repository(repo_path, yum_config, repos):
    lock_path = os.path.join(repo_path, 'repolock')

    if not os.path.exists(repo_path):
        os.makedirs(repo_path)

    reposync_command = [
        'reposync',
        '--config=%s' % yum_config,
        '--download_path=%s' % repo_path,
        '--newest-only',
        '--delete',
        '--cachedir=%s/cache' % repo_path,
    ] + ['--repoid=%s' % repo for repo in repos]

    with LockFile(lock_path, timeout=180):
        with LogTask('Running reposync'):
            ret, out, _ = run_command(reposync_command)
        if not ret:
            return

        _fix_reposync_issues(reposync_out=out, repo_path=repo_path)
        with LogTask('Rerunning reposync'):
            ret, _, _ = run_command(reposync_command)
        if not ret:
            return

        LOGGER.warn(
            'Failed to run reposync again, that usually means that '
            'some of the local rpms might be corrupted or the metadata '
            'invalid, cleaning caches and retrying a second time')
        shutil.rmtree('%s/cache' % repo_path)
        with LogTask('Rerunning reposync a last time'):
            ret, out, err = run_command(reposync_command)
        if ret:
            LOGGER.error(
                'reposync command failed with following output: %s\n'
                'and following error: %s', out, err)
            raise RuntimeError(
                'Failed to run reposync a second time, aborting')

        return
Example 17
def do_add(args):
    ret, _, _ = utils.run_command(
        [
            'git',
            'clone',
            args.url,
        ],
        cwd=config.get('template_repos'),
    )
    if ret:
        raise RuntimeError('Failed to clone the repository')
Example 18
def do_update(args):
    repos_dir = config.get('template_repos')
    ret, out, _ = utils.run_command(
        [
            'find', repos_dir, '-type', 'd', '-name', '.git'
        ],
    )

    for line in [l.strip() for l in out.split('\n') if len(l)]:
        repo_path = os.path.dirname(line)
        print 'Updating %s' % repo_path

        for command in [
            ['git', 'fetch'],
            ['git', 'reset', '--hard'],
            ['git', 'checkout', 'origin/master'],
        ]:
            ret, _, _ = utils.run_command(command, cwd=repo_path)
            if ret:
                raise RuntimeError('Error running: %s' % (' '.join(command)))
Example 19
def do_add(args):
    ret, _, _ = utils.run_command(
        [
            'git',
            'clone',
            args.url,
        ],
        cwd=config.get('template_repos'),
    )
    if ret:
        raise RuntimeError('Failed to clone the repository')
Example 20
def exists(name):
    ret, out, _ = utils.run_command(
        ['ip', '-o', 'link', 'show', 'type', 'bridge'])
    if ret:
        raise RuntimeError('Failed to check if bridge {} exists'.format(name))

    for entry in out.decode('utf-8').splitlines():
        if name == entry.split(':')[1].strip():
            return True

    return False
Example 21
    def extract_image_xz(path):
        if not path.endswith('.xz'):
            os.rename(path, path + '.xz')
            path = path + '.xz'

        with log_utils.LogTask('Decompress local image', logger=LOGGER):
            ret = utils.run_command(
                ['xz', '--threads=0', '--decompress', path], )

        if ret:
            raise RuntimeError('Failed to decompress %s' % path)
Example 22
def initialize(templates_dir, clone_url):
    if os.path.exists(templates_dir):
        raise RuntimeError('Failed to initialize, path exists')
    os.makedirs(templates_dir)

    # Clone remote repo:
    ret, _, _ = utils.run_command(
        ['git', 'clone', clone_url, 'git-repo'],
        cwd=templates_dir
    )
    if ret != 0:
        raise RuntimeError('Failed to clone remote repository')
Example 23
def merge(output_dir, sources):
    with LogTask('Running repoman'):
        res = run_command(
            [
                'repoman', '--option=main.on_empty_source=warn',
                '--option=store.RPMStore.on_wrong_distro=copy_to_all',
                '--option=store.RPMStore.rpm_dir=', output_dir, 'add'
            ] + sources
        )
        if res.code:
            raise RepositoryMergeError(
                'Failed to merge repos %s into %s' % (sources, output_dir)
            )
Example 24
def merge(output_dir, input_dirs):
    try:
        os.makedirs(output_dir)
    except:
        sys.exc_clear()

    for input_dir in input_dirs:
        with LogTask('Processing directory %s' % input_dir):
            ret = run_command(
                [
                    'find',
                    input_dir,
                    '-type',
                    'f',
                    '-size',
                    '+0',
                    '-name',
                    '*.rpm',
                ]
            )

            if ret.code or not ret.out:
                raise RuntimeError('Could not find the RPMs in %s' % input_dir)

            rpm_paths = ret.out.strip().split('\n')
            for path in rpm_paths:
                if "i686" not in path:
                    _fastcopy(
                        path, os.path.join(
                            output_dir, os.path.basename(path)
                        )
                    )

    try:
        ret = run_command(['createrepo', output_dir], cwd=output_dir)
        if ret:
            raise RuntimeError('createrepo for %s failed' % output_dir)
    except OSError:
        pass
Example 25
    def download(self, temp_ver, store_metadata=True):
        """
        Retrieve the given template version

        Args:
            temp_ver (TemplateVersion): template version to retrieve
            store_metadata (bool): If set to ``False``, will not refresh the
                local metadata with the retrieved one

        Returns:
            None
        """
        dest = self._prefixed(temp_ver.name)
        temp_dest = '%s.tmp' % dest

        with utils.LockFile(dest + '.lock'):
            # Image was downloaded while we were waiting
            if os.path.exists(dest):
                return

            temp_ver.download(temp_dest)
            if store_metadata:
                with open('%s.metadata' % dest, 'w') as f:
                    utils.json_dump(temp_ver.get_metadata(), f)

            sha1 = utils.get_hash(temp_dest)
            if temp_ver.get_hash() != sha1:
                raise RuntimeError(
                    'Image %s does not match the expected hash %s' % (
                        temp_ver.name,
                        sha1,
                    ))

            with open('%s.hash' % dest, 'w') as f:
                f.write(sha1)

            with log_utils.LogTask('Convert image', logger=LOGGER):
                result = utils.run_command([
                    'qemu-img',
                    'convert',
                    '-O',
                    'raw',
                    temp_dest,
                    dest,
                ], )

                os.unlink(temp_dest)
                if result:
                    raise RuntimeError(result.err)
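A sketch of how the method above might be called; the store object and the template-version value are hypothetical stand-ins for whatever template provider supplies them:

    # Hypothetical usage: fetch a template version into the store's prefix,
    # skipping the refresh of the local metadata copy.
    store.download(temp_ver, store_metadata=False)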
Example 26
File: build.py Project: nirs/lago
    def build(self):
        """
        Run all the commands in self.build_cmds

        Raises:
            lago.build.BuildException: If a command returned a non-zero code
        """
        if not self.build_cmds:
            LOGGER.debug('No build commands were found, skipping build step')

        with LogTask('Building {} disk {}'.format(self.name, self.disk_path)):
            for command in self.build_cmds:
                with LogTask('Running command {}'.format(command.name)):
                    LOGGER.debug(command.cmd)
                    result = utils.run_command(command.cmd)
                    if result:
                        raise BuildException(result.err)
Example 27
File: build.py Project: tinez/lago
    def build(self):
        """
        Run all the commands in self.build_cmds

        Raises:
            lago.build.BuildException: If a command returned a non-zero code
        """
        if not self.build_cmds:
            LOGGER.debug('No build commands were found, skipping build step')

        with LogTask('Building {} disk {}'.format(self.name, self.disk_path)):
            for command in self.build_cmds:
                with LogTask('Running command {}'.format(command.name)):
                    LOGGER.debug(command.cmd)
                    result = utils.run_command(command.cmd)
                    if result:
                        raise BuildException(result.err)
Example 28
def run_command(command, **kwargs):
    """
    Wrapper around :func:`lago.utils.run_command` that prepends the ovirtlago
    LIBEXEC_DIR to the path if needed

    Args:
        command (?): parameter to send as the command parameter to
            :func:`lago.utils.run_command`
        **kwargs (?): keyword parameters to pass through to
            :func:`lago.utils.run_command`

    Returns:
        ?: Whatever :func:`lago.utils.run_command` returns
    """
    # add libexec to PATH if needed
    if constants.LIBEXEC_DIR not in os.environ['PATH'].split(':'):
        os.environ['PATH'] = '%s:%s' % (
            constants.LIBEXEC_DIR, os.environ['PATH']
        )
    return utils.run_command(command, **kwargs)
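Since the wrapper only adjusts PATH, it is called exactly like lago.utils.run_command; a minimal sketch (the command is arbitrary and the tuple unpacking mirrors the style used in the other examples):

    # Hypothetical usage: helpers shipped in LIBEXEC_DIR become callable by
    # bare name because the directory is prepended to PATH once.
    ret, out, err = run_command(['true'])
    assert ret == 0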
Example 29
def sysprep(disk, distro, loader=None, backend='direct', **kwargs):
    """
    Run virt-sysprep on the ``disk``, commands are built from the distro
    specific template and arguments passed in ``kwargs``. If no template is
    available it will default to ``sysprep-base.j2``.

    Args:
        disk(str): path to disk
        distro(str): distro to render template for
        loader(jinja2.BaseLoader): Jinja2 template loader, if not passed,
            will search Lago's package.
        backend(str): libguestfs backend to use
        **kwargs(dict): environment variables for Jinja2 template

    Returns:
        None

    Raises:
        RuntimeError: On virt-sysprep non-zero exit code.
    """

    if loader is None:
        loader = PackageLoader('lago', 'templates')
    sysprep_file = _render_template(distro, loader=loader, **kwargs)

    cmd = ['virt-sysprep', '-a', disk]
    cmd.extend(['--commands-from-file', sysprep_file])

    env = os.environ.copy()
    if 'LIBGUESTFS_BACKEND' not in env:
        env['LIBGUESTFS_BACKEND'] = backend

    ret = utils.run_command(cmd, env=env)
    if ret:
        raise RuntimeError(
            'Failed to bootstrap %s\ncommand:%s\nstdout:%s\nstderr:%s' % (
                disk,
                ' '.join('"%s"' % elem for elem in cmd),
                ret.out,
                ret.err,
            ))
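A hypothetical call to sysprep(); the disk path, distro name and the extra keyword are assumptions, since the accepted kwargs depend on the selected Jinja2 template:

    # Hypothetical usage: render the distro template with hostname='vm0' and
    # run virt-sysprep on the disk using the default 'direct' backend.
    sysprep('/var/lib/lago/images/vm0.qcow2', distro='el7', hostname='vm0')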
Example 30
def _build_rpms(name, script, source_dir, output_dir, dists, env=None):
    with LogTask(
        'Build %s(%s) from %s, for %s, store results in %s' %
        (name, script, source_dir, ', '.join(dists), output_dir),
    ):
        ret, out, err = run_command(
            [
                script,
                source_dir,
                output_dir,
            ] + dists,
            env=env,
        )

        if ret:
            LOGGER.error('%s returned with error %d', script, ret, )
            LOGGER.error('Output was: \n%s', out)
            LOGGER.error('Errors were: \n%s', err)
            raise RuntimeError('%s failed, see logs' % script)

        return ret
Example 31
def _build_rpms(name, script, source_dir, output_dir, dists, env=None):
    with LogTask(
            'Build %s(%s) from %s, for %s, store results in %s' %
        (name, script, source_dir, ', '.join(dists), output_dir), ):
        ret, out, err = run_command(
            [
                script,
                source_dir,
                output_dir,
            ] + dists,
            env=env,
        )

        if ret:
            LOGGER.error(
                '%s returned with error %d',
                script,
                ret,
            )
            LOGGER.error('Output was: \n%s', out)
            LOGGER.error('Errors were: \n%s', err)
            raise RuntimeError('%s failed, see logs' % script)

        return ret
Example 32
def _set_link(name, state):
    ret, _, _ = utils.run_command(_IP + ['link', 'set', 'dev', name, state])
    if ret:
        raise RuntimeError('Could not set %s to state %s' % (name, state))
Example 33
        level=logging.DEBUG,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    )

    working_dir = sys.argv[1]
    script_path = sys.argv[2]
    images = map(os.path.abspath, sys.argv[3:])

    with utils.RollbackContext() as rollback:
        # We will use each image we update as a snapshot, and if the update
        # is successful we will merge
        for img in images:
            ret, _, _ = utils.run_command(
                [
                    'qemu-img', 'create', '-f', 'qcow2', '-b', img, updating(
                        img
                    )
                ]
            )
            if ret:
                raise RuntimeError('Failed to create copy of image')
            rollback.prependDefer(os.unlink, updating(img))
            # To avoid losing access once libvirt changes ownership
            os.chmod(updating(img), 0666)

        config = {
            'nets': {
                NETWORK_NAME: {
                    'dhcp': {
                        'start': 100,
                        'end': 254,
Example 34
def _git_revision_at(path):
    ret, out, _ = run_command(['git', 'rev-parse', 'HEAD'], cwd=path)
    if ret:
        return 'unknown'
    return out.strip()
Example 35
def _brctl(command, *args):
    ret, out, err = utils.run_command(_BRCTL + [command] + list(args))
    if ret:
        raise RuntimeError('brctl %s failed\nrc: %d\n\nout:\n%s\n\nerr:\n%s' %
                           (command, ret, out, err))
    return ret, out, err
Example 36
def _git_revision_at(path):
    ret, out, _ = run_command(['git', 'rev-parse', 'HEAD'], cwd=path)
    if ret:
        return 'unknown'
    return out.strip()
Example 37
def qemu_img_convert(frm, frm_format, to, to_format):
    return utils.run_command(
        [
            'qemu-img', 'convert', '-f', frm_format, frm, '-O', to_format, to
        ]
    )
Example 38
def update(templates_dir):
    logging.info('Updating template directory at %s', templates_dir)

    git_repo = os.path.join(templates_dir, 'git-repo')

    ret, _, _ = utils.run_command(['git', 'fetch', 'origin'], cwd=git_repo)
    if ret != 0:
        logging.warning('Failed to access templates git repo')
        return

    ret, local_head, _ = utils.run_command(
        ['git', 'rev-parse', 'master'],
        cwd=git_repo
    )
    if ret != 0:
        raise RuntimeError('Failed to retrieve current revision')

    logging.debug('Fetching from remote repository')
    ret, remote_head, _ = utils.run_command(
        ['git', 'rev-parse', 'origin/master'],
        cwd=git_repo
    )
    if ret != 0:
        raise RuntimeError('Failed to retrieve remote revision')

    if remote_head != local_head:
        logging.debug('Local repository is not up to date, rebasing')
        ret, _, _ = utils.run_command(
            ['git', 'rebase', 'origin/master'],
            cwd=git_repo
        )
        if ret != 0:
            raise RuntimeError('Failed to rebase on remote master')

    for root, dirs, files in os.walk(git_repo):
        dirs[:] = [d for d in dirs if d != '.git']

        for filename in files:
            logging.debug('Checking if %s needs update.', filename)
            path_in_git = os.path.join(root, filename)
            rel_path = path_in_git[len(git_repo):].lstrip('/')
            path_outside_git = os.path.join(templates_dir, rel_path)

            try:
                with open('%s.hash' % path_outside_git) as f:
                    current_rev = f.read()
            except IOError:
                current_rev = ''

            ret, updated_rev, _ = utils.run_command(
                [
                    'git', 'log', '-n', '1', '--pretty=format:%H', '--',
                    rel_path
                ],
                cwd=git_repo
            )
            if ret != 0:
                raise RuntimeError('Failed to retrieve image revision')

            if current_rev != updated_rev:
                logging.debug('Updating %s', filename)
                if os.path.exists(path_outside_git):
                    os.unlink(path_outside_git)
                elif not os.path.exists(os.path.dirname(path_outside_git)):
                    os.makedirs(os.path.dirname(path_outside_git))
                ret, _, _ = qemu_img_convert(
                    path_in_git, 'qcow2', path_outside_git, 'raw'
                )

                if ret != 0:
                    raise RuntimeError('Failed to convert image')

                with open('%s.hash' % path_outside_git, 'w') as f:
                    f.write(updated_rev)