def test_get_sandbox_image(arch, goal, kv):
    """Verify we return a sandbox image from the appropriate registry.

    :param arch: mocked arch helper; returns the machine architecture
    :param goal: mocked goal_state helper
    :param kv: mocked unitdata.kv store factory

    NOTE(review): the original body referenced ``arch``, ``goal``, ``kv``,
    ``mock_rids`` and ``mock_remote`` without defining them (NameError), and
    used ``kv().set(...)`` which does not configure a mock's ``get``. Fixed
    to match the working sibling test in this file: mocks arrive as fixture
    parameters and relation helpers are patched in a context manager.
    """
    arch.return_value = 'foo'
    image_name = 'pause-{}:3.4.1'.format(arch.return_value)

    canonical_registry = 'rocks.canonical.com:443/cdk'
    related_registry = 'my.registry.com:5000'
    upstream_registry = 'k8s.gcr.io'

    # No registry and no k8s in our goal-state: return the upstream image
    kv().get.return_value = {}
    goal.return_value = {}
    assert containerd.get_sandbox_image() == '{}/{}'.format(upstream_registry, image_name)

    # No registry and no goal-state: return upstream or canonical depending on remote units
    kv().get.return_value = {}
    goal.side_effect = NotImplementedError()
    with mock.patch('charmhelpers.core.hookenv.relation_ids') as mock_rids, \
            mock.patch('charmhelpers.core.hookenv.remote_service_name') as mock_remote:
        mock_rids.return_value = ['foo']
        mock_remote.return_value = 'not-kubernetes'
        assert containerd.get_sandbox_image() == '{}/{}'.format(upstream_registry, image_name)

        mock_remote.return_value = 'kubernetes-control-plane'
        assert containerd.get_sandbox_image() == '{}/{}'.format(canonical_registry, image_name)

    # No registry with k8s in our goal-state: return the canonical image
    kv().get.return_value = {}
    goal.return_value = {'relations': {'containerd': {'kubernetes-control-plane'}}}
    goal.side_effect = None
    assert containerd.get_sandbox_image() == '{}/{}'.format(canonical_registry, image_name)

    # A related registry should return registry[url]/image
    kv().get.return_value = {'url': related_registry}
    assert containerd.get_sandbox_image() == '{}/{}'.format(related_registry, image_name)
# Example 2
def config_changed():
    """
    Render the config template.

    :return: None
    """
    if _juju_proxy_changed():
        set_state('containerd.juju-proxy.changed')

    # Create "dumb" context based on Config to avoid triggering config.changed
    context = dict(config())

    config_file = 'config.toml'
    config_directory = '/etc/containerd'

    # Prefer a sandbox image offered over the containerd endpoint; fall back
    # to the charm's own registry selection logic when none is offered.
    sandbox_image = None
    endpoint = endpoint_from_flag('endpoint.containerd.available')
    if endpoint:
        sandbox_image = endpoint.get_sandbox_image()
        if sandbox_image:
            log('Setting sandbox_image to: {}'.format(sandbox_image))
    context['sandbox_image'] = sandbox_image or containerd.get_sandbox_image()

    if not os.path.isdir(config_directory):
        os.mkdir(config_directory)

    # If custom_registries changed, make sure to remove old tls files.
    old_custom_registries = (
        config().previous('custom_registries')
        if config().changed('custom_registries')
        else None
    )

    context['custom_registries'] = merge_custom_registries(
        config_directory, context['custom_registries'], old_custom_registries)

    untrusted = DB.get('untrusted')
    context['untrusted'] = bool(untrusted)
    if untrusted:
        context['untrusted_name'] = untrusted['name']
        context['untrusted_path'] = untrusted['binary_path']
        context['untrusted_binary'] = os.path.basename(untrusted['binary_path'])

    # Resolve the 'auto' runtime: nvidia runtime when the nvidia stack is
    # available, plain runc otherwise.
    if context.get('runtime') == 'auto':
        if is_state('containerd.nvidia.available'):
            context['runtime'] = 'nvidia-container-runtime'
        else:
            context['runtime'] = 'runc'

    render(config_file, os.path.join(config_directory, config_file), context)

    set_state('containerd.restart')
# Example 3
def config_changed():
    """
    Render the config template and restart the service.

    :return: None
    """
    # Create "dumb" context based on Config to avoid triggering config.changed
    context = dict(config())

    config_file = 'config.toml'
    config_directory = '/etc/containerd'

    # Prefer a sandbox image offered over the containerd endpoint; fall back
    # to the charm's own registry selection logic otherwise.
    sandbox_image = None
    endpoint = endpoint_from_flag('endpoint.containerd.available')
    if endpoint:
        sandbox_image = endpoint.get_sandbox_image()
        if sandbox_image:
            log('Setting sandbox_image to: {}'.format(sandbox_image))
    context['sandbox_image'] = sandbox_image or containerd.get_sandbox_image()

    context['custom_registries'] = merge_custom_registries(
        context['custom_registries'])

    untrusted = DB.get('untrusted')
    context['untrusted'] = bool(untrusted)
    if untrusted:
        context['untrusted_name'] = untrusted['name']
        context['untrusted_path'] = untrusted['binary_path']
        context['untrusted_binary'] = os.path.basename(untrusted['binary_path'])

    # Resolve the 'auto' runtime: nvidia runtime when the nvidia stack is
    # available, plain runc otherwise.
    if context.get('runtime') == 'auto':
        if is_state('containerd.nvidia.available'):
            context['runtime'] = 'nvidia-container-runtime'
        else:
            context['runtime'] = 'runc'

    if not os.path.isdir(config_directory):
        os.mkdir(config_directory)

    render(config_file, os.path.join(config_directory, config_file), context)

    log('Restarting containerd.service')
    host.service_restart('containerd.service')

    # Surface the runtime's health in the unit status.
    if _check_containerd():
        status_set('active', 'Container runtime available')
        set_state('containerd.ready')
    else:
        status_set('blocked', 'Container runtime not available')
def test_get_sandbox_image(arch, goal, kv):
    """Verify we return a sandbox image from the appropriate registry."""
    arch.return_value = 'foo'
    image = 'pause-{}:3.1'.format(arch.return_value)

    canonical = 'rocks.canonical.com:443/cdk'
    related = 'my.registry.com:5000'
    upstream = 'k8s.gcr.io'

    # No registry and no k8s in our goal-state: return the upstream image
    kv().get.return_value = {}
    goal.return_value = {}
    assert containerd.get_sandbox_image() == '{}/{}'.format(upstream, image)

    # No registry and no goal-state: return upstream or canonical depending on remote units
    kv().get.return_value = {}
    goal.side_effect = NotImplementedError()
    with mock.patch('charmhelpers.core.hookenv.relation_ids') as mock_rids, \
            mock.patch('charmhelpers.core.hookenv.remote_service_name') as mock_remote:
        mock_rids.return_value = ['foo']
        mock_remote.return_value = 'not-kubernetes'
        assert containerd.get_sandbox_image() == '{}/{}'.format(upstream, image)

        mock_remote.return_value = 'kubernetes-master'
        assert containerd.get_sandbox_image() == '{}/{}'.format(canonical, image)

    # No registry with k8s in our goal-state: return the canonical image
    kv().get.return_value = {}
    goal.side_effect = None
    goal.return_value = {'relations': {'containerd': {'kubernetes-master'}}}
    assert containerd.get_sandbox_image() == '{}/{}'.format(canonical, image)

    # A related registry should return registry[url]/image
    kv().get.return_value = {'url': related}
    assert containerd.get_sandbox_image() == '{}/{}'.format(related, image)
def config_changed():
    """
    Render the config template.

    :return: None
    """
    if _juju_proxy_changed():
        set_state('containerd.juju-proxy.changed')

    # Create "dumb" context based on Config to avoid triggering config.changed
    context = dict(config())

    # Pick the template matching the requested config schema version.
    if context['config_version'] == "v2":
        template_config = "config_v2.toml"
    else:
        template_config = "config.toml"

    # Prefer a sandbox image offered over the containerd endpoint; fall back
    # to the charm's own registry selection logic when none is offered.
    sandbox_image = None
    endpoint = endpoint_from_flag('endpoint.containerd.available')
    if endpoint:
        sandbox_image = endpoint.get_sandbox_image()
        if sandbox_image:
            log('Setting sandbox_image to: {}'.format(sandbox_image))
    context['sandbox_image'] = sandbox_image or containerd.get_sandbox_image()

    if not os.path.isdir(CONFIG_DIRECTORY):
        os.mkdir(CONFIG_DIRECTORY)

    # If custom_registries changed, make sure to remove old tls files.
    old_custom_registries = (
        config().previous('custom_registries')
        if config().changed('custom_registries')
        else None
    )

    # Refuse to render a config with invalid custom_registries.
    invalid_reason = invalid_custom_registries(context['custom_registries'])
    if invalid_reason:
        status.blocked('Invalid custom_registries: {}'.format(invalid_reason))
        return

    context['custom_registries'] = merge_custom_registries(
        CONFIG_DIRECTORY, context['custom_registries'], old_custom_registries)

    untrusted = DB.get('untrusted')
    context['untrusted'] = bool(untrusted)
    if untrusted:
        context['untrusted_name'] = untrusted['name']
        context['untrusted_path'] = untrusted['binary_path']
        context['untrusted_binary'] = os.path.basename(untrusted['binary_path'])

    # Resolve the 'auto' runtime: nvidia runtime when the nvidia stack is
    # available, plain runc otherwise.
    if context.get('runtime') == 'auto':
        if is_state('containerd.nvidia.available'):
            context['runtime'] = 'nvidia-container-runtime'
        else:
            context['runtime'] = 'runc'

    render(
        template_config,
        os.path.join(CONFIG_DIRECTORY, CONFIG_FILE),
        context
    )

    set_state('containerd.restart')