Example #1
def configure_mesh():
    endpoint_from_name('service-mesh').add_route(
        prefix='/jupyter/',
        rewrite='/',
        service=hookenv.service_name(),
        port=hookenv.config('port'),
    )
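For context: these snippets are charms.reactive hook handlers. A minimal sketch of how a handler like Example #1 is typically wired up, assuming illustrative flag names (the decorators and imports are standard charms.reactive; the flags are not taken from the original charm):

from charms.reactive import endpoint_from_name, set_flag, when, when_not
from charmhelpers.core import hookenv

@when('endpoint.service-mesh.joined', 'config.set.port')
@when_not('charm.mesh-configured')
def _configure_mesh():
    # Same call as Example #1, run once the endpoint is joined and the
    # port option is set; the guard flag keeps it from re-running.
    endpoint_from_name('service-mesh').add_route(
        prefix='/jupyter/',
        rewrite='/',
        service=hookenv.service_name(),
        port=hookenv.config('port'),
    )
    set_flag('charm.mesh-configured')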
Example #2
def publish():
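    # Publish to every relation that has a remote application attached;
    # the second argument marks whether the credentials are admin-level.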
    for rel in reactive.endpoint_from_name('database').relations:
        if rel.application_name is not None:
            publish_credentials(rel, False)
            publish_general(rel)
    for rel in reactive.endpoint_from_name('database-admin').relations:
        if rel.application_name is not None:
            publish_credentials(rel, True)
            publish_general(rel)
    reactive.set_flag('cassandra.client.published')
Example #3
def configure_mesh():
    endpoint_from_name('service-mesh').add_route(
        prefix='/authservice',
        service=hookenv.service_name(),
        port=hookenv.config('port'),
        auth={
            'request_headers': ['cookie', 'X-Auth-Token'],
            'response_headers': ['kubeflow-userid'],
        },
    )
Example #4
def start_charm():
    if not hookenv.is_leader():
        hookenv.log("This unit is not a leader.")
        return False

    layer.status.maintenance('configuring container')

    image_info = layer.docker_resource.get_info('oci-image')

    api = endpoint_from_name('metadata-api').services()[0]
    grpc = endpoint_from_name('metadata-grpc').services()[0]

    port = hookenv.config('port')

    layer.caas_base.pod_spec_set({
        'version': 2,
        'serviceAccount': {
            'rules': [
                {
                    'apiGroups': [''],
                    'resources': ['pods', 'pods/log'],
                    'verbs': ['create', 'get', 'list'],
                },
                {
                    'apiGroups': ['kubeflow.org'],
                    'resources': ['viewers'],
                    'verbs': ['create', 'get', 'list', 'watch', 'delete'],
                },
            ]
        },
        'containers': [{
            'name': 'metadata-ui',
            'imageDetails': {
                'imagePath': image_info.registry_path,
                'username': image_info.username,
                'password': image_info.password,
            },
            'ports': [{
                'name': 'http',
                'containerPort': port
            }],
            'config': {
                'METADATA_SERVICE_SERVICE_HOST': api['service_name'],
                'METADATA_SERVICE_SERVICE_PORT': api['hosts'][0]['port'],
                'METADATA_ENVOY_SERVICE_SERVICE_HOST': grpc['service_name'],
                'METADATA_ENVOY_SERVICE_SERVICE_PORT': grpc['hosts'][0]['port'],
            },
        }],
    })

    layer.status.maintenance('creating container')
    set_flag('charm.started')
Example #5
def get_internal_api_endpoints(relation=None):
    """
    Determine the best API endpoints for an internal client to connect to.

    If a relation is given, it will try to take that into account.

    May return an empty list if an endpoint is expected but not yet available.
    """
    try:
        goal_state = hookenv.goal_state()
    except NotImplementedError:
        goal_state = {}
    goal_state.setdefault("relations", {})

    # Config takes precedence.
    endpoints_from_config = get_endpoints_from_config()
    if endpoints_from_config:
        return endpoints_from_config

    # If the internal LB relation is attached, use that or nothing. If it's
    # not attached but the external LB relation is, use that or nothing.
    for lb_type in ("internal", "external"):
        lb_endpoint = "loadbalancer-" + lb_type
        request_name = "api-server-" + lb_type
        api_port = EXTERNAL_API_PORT if lb_type == "external" else STANDARD_API_PORT
        if lb_endpoint in goal_state["relations"]:
            lb_provider = endpoint_from_name(lb_endpoint)
            lb_response = lb_provider.get_response(request_name)
            if not lb_response or lb_response.error:
                return []
            return [(lb_response.address, api_port)]

    # Support the older loadbalancer relation (public-address interface).
    if "loadbalancer" in goal_state["relations"]:
        loadbalancer = endpoint_from_name("loadbalancer")
        lb_addresses = loadbalancer.get_addresses_ports()
        return [(host.get("public-address"), host.get("port"))
                for host in lb_addresses]

    # No LBs of any kind, so fall back to ingress-address.
    if not relation:
        kube_control = endpoint_from_name("kube-control")
        if not kube_control.relations:
            return []
        relation = kube_control.relations[0]
    ingress_address = hookenv.ingress_address(relation.relation_id,
                                              hookenv.local_unit())
    return [(ingress_address, STANDARD_API_PORT)]
Example #6
def reset_auth_keyspace_replication():
    # Cassandra requires you to manually set the replication factor of
    # the system_auth keyspace, to ensure availability and redundancy.
    # The recommendation is to set the replication factor so that every
    # node has a copy.
    ep = reactive.endpoint_from_name('cluster')
    num_nodes = len(ep.all_bootstrapped_units) + 1
    datacenter = cassandra.config()['datacenter']
    with cassandra.connect() as session:
        strategy_opts = cassandra.get_auth_keyspace_replication(session)
        rf = int(strategy_opts.get(datacenter, -1))
        hookenv.log('Current system_auth replication strategy is {!r}'.format(strategy_opts))
        if rf != num_nodes:
            strategy_opts['class'] = 'NetworkTopologyStrategy'
            strategy_opts[datacenter] = num_nodes
            if 'replication_factor' in strategy_opts:
                del strategy_opts['replication_factor']
            hookenv.log('New system_auth replication strategy is {!r}'.format(strategy_opts))
            status, msg = hookenv.status_get()
            helpers.status_set(status, 'Updating system_auth rf to {!r}'.format(strategy_opts))
            cassandra.set_auth_keyspace_replication(session, strategy_opts)
            if rf < num_nodes:
                # Increasing rf, need to run repair.
                cassandra.repair_auth_keyspace()
            helpers.status_set(status, msg)
    reactive.set_flag('cassandra.authkeyspace.done')
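The helper cassandra.set_auth_keyspace_replication is not shown here; below is a minimal sketch of what it is assumed to execute. The ALTER KEYSPACE statement is standard CQL, and the quoting trick is one common way to build the map literal; the real helper may differ.

import json

def set_auth_keyspace_replication(session, settings):
    # settings is a replication map such as
    # {'class': 'NetworkTopologyStrategy', 'dc1': 3}; CQL expects the map
    # literal with single quotes, hence the json round-trip.
    cql = 'ALTER KEYSPACE system_auth WITH REPLICATION = {}'.format(
        json.dumps(settings).replace('"', "'"))
    session.execute(cql)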
Example #7
def resync_pools(args):
    """Force image resync on pools in local Ceph endpoint."""
    if not ch_core.hookenv.action_get('i-really-mean-it'):
        ch_core.hookenv.action_fail('Required parameter not set')
        return
    with charms_openstack.charm.provide_charm_instance() as charm:
        ceph_local = reactive.endpoint_from_name('ceph-local')
        pools = charm.eligible_pools(ceph_local.pools)
        result = collections.defaultdict(dict)
        for pool in pools:
            # list images in pool
            output = subprocess.check_output(
                ['rbd', '--id', charm.ceph_id, '--format', 'json',
                 '-p', pool, 'ls'],
                universal_newlines=True)
            images = json.loads(output)
            for image in images:
                output = subprocess.check_output(
                    ['rbd', '--id', charm.ceph_id, 'mirror', 'image',
                     'resync', '{}/{}'.format(pool, image)],
                    universal_newlines=True)
                result[pool][image] = output.rstrip()
        output_str = ''
        for pool in result:
            for image in result[pool]:
                if output_str:
                    output_str += '\n'
                output_str += '{}/{}: {}'.format(pool, image,
                                                 result[pool][image])
        ch_core.hookenv.action_set({'output': output_str})
Example #8
def start_charm():
    layer.status.maintenance('configuring container')

    image_info = layer.docker_resource.get_info('oci-image')

    api = endpoint_from_name(
        'pipelines-api').services()[0]['hosts'][0]['hostname']

    layer.caas_base.pod_spec_set({
        'omitServiceFrontend': True,
        'containers': [{
            'name': 'pipelines-persistence',
            'args': [
                'persistence_agent',
                '--alsologtostderr=true',
                f'--mlPipelineAPIServerName={api}',
                f'--namespace={os.environ["JUJU_MODEL_NAME"]}',
            ],
            'imageDetails': {
                'imagePath': image_info.registry_path,
                'username': image_info.username,
                'password': image_info.password,
            },
        }],
    })

    layer.status.maintenance('creating container')
    set_flag('charm.started')
Example #9
def handle_requests():
    db = endpoint_from_name('database')
    users = unitdata.kv().get('charm.users', {})
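    # One (username, password) pair is tracked per requesting application,
    # so repeated requests from the same app reuse the same credentials.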
    root_password = unitdata.kv().get('charm.root-password')
    connection = mysql.connector.connect(user='******',
                                         password=root_password,
                                         host='mariadb')
    cursor = None
    try:
        cursor = connection.cursor()
        for request in db.new_requests:
            # determine db_name, username, and password for request,
            # generating each if needed
            if request.application_name not in users:
                users[request.application_name] = (host.pwgen(20),
                                                   host.pwgen(20))
            username, password = users[request.application_name]
            db_name = request.database_name or request.application_name

            # create the database and grant the user access
            layer.mariadb_k8s.create_database(cursor, db_name)
            if not layer.mariadb_k8s.grant_exists(cursor, db_name, username,
                                                  request.address):
                layer.mariadb_k8s.create_grant(cursor, db_name, username,
                                               password, request.address)

            # fulfill this request
            request.provide_database(db_name, username, password)
        connection.commit()  # commit belongs to the connection, not the cursor
    finally:
        if cursor:
            cursor.close()
        connection.close()
Example #10
# connect() yields a session, so it must be a context manager (it is used
# as `with cassandra.connect() as session` in Example #6 above).
@contextlib.contextmanager
def connect(username=None, password=None, timeout=CONNECT_TIMEOUT):
    # We pull the currently configured listen address and port from the
    # yaml, rather than the service configuration, as it may have been
    # overridden.
    cassandra_yaml = read_cassandra_yaml()
    address = cassandra_yaml['rpc_address']
    if address == '0.0.0.0':
        address = 'localhost'
    port = cassandra_yaml['native_transport_port']

    auth_provider = get_auth_provider(username, password)

    addresses = set([address])
    cluster_ep = reactive.endpoint_from_name('cluster')
    addresses.update(cluster_ep.get_bootstrapped_ips())

    # Although we specify a reconnection_policy, it does not apply to
    # the initial connection so we retry in a loop.
    start = time.time()
    until = start + timeout
    while True:
        cluster = cassandra.cluster.Cluster(
            list(addresses), port=port, auth_provider=auth_provider)
        try:
            session = cluster.connect()
            session.default_timeout = timeout
            break
        except cassandra.cluster.NoHostAvailable:
            cluster.shutdown()
            if time.time() > until:
                raise
        time.sleep(1)
    try:
        yield session
    finally:
        cluster.shutdown()
Example #11
def handle_mysql_requests():
    mysql_clients = endpoint_from_name('rds-mysql')
    mysql_rds = layer.aws.MySQLRDSManager()
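    # Keep only requests that are attached to an application and are not
    # already failed, active, or pending.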
    reqs = {
        req: app
        for req, app in mysql_clients.database_requests().items()
        if app and req not in (mysql_rds.failed_creates | set(mysql_rds.active)
                               | set(mysql_rds.pending))
    }

    for req, app in reqs.items():
        layer.status.maintenance('Creating RDS MySQL database for ' + app)
        mysql_rds.create_db(req)

    if mysql_rds.pending:
        layer.status.maintenance('Waiting for RDS MySQL databases')
        completed = mysql_rds.poll_pending()
        for req, db in completed.items():
            mysql_clients.provide_database(req,
                                           host=db['host'],
                                           port=db['port'],
                                           database_name=db['database'],
                                           user=db['username'],
                                           password=db['password'])

    if mysql_rds.failed_creates:
        layer.status.blocked('Failed to create one or '
                             'more RDS MySQL databases')
    elif mysql_rds.pending:
        layer.status.waiting('Waiting for RDS MySQL databases')
    else:
        layer.status.active('Ready')
Example #12
def start_charm():
    if not hookenv.is_leader():
        hookenv.log("This unit is not a leader.")
        return False

    layer.status.maintenance('configuring container')

    image_info = layer.docker_resource.get_info('oci-image')

    mysql = endpoint_from_name('mysql')

    port = hookenv.config('port')

    layer.caas_base.pod_spec_set({
        'version': 2,
        'containers': [{
            'name': 'katib-manager',
            'command': ["./katib-db-manager"],
            'imageDetails': {
                'imagePath': image_info.registry_path,
                'username': image_info.username,
                'password': image_info.password,
            },
            'ports': [{
                'name': 'api',
                'containerPort': port
            }],
            'config': {
                'DB_NAME': 'mysql',
                'DB_USER': '******',
                'DB_PASSWORD': mysql.root_password(),
                'KATIB_MYSQL_DB_HOST': mysql.host(),
                'KATIB_MYSQL_DB_PORT': mysql.port(),
                'KATIB_MYSQL_DB_DATABASE': 'katib',
            },
            'kubernetes': {
                'livenessProbe': {
                    'exec': {
                        'command': ["/bin/grpc_health_probe", f"-addr=:{port}"]
                    },
                    'initialDelaySeconds': 10,
                },
                'readinessProbe': {
                    'exec': {
                        'command': ["/bin/grpc_health_probe", f"-addr=:{port}"]
                    },
                    'initialDelaySeconds': 10,
                    'periodSeconds': 60,
                    'failureThreshold': 5,
                },
            },
        }],
    })

    layer.status.maintenance('creating container')
    clear_flag('mysql.changed')
    set_flag('charm.started')
Example #13
def start_charm():
    layer.status.maintenance('configuring container')

    image_info = layer.docker_resource.get_info('oci-image')

    backend = endpoint_from_name('modeldb-backend').services()[0]

    port = hookenv.config('port')

    layer.caas_base.pod_spec_set({
        'version': 2,
        'containers': [{
            'name': 'modeldb-ui',
            'imageDetails': {
                'imagePath': image_info.registry_path,
                'username': image_info.username,
                'password': image_info.password,
            },
            'ports': [{
                'name': 'port',
                'containerPort': port
            }],
            'config': {
                'BACKEND_API_DOMAIN': backend['service_name'],
                'BACKEND_API_PORT': backend['hosts'][0]['port'],
            },
        }],
    })

    layer.status.maintenance('creating container')
    set_flag('charm.started')
Example #14
def rbd_mirror_action(args):
    """Perform RBD command on pools in local Ceph endpoint."""
    action_name = os.path.basename(args[0])
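    # The action name (e.g. 'status' or 'promote') comes from the script's
    # file name and is passed straight through to `rbd mirror pool`.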
    with charms_openstack.charm.provide_charm_instance() as charm:
        ceph_local = reactive.endpoint_from_name('ceph-local')
        pools = get_pools()
        if not pools:
            pools = charm.eligible_pools(ceph_local.pools)
        result = {}
        cmd = ['rbd', '--id', charm.ceph_id, 'mirror', 'pool', action_name]
        if ch_core.hookenv.action_get('force'):
            cmd += ['--force']
        if ch_core.hookenv.action_get('verbose'):
            cmd += ['--verbose']
        output_format = ch_core.hookenv.action_get('format')
        if output_format:
            cmd += ['--format', output_format]
        for pool in pools:
            output = subprocess.check_output(cmd + [pool],
                                             stderr=subprocess.STDOUT,
                                             universal_newlines=True)
            if output_format == 'json':
                result[pool] = json.loads(output)
            else:
                result[pool] = output.rstrip()
        if output_format == 'json':
            ch_core.hookenv.action_set({'output': json.dumps(result)})
        else:
            output_str = ''
            for pool, output in result.items():
                if output_str:
                    output_str += '\n'
                output_str += '{}: {}'.format(pool, output)
            ch_core.hookenv.action_set({'output': output_str})
Example #15
def provide_lb_consumers():
    '''Respond to any LB requests via the lb-consumers relation.

    Going forward, this is preferred over the more complex two-relation
    setup that uses the website and loadbalancer relations.
    '''
    lb_consumers = endpoint_from_name('lb-consumers')
    lb_address = _get_lb_address()
    for request in lb_consumers.all_requests:
        response = request.response
        if request.protocol not in (request.protocols.tcp,
                                    request.protocols.http,
                                    request.protocols.https):
            response.error_type = response.error_types.unsupported
            response.error_fields = {
                'protocol': 'Protocol must be one of: tcp, http, https'
            }
            lb_consumers.send_response(request)
            continue
        if lb_address:
            private_address = lb_address
            public_address = lb_address
        else:
            network_info = hookenv.network_get('lb-consumers',
                                               str(request.relation.id))
            private_address = network_info['ingress-addresses'][0]
            public_address = hookenv.unit_get('public-address')
        if request.public:
            response.address = public_address
        else:
            response.address = private_address
        lb_consumers.send_response(request)
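For contrast, a sketch of what the requesting side of this interface might look like; the endpoint and request names below are assumptions, and the get_request/send_request calls mirror the get_response usage shown in Example #5:

def request_lb():
    lb_provider = endpoint_from_name('lb-provider')
    request = lb_provider.get_request('my-service')
    request.protocol = request.protocols.tcp
    request.port_mapping = {443: 6443}
    lb_provider.send_request(request)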
Example #16
def handle_mysql_requests():
    mysql_clients = endpoint_from_name("rds-mysql")
    mysql_rds = layer.aws.MySQLRDSManager()
    reqs = {
        req: app
        for req, app in mysql_clients.database_requests().items()
        if app and req not in (mysql_rds.failed_creates | set(mysql_rds.active)
                               | set(mysql_rds.pending))
    }

    for req, app in reqs.items():
        layer.status.maintenance("Creating RDS MySQL database for " + app)
        mysql_rds.create_db(req)

    if mysql_rds.pending:
        layer.status.maintenance("Waiting for RDS MySQL databases")
        completed = mysql_rds.poll_pending()
        for req, db in completed.items():
            mysql_clients.provide_database(
                req,
                host=db["host"],
                port=db["port"],
                database_name=db["database"],
                user=db["username"],
                password=db["password"],
            )

    if mysql_rds.failed_creates:
        layer.status.blocked("Failed to create one or "
                             "more RDS MySQL databases")
    elif mysql_rds.pending:
        layer.status.waiting("Waiting for RDS MySQL databases")
    else:
        layer.status.active("Ready")
Example #17
def update_policies():
    """
    Check for new policy definitions and update as necessary.
    """
    stats = {
        "new": 0,
        "updated": 0,
        "up-to-date": 0,
    }

    def _update_policy(policy_name, policy_arn):
        # check for and update (if needed) a specific policy
        try:
            if _policy_needs_update(policy_arn):
                _add_new_policy_version(policy_arn)
                stats["updated"] += 1
                log("Updated policy {}", policy_name)
            else:
                stats["up-to-date"] += 1
                log("Policy {} up to date", policy_name)
        except DoesNotExistAWSError:
            _ensure_policy(policy_name)
            stats["new"] += 1

    # loop over all policies we currently support (files on disk)
    policies = {f.stem for f in Path("files/policies").glob("*.json")}
    for policy_name in policies:
        if _is_restricted_policy(policy_name):
            # this policy file's contents are not generic; they depend on
            # relation data, which is handled below
            continue
        policy_arn = _get_policy_arn(policy_name)
        _update_policy(policy_name, policy_arn)
    # loop over all relation data looking for parameterized policies
    aws = endpoint_from_name("aws")
    for request in aws.all_requests:
        if (request.requested_object_storage_access
                and request.object_storage_access_patterns):
            # regenerate the app-specific policy .json file, so that we can
            # use that data to compare against the actual policy in AWS
            policy_name = _restrict_policy_for_app(
                "s3-read",
                request.application_name,
                request.object_storage_access_patterns,
            )
            policy_arn = _get_policy_arn(policy_name)
            _update_policy(policy_name, policy_arn)
        if (request.requested_object_storage_management
                and request.object_storage_management_patterns):
            # regenerate the app-specific policy .json file, so that we can
            # use that data to compare against the actual policy in AWS
            policy_name = _restrict_policy_for_app(
                "s3-write",
                request.application_name,
                request.object_storage_management_patterns,
            )
            policy_arn = _get_policy_arn(policy_name)
            _update_policy(policy_name, policy_arn)
    return stats
Example #18
def install_load_balancer():
    ''' Create the default vhost template for load balancing '''
    apiserver = endpoint_from_name('apiserver')
    lb_consumers = endpoint_from_name('lb-consumers')

    if not (server_crt_path.exists() and server_key_path.exists()):
        hookenv.log('Skipping due to missing cert')
        return
    if not (apiserver.services() or lb_consumers.all_requests):
        hookenv.log('Skipping due to requests not ready')
        return

    # At this point the cert and key exist, and they are owned by root.
    chown = ['chown', 'www-data:www-data', str(server_crt_path)]

    # Change the owner to www-data so the nginx process can read the cert.
    subprocess.call(chown)
    chown = ['chown', 'www-data:www-data', str(server_key_path)]

    # Change the owner to www-data so the nginx process can read the key.
    subprocess.call(chown)

    servers = {}
    if apiserver and apiserver.services():
        servers[hookenv.config('port')] = {(h['hostname'], h['port'])
                                           for service in apiserver.services()
                                           for h in service['hosts']}
    for request in lb_consumers.all_requests:
        for server_port in request.port_mapping.keys():
            service = servers.setdefault(server_port, set())
            service.update(
                (backend, backend_port)
                for backend, backend_port in itertools.product(
                    request.backends, request.port_mapping.values()))
    nginx.configure_site(
        'apilb',
        'apilb.conf',
        servers=servers,
        server_certificate=str(server_crt_path),
        server_key=str(server_key_path),
        proxy_read_timeout=hookenv.config('proxy_read_timeout'))

    maybe_write_apilb_logrotate_config()
    for listen_port in servers.keys():
        hookenv.open_port(listen_port)
    status.active('Loadbalancer ready.')
Example #19
def get_bootstrapped_ips():
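    # Collect listen IPs of every bootstrapped unit, including this unit
    # once its own 'cassandra.bootstrapped' flag is set.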
    ips = set()
    if reactive.is_flag_set('cassandra.bootstrapped'):
        ips.add(cassandra.listen_ip_address())
    u = reactive.endpoint_from_name('cluster')
    if u is not None:
        ips.update(u.get_bootstrapped_ips())
    return ips
Example #20
def start_charm():
    if not hookenv.is_leader():
        hookenv.log("This unit is not a leader.")
        return False

    layer.status.maintenance("configuring container")

    image_info = layer.docker_resource.get_info("oci-image")

    mysql = endpoint_from_name("mysql")

    port = hookenv.config("port")

    layer.caas_base.pod_spec_set(
        {
            "version": 3,
            "containers": [
                {
                    "name": "katib-db-manager",
                    "command": ["./katib-db-manager"],
                    "imageDetails": {
                        "imagePath": image_info.registry_path,
                        "username": image_info.username,
                        "password": image_info.password,
                    },
                    "ports": [{"name": "api", "containerPort": port}],
                    "envConfig": {
                        "DB_NAME": "mysql",
                        "DB_USER": "******",
                        "DB_PASSWORD": mysql.root_password(),
                        "KATIB_MYSQL_DB_HOST": mysql.host(),
                        "KATIB_MYSQL_DB_PORT": mysql.port(),
                        "KATIB_MYSQL_DB_DATABASE": "katib",
                    },
                    "kubernetes": {
                        "readinessProbe": {
                            "exec": {
                                "command": ["/bin/grpc_health_probe", f"-addr=:{port}"]
                            },
                            "initialDelaySeconds": 5,
                        },
                        "livenessProbe": {
                            "exec": {
                                "command": ["/bin/grpc_health_probe", f"-addr=:{port}"]
                            },
                            "initialDelaySeconds": 10,
                            "periodSeconds": 60,
                            "failureThreshold": 5,
                        },
                    },
                }
            ],
        },
    )

    layer.status.maintenance("creating container")
    clear_flag("mysql.changed")
    set_flag("charm.started")
Example #21
def start_charm():
    if not hookenv.is_leader():
        hookenv.log("This unit is not a leader.")
        return False

    layer.status.maintenance('configuring container')

    image_info = layer.docker_resource.get_info('oci-image')

    admin_port = hookenv.config('admin-port')
    port = hookenv.config('port')

    grpc = endpoint_from_name('metadata-grpc').services()[0]
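    # Render the Envoy bootstrap config from a local template (a
    # Jinja2-style Template.render), pointing it at the service published
    # by metadata-grpc.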
    envoy_yaml = Template(Path('files/envoy.yaml.tmpl').read_text()).render(
        port=port,
        grpc_host=grpc['service_name'],
        grpc_port=grpc['hosts'][0]['port'],
        admin_port=admin_port,
    )

    layer.caas_base.pod_spec_set(
        spec={
            'version': 2,
            'containers': [{
                'name': 'metadata-envoy',
                'imageDetails': {
                    'imagePath': image_info.registry_path,
                    'username': image_info.username,
                    'password': image_info.password,
                },
                'command': ['/usr/local/bin/envoy'],
                'args': ['-c', '/config/envoy.yaml'],
                'ports': [
                    {
                        'name': 'grpc',
                        'containerPort': port
                    },
                    {
                        'name': 'admin',
                        'containerPort': admin_port
                    },
                ],
                'files': [{
                    'name': 'config',
                    'mountPath': '/config',
                    'files': {
                        'envoy.yaml': envoy_yaml
                    },
                }],
            }],
        })

    layer.status.maintenance('creating container')
    set_flag('charm.started')
Example #22
def configure_proxy():
    """Configure reverse proxy settings when haproxy is related."""
    hookenv.status_set("maintenance", "Applying reverse proxy configuration")
    hookenv.log("Configuring reverse proxy via: {}".format(hookenv.remote_unit()))

    interface = endpoint_from_name("reverseproxy")
    gitlab.configure_proxy(interface)

    hookenv.status_set("active", HEALTHY)
    set_flag("reverseproxy.configured")
Example #23
def get_kube_api_servers():
    """Return the list of kubernetes API endpoint URLs."""
    kube_control = endpoint_from_name("kube-control")
    kube_api = endpoint_from_name("kube-api-endpoint")
    # Prefer endpoints from the kube-api-endpoint relation when available.
    if kube_api.services():
        return [
            "https://{0}:{1}".format(unit["hostname"], unit["port"])
            for service in kube_api.services()
            for unit in service["hosts"]
        ]
    if hasattr(kube_control, "get_api_endpoints"):
        return kube_control.get_api_endpoints()
    hookenv.log(
        "Unable to determine API server URLs from either kube-control "
        "or kube-api-endpoint relation",
        hookenv.ERROR,
    )
    return []
Example #24
def start_charm():
    layer.status.maintenance('configuring container')

    mysql = endpoint_from_name('mysql')

    image_info = layer.docker_resource.get_info('oci-image')

    port = hookenv.config('port')
    db_name = hookenv.config('database-name')

    layer.caas_base.pod_spec_set(
        spec={
            'version': 2,
            'containers': [
                {
                    'name': 'metadata-api',
                    'command': [
                        "./server/server",
                        f"--http_port={port}",
                        f"--mysql_service_host={mysql.host()}",
                        f"--mysql_service_port={mysql.port()}",
                        "--mysql_service_user=root",
                        f"--mysql_service_password={mysql.root_password()}",
                        f"--mlmd_db_name={db_name}",
                    ],
                    'imageDetails': {
                        'imagePath': image_info.registry_path,
                        'username': image_info.username,
                        'password': image_info.password,
                    },
                    'ports': [{'name': 'http', 'containerPort': port}],
                    'config': {'MYSQL_ROOT_PASSWORD': mysql.root_password()},
                    'kubernetes': {
                        'readinessProbe': {
                            'httpGet': {
                                'path': '/api/v1alpha1/artifact_types',
                                'port': 'http',
                                'httpHeaders': [
                                    {'name': 'ContentType', 'value': 'application/json'}
                                ],
                            },
                            'initialDelaySeconds': 3,
                            'periodSeconds': 5,
                            'timeoutSeconds': 2,
                        }
                    },
                }
            ],
        }
    )

    layer.status.maintenance('creating container')
    set_flag('charm.started')
Example #25
def configure_mysql():
    mysql = endpoint_from_name('mysql')

    for i in range(len(mysql.relations)):
        mysql.provide_database(
            request_id=i,
            database_name=hookenv.config('database'),
            port=hookenv.config('port'),
            host=hookenv.application_name(),
            user='******',
            password=hookenv.config('root-password'),
        )
Example #26
def configure_proxy():
    hookenv.status_set(
        'maintenance',
        'Applying reverse proxy configuration')
    hookenv.log("Configuring reverse proxy via: {}".format(
        hookenv.remote_unit()))

    interface = endpoint_from_name('reverseproxy')
    taskd.configure_proxy(interface)

    hookenv.status_set('active', HEALTHY)
    set_flag('reverseproxy.configured')
Example #27
def start_charm():
    if not hookenv.is_leader():
        hookenv.log("This unit is not a leader.")
        return False

    layer.status.maintenance('configuring container')

    image_info = layer.docker_resource.get_info('oci-image')

    api = endpoint_from_name(
        'pipelines-api').services()[0]['hosts'][0]['hostname']

    layer.caas_base.pod_spec_set({
        'version': 2,
        'serviceAccount': {
            'global': True,
            'rules': [
                {
                    'apiGroups': ['argoproj.io'],
                    'resources': ['workflows'],
                    'verbs': ['get', 'list', 'watch'],
                },
                {
                    'apiGroups': ['kubeflow.org'],
                    'resources': ['scheduledworkflows'],
                    'verbs': ['get', 'list', 'watch'],
                },
            ],
        },
        'containers': [{
            'name': 'pipelines-persistence',
            'args': [
                'persistence_agent',
                '--alsologtostderr=true',
                f'--mlPipelineAPIServerName={api}',
                f'--namespace={os.environ["JUJU_MODEL_NAME"]}',
            ],
            'imageDetails': {
                'imagePath': image_info.registry_path,
                'username': image_info.username,
                'password': image_info.password,
            },
            'config': {
                'POD_NAMESPACE': os.environ['JUJU_MODEL_NAME']
            },
        }],
    })

    layer.status.maintenance('creating container')
    set_flag('charm.started')
Example #28
def configure():
    layer.status.maintenance('Configuring snap-proxy')
    db = endpoint_from_name('db')
    public_ip = hookenv.unit_public_ip()
    run(['snap-proxy', 'config', f'proxy.db.connection={db.master.uri}'],
        check=True)
    run(['snap-proxy', 'config', f'proxy.domain={public_ip}'],
        check=True)
    run(['snap-proxy', 'generate-keys'], check=True)

    hookenv.open_port(80)
    set_flag('snap-store-proxy.configured')
Example #29
def _get_lb_port(prefer_private=True):
    lb_consumers = endpoint_from_name('lb-consumers')

    # prefer a port from the newer, more explicit relations
    public = filter(lambda r: r.public, lb_consumers.all_requests)
    private = filter(lambda r: not r.public, lb_consumers.all_requests)
    lb_reqs = (private, public) if prefer_private else (public, private)
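    # Return the first mapped port found, scanning the preferred group first.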
    for lb_req in itertools.chain(*lb_reqs):
        return list(lb_req.port_mapping)[0]

    # fall back to the config
    return hookenv.config('port')
Example #30
def configure():
    db = endpoint_from_name('db')
    public_ip = hookenv.unit_public_ip()
    run(['snap-proxy', 'config', f'proxy.db.connection={db.master.uri}'],
        check=True)
    run(['snap-proxy', 'config', f'proxy.domain={public_ip}'],
        check=True)
    run(['snap-proxy', 'generate-keys'], check=True)
    hookenv.open_port(80)
    layer.status.blocked(f'Please run: '
                         f'juju ssh {hookenv.local_unit()} '
                         f'"sudo snap-proxy register"')
    set_flag('configured')
Example #31
def get_external_api_endpoints():
    """
    Determine the best API endpoints for an external client to connect to.

    May return an empty list if an endpoint is expected but not yet available.
    """
    try:
        goal_state = hookenv.goal_state()
    except NotImplementedError:
        goal_state = {}
    goal_state.setdefault("relations", {})

    # Config takes precedence.
    endpoints_from_config = get_endpoints_from_config()
    if endpoints_from_config:
        return endpoints_from_config

    # If the external LB relation is attached, use that or nothing. If it's
    # not attached but the internal LB relation is, use that or nothing.
    for lb_type in ("external", "internal"):
        lb_endpoint = "loadbalancer-" + lb_type
        lb_name = "api-server-" + lb_type
        api_port = EXTERNAL_API_PORT if lb_type == "external" else STANDARD_API_PORT
        if lb_endpoint in goal_state["relations"]:
            lb_provider = endpoint_from_name(lb_endpoint)
            lb_response = lb_provider.get_response(lb_name)
            if not lb_response or lb_response.error:
                return []
            return [(lb_response.address, api_port)]

    # Support the older loadbalancer relation (public-address interface).
    if "loadbalancer" in goal_state["relations"]:
        loadbalancer = endpoint_from_name("loadbalancer")
        lb_addresses = loadbalancer.get_addresses_ports()
        return [(host.get("public-address"), host.get("port"))
                for host in lb_addresses]

    # No LBs of any kind, so fall back to public-address.
    return [(hookenv.unit_public_ip(), STANDARD_API_PORT)]
Example #32
def start_charm():
    layer.status.maintenance('configuring container')

    manager_image = layer.docker_resource.get_info('manager-image')
    restful_image = layer.docker_resource.get_info('restful-image')

    mysql = endpoint_from_name('mysql')

    manager_port = hookenv.config('manager-port')
    restful_port = hookenv.config('restful-port')

    layer.caas_base.pod_spec_set(
        {
            'containers': [
                {
                    'name': 'katib-manager',
                    'command': ["./katib-manager"],
                    'imageDetails': {
                        'imagePath': manager_image.registry_path,
                        'username': manager_image.username,
                        'password': manager_image.password,
                    },
                    'ports': [{'name': 'manager', 'containerPort': manager_port}],
                    'config': {'MYSQL_ROOT_PASSWORD': mysql.password()},
                    'livenessProbe': {
                        'exec': {'command': ["/bin/grpc_health_probe", f"-addr=:{manager_port}"]},
                        'initialDelaySeconds': 10,
                    },
                    'readinessProbe': {
                        'exec': {'command': ["/bin/grpc_health_probe", f"-addr=:{manager_port}"]},
                        'initialDelaySeconds': 5,
                    },
                },
                {
                    'name': 'katib-manager-rest',
                    'command': ["./katib-manager-rest"],
                    'imageDetails': {
                        'imagePath': restful_image.registry_path,
                        'username': restful_image.username,
                        'password': restful_image.password,
                    },
                    'ports': [{'name': 'restful', 'containerPort': restful_port}],
                },
            ]
        }
    )

    layer.status.maintenance('creating container')
    set_flag('charm.started')
Example #33
def start_charm():
    if not hookenv.is_leader():
        hookenv.log("This unit is not a leader.")
        return False

    layer.status.maintenance('configuring container')

    image_info = layer.docker_resource.get_info('oci-image')

    mysql = endpoint_from_name('mysql')

    port = hookenv.config('port')
    db_name = hookenv.config('database-name')

    layer.caas_base.pod_spec_set(
        spec={
            'version': 2,
            'containers': [{
                'name': 'metadata-grpc',
                'imageDetails': {
                    'imagePath': image_info.registry_path,
                    'username': image_info.username,
                    'password': image_info.password,
                },
                'command': ['/bin/metadata_store_server'],
                'args': [
                    f"--grpc_port={port}",
                    f"--mysql_config_database={db_name}",
                    f"--mysql_config_host={mysql.host()}",
                    f"--mysql_config_port={mysql.port()}",
                    "--mysql_config_user=root",
                    f"--mysql_config_password={mysql.root_password()}",
                ],
                'ports': [{
                    'name': 'grpc',
                    'containerPort': port
                }],
                'config': {
                    'METADATA_GRPC_SERVICE_HOST': hookenv.service_name(),
                    'METADATA_GRPC_SERVICE_PORT': port,
                },
            }],
        })

    layer.status.maintenance('creating container')
    set_flag('charm.started')
Example #34
def start_charm():
    layer.status.maintenance('configuring container')

    image_info = layer.docker_resource.get_info('oci-image')

    redis = endpoint_from_name('redis')

    rest_port = hookenv.config('rest-port')
    grpc_port = hookenv.config('grpc-port')

    layer.caas_base.pod_spec_set({
        'containers': [{
            'name': 'seldon-apiserver',
            'imageDetails': {
                'imagePath': image_info.registry_path,
                'username': image_info.username,
                'password': image_info.password,
            },
            'ports': [
                {
                    'name': 'rest',
                    'containerPort': rest_port
                },
                {
                    'name': 'grpc',
                    'containerPort': grpc_port
                },
            ],
            'config': {
                'SELDON_CLUSTER_MANAGER_REDIS_HOST':
                    redis.all_joined_units[0].application_name,
                'SELDON_CLUSTER_MANAGER_POD_NAMESPACE':
                    os.environ['JUJU_MODEL_NAME'],
                'SELDON_ENGINE_KAFKA_SERVER': 'kafka:9092',
                'SELDON_SINGLE_NAMESPACE': True,
            },
        }]
    })

    layer.status.maintenance('creating container')
    set_flag('charm.started')
Example #35
def mirror():
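    # Mirror credentials and publish connection details on every relation
    # of both client endpoints.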
    for relname in ['database', 'database-admin']:
        for rel in reactive.endpoint_from_name(relname).relations:
            mirror_credentials(rel)
            publish_general(rel)