    def __init__(self, *args):
        super().__init__(*args)
        if not self.unit.is_leader():
            # We can't do anything useful when not the leader, so do nothing.
            self.model.unit.status = WaitingStatus("Waiting for leadership")
            return
        self.log = logging.getLogger(__name__)
        self.image = OCIImageResource(self, "oci-image")

        try:
            self.interfaces = get_interfaces(self)
        except NoVersionsListed as err:
            self.model.unit.status = WaitingStatus(str(err))
            return
        except NoCompatibleVersions as err:
            self.model.unit.status = BlockedStatus(str(err))
            return
        else:
            self.model.unit.status = ActiveStatus()

        for event in [
                self.on.install,
                self.on.upgrade_charm,
                self.on.config_changed,
        ]:
            self.framework.observe(event, self.main)

        self.framework.observe(self.on["ingress"].relation_changed,
                               self.configure_mesh)
Example #2
    def handle(self, event):
        for resource in self._resources.keys():
            if not self._resources[resource].fetch(self._framework.resources):
                self._framework.unit_status_set(
                    BlockedStatus(
                        'Missing or invalid image resource: {}'.format(
                            resource)))
                logger.info(
                    'Missing or invalid image resource: {}'.format(resource))
                return

        if not self._framework.unit_is_leader:
            self._framework.unit_status_set(
                WaitingStatus('Waiting for leader'))
            logger.info('Delegating pod configuration to the leader')
            return

        spec = self._builder.build_spec()
        self._framework.unit_status_set(
            MaintenanceStatus('Configuring container'))
        self._framework.pod_spec_set(spec)
        if self._pod.is_ready:
            self._framework.unit_status_set(ActiveStatus('ready'))
            logger.info('Pod is ready')
            return
        self._framework.unit_status_set(MaintenanceStatus('Pod is not ready'))
        logger.info('Pod is not ready')
Example #3
 def _on_check_status_and_write_config(self, event):
     if not self._check_status():
         event.defer()
         return
     slurm_config = dict(self._slurmd.get_slurm_config())
     self._slurm_manager.render_config_and_restart(slurm_config)
     self.unit.status = ActiveStatus("Slurmd Available")
Example #4
 def _on_config_changed(self, event):
     """Handle the config-changed event"""
     # Get the gosherve container so we can configure/manipulate it
     container = self.unit.get_container("gosherve")
     # Create a new config layer
     layer = self._gosherve_layer()
     try:
         # Get the current config
         services = container.get_plan().to_dict().get("services", {})
     except ConnectionError:
         # Since this is a config-changed handler and that hook can execute
         # before pebble is ready, we may get a connection error here. Let's
         # defer the event, meaning it will be retried the next time any
         # hook is executed. We don't have an explicit handler for
         # `self.on.gosherve_pebble_ready` but this method will be rerun
         # when that condition is met (because of `event.defer()`), and so
         # the `get_container` call will succeed and we'll continue to the
         # subsequent steps.
         event.defer()
         return
     # Check if there are any changes to services
     if services != layer["services"]:
         # Changes were made, add the new layer
         container.add_layer("gosherve", layer, combine=True)
         logging.info("Added updated layer 'gosherve' to Pebble plan")
         # Stop the service if it is already running
         if container.get_service("gosherve").is_running():
             container.stop("gosherve")
         # Restart it and report a new status to Juju
         container.start("gosherve")
         logging.info("Restarted gosherve service")
     # All is well, set an ActiveStatus
     self.unit.status = ActiveStatus()
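
The _gosherve_layer() helper used above is not shown in this example. Below is a minimal sketch of the layer dict it presumably returns, in the same Pebble layer format as the zookeeper example later in this listing; the command and environment values are assumptions, not the original charm's.

 def _gosherve_layer(self):
     """Return a Pebble layer for gosherve (sketch; command/env values assumed)."""
     return {
         "summary": "gosherve layer",
         "description": "pebble config layer for gosherve",
         "services": {
             "gosherve": {
                 "override": "replace",
                 "summary": "gosherve",
                 "command": "/gosherve",  # assumed binary path
                 "startup": "enabled",
                 "environment": {
                     # assumed charm config option name
                     "REDIRECT_MAP_URL": self.config["redirect-map"],
                 },
             }
         },
     }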
Example #5
    def test__it_blocks_until_pod_is_ready(self,
                                           mock_build_juju_unit_status_func,
                                           mock_k8s_mod):
        # Setup
        mock_fw_adapter_cls = \
            create_autospec(adapters.framework.FrameworkAdapter,
                            spec_set=True)
        mock_fw = mock_fw_adapter_cls.return_value

        mock_juju_unit_states = [
            MaintenanceStatus(str(uuid4())),
            MaintenanceStatus(str(uuid4())),
            ActiveStatus(str(uuid4())),
        ]
        mock_build_juju_unit_status_func.side_effect = mock_juju_unit_states

        mock_event_cls = create_autospec(EventBase, spec_set=True)
        mock_event = mock_event_cls.return_value

        # Exercise
        charm.on_config_changed_handler(mock_event, mock_fw)

        # Assert
        assert mock_fw.set_unit_status.call_count == len(mock_juju_unit_states)
        assert mock_fw.set_unit_status.call_args_list == [
            call(status) for status in mock_juju_unit_states
        ]
Example #6
def test_main_with_relation(harness):
    harness.set_leader(True)
    harness.add_oci_resource(
        "oci-image",
        {
            "registrypath": "ci-test",
            "username": "",
            "password": "",
        },
    )
    rel_id = harness.add_relation("object-storage", "argo-controller")
    harness.add_relation_unit(rel_id, "argo-controller/0")
    harness.update_relation_data(
        rel_id,
        "argo-controller",
        {"_supported_versions": yaml.dump(["v1"])},
    )
    rel_id = harness.add_relation("object-storage", "foobar")
    harness.add_relation_unit(rel_id, "foobar/0")
    harness.update_relation_data(
        rel_id,
        "foobar",
        {"_supported_versions": yaml.dump(["v1"])},
    )
    harness.begin_with_initial_hooks()
    assert harness.charm.model.unit.status == ActiveStatus("")

    data = yaml.safe_load(harness.get_relation_data(rel_id, "minio")["data"])
    assert data["access-key"] == "minio"
    assert data["namespace"] is None
    assert data["port"] == 9000
    assert data["secure"] is False
    assert len(data["secret-key"]) == 30
    assert data["service"] == "minio"
Example #7
    def test_send_config_dhss_enabled(self, _render, _get_loader,
                                      _send_backend_config, _install_pkgs):
        _render.return_value = 'test-rendered-manila-backend-config'
        _get_loader.return_value = 'test-loader'
        config = copy.deepcopy(self.REQUIRED_CHARM_CONFIG_BY_DEFAULT)
        config['driver-handles-share-servers'] = True
        config['root-volume-aggregate-name'] = 'test_cluster_01_VM_DISK_1'
        self.harness.update_config(config)
        self.harness.begin_with_initial_hooks()

        # Validate workflow with incomplete relation data
        self.assertFalse(self.harness.charm.state.is_started)
        _render.assert_not_called()
        _get_loader.assert_not_called()
        _send_backend_config.assert_not_called()
        _install_pkgs.assert_called_once_with()
        self.assertEqual(self.harness.charm.unit.status, UnknownStatus())

        # Validate workflow with complete relation data
        rel_id = self.harness.add_relation('manila-plugin', 'manila')
        self.harness.add_relation_unit(rel_id, 'manila/0')
        self.harness.update_relation_data(rel_id, 'manila/0', {
            '_authentication_data':
            json.dumps({'data': 'test-manila-auth-data'})
        })
        self.assertTrue(self.harness.charm.state.is_started)
        _render.assert_called_once_with(source='manila.conf',
                                        template_loader='test-loader',
                                        target=None,
                                        context=self.harness.charm.adapters)
        _get_loader.assert_called_once_with('templates/', 'default')
        _send_backend_config.assert_called_once_with(
            'netapp-ontap', 'test-rendered-manila-backend-config')
        self.assertEqual(self.harness.charm.unit.status,
                         ActiveStatus('Unit is ready'))
Example #8
 def _on_install(self, event):
     self.unit.status = MaintenanceStatus("Installing packages")
     RunnerManager.install_deps()
     runner_manager = self._get_runner_manager()
     if runner_manager:
         self.unit.status = MaintenanceStatus("Installing runner binary")
         try:
             self._stored.runner_bin_url = runner_manager.get_latest_runner_bin_url(
             )
             runner_manager.update_runner_bin(self._stored.runner_bin_url)
         except Exception as e:
             logger.exception("Failed to update runner binary")
             self.unit.status = BlockedStatus(
                 f"Failed to update runner binary: {e}")
             return
         self.unit.status = MaintenanceStatus("Starting runners")
         try:
             self._reconcile_runners(runner_manager)
         except RunnerError as e:
             logger.exception("Failed to start runners")
             self.unit.status = BlockedStatus(
                 f"Failed to start runners: {e}")
         else:
             self.unit.status = ActiveStatus()
     else:
         self.unit.status = BlockedStatus(
             "Missing token or org/repo path config")
Example #9
    def configure_pod(self, event):
        if not self.framework.model.unit.is_leader():
            self.model.unit.status = WaitingStatus('Not a leader')
            return

        mattermost_image_details = self.mattermost_image.fetch()
        self.model.unit.status = MaintenanceStatus('Configuring pod')
        config = self.model.config
        self.model.pod.set_spec({
            'containers': [{
                'name': self.framework.model.app.name,
                'imageDetails': mattermost_image_details,
                'ports': [{
                    'containerPort': int(self.framework.model.config['mattermost_port']),
                    'protocol': 'TCP',
                }],
                'config': {
                    'MATTERMOST_HTTPD_LISTEN_PORT': int(config['mattermost_port']),
                    'DB_HOST': config['pg_db_host'],
                    'DB_PORT_NUMBER': int(config['pg_db_port']),
                    'MM_USERNAME': config['pg_user'],
                    'MM_PASSWORD': config['pg_password'],
                    'MM_ENABLEOPENSERVER': config['open_server'],
                },
            }]
        })
        self.state.is_started = True
        self.model.unit.status = ActiveStatus()
Example #10
    def configure_pod(self):
        """Set Juju / Kubernetes pod spec built from `_build_pod_spec()`."""

        # check for valid high availability (or single node) configuration
        self._check_high_availability()
        self._check_config()

        # decide whether we can set the pod spec or not
        # TODO: is this necessary?
        if isinstance(self.unit.status, BlockedStatus):
            log.error('Application is in a blocked state. '
                      'Please resolve before pod spec can be set.')
            return

        if not self.unit.is_leader():
            self.unit.status = ActiveStatus()
            return

        # general pod spec component updates
        self.unit.status = MaintenanceStatus('Building pod spec.')
        pod_spec = self._build_pod_spec()
        self._update_pod_data_source_config_file(pod_spec)
        self._update_pod_config_ini_file(pod_spec)

        # set the pod spec with Juju
        self.model.pod.set_spec(pod_spec)
        self.unit.status = APPLICATION_ACTIVE_STATUS
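
APPLICATION_ACTIVE_STATUS is not defined in this snippet; presumably it is a module-level constant, roughly:

from ops.model import ActiveStatus

# Assumed definition of the constant used above (not shown in the snippet).
APPLICATION_ACTIVE_STATUS = ActiveStatus()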
Example #11
    def on_pebble_ready(self, event: WorkloadEvent) -> None:
        """Run the pebble ready hook.

        Args:
            event: the event object
        """
        if self._num_units() != self._goal_units():
            self.unit.status = MaintenanceStatus("Waiting for units")
            event.defer()
            return

        if self._set_config_file(event) is False:
            self.unit.status = MaintenanceStatus("Waiting for IP address")
            event.defer()
            return

        if self._set_layer():
            self._container.restart("cassandra")

        if self.unit.is_leader():
            if not self.cassandra.root_password(event):
                self.unit.status = MaintenanceStatus("Waiting for Database")
                event.defer()
                return

        if self.model.relations["monitoring"]:
            self._setup_monitoring()

        self.provider.update_address("database", self._hostname())

        self.unit.status = ActiveStatus()
Example #12
 def test_status(self):
     config_1 = {
         "MYSQL_ROOT_PASSWORD": "******",
     }
     self.harness.set_leader(True)
     self.harness.update_config(config_1)
     self.assertEqual(self.harness.charm.unit.status, ActiveStatus())
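
A test like this depends on a Harness built in setUp. A minimal sketch of that setup, assuming the charm under test is importable as MySQLCharm (hypothetical name):

import unittest

from ops.testing import Harness

from charm import MySQLCharm  # hypothetical import for the charm under test


class TestCharm(unittest.TestCase):
    def setUp(self):
        self.harness = Harness(MySQLCharm)
        self.addCleanup(self.harness.cleanup)
        self.harness.begin()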
Example #13
    def _on_config_changed(self, _):
        """Handle changes to the application configuration"""
        restart = False

        # Check if the application repo has been changed
        if self.config["application-repo"] != self._stored.repo:
            logger.info("application repo changed, installing")
            self._stored.repo = self.config["application-repo"]
            self._setup_application()
            restart = True

        if self.config["port"] != self._stored.port:
            logger.info("port config changed, configuring")
            # Close the existing application port
            check_call(["close-port", f"{self._stored.port}/TCP"])
            # Reconfigure the systemd unit to specify the new port
            self._stored.port = self.config["port"]
            self._render_systemd_unit()
            # Ensure the correct port is opened for the application
            check_call(["open-port", f"{self._stored.port}/TCP"])
            restart = True

        if restart:
            logger.info("restarting hello-juju application")
            systemd.service_restart("hello-juju")

        self.unit.status = ActiveStatus()
Example #14
    def _on_database_master_changed(self, event):
        """Handler the case where a new PostgreSQL DB master is available"""
        if event.database != self.app.name:
            # Leader has not yet set the database name/requirements.
            return

        # event.master will be None if the master database is unavailable,
        # or a pgsql.ConnectionString instance otherwise
        if event.master:
            self.unit.status = MaintenanceStatus(
                "configuring database settings")
            # Store the connection uri in state
            # Replace the first part of the URL with pg8000 equivalent
            self._stored.conn_str = event.master.uri.replace(
                "postgresql://", "postgresql+pg8000://")
            # Render the settings file with the database connection details
            self._render_settings_file()
            # Ensure the database tables are created in the master
            self._create_database_tables()
            # Restart the service
            systemd.service_restart("hello-juju")
            # Set back to active status
            self.unit.status = ActiveStatus()
        else:
            # Defer this event until the master is available
            event.defer()
            return
Example #15
 def test_on_update_status_when_unit_is_not_leader(self):
     # Given
     self.harness.set_leader(False)
     # When
     self.harness.charm.on.update_status.emit()
     # Then
     self.assertEqual(self.harness.charm.unit.status, ActiveStatus())
Example #16
    def set_pod_spec(self, event):
        try:
            image_details = self.image.fetch()
        except OCIImageResourceError as e:
            self.model.unit.status = e.status
            log.info(e)
            return

        secret_key = self.model.config["secret-key"] or self._stored.secret_key

        self.model.unit.status = MaintenanceStatus("Setting pod spec")
        self.model.pod.set_spec({
            "version": 3,
            "containers": [{
                "name": "minio",
                "args": ["server", "/data"],
                "imageDetails": image_details,
                "ports": [{
                    "name": "minio",
                    "containerPort": int(self.model.config["port"]),
                }],
                "envConfig": {
                    "MINIO_ACCESS_KEY": self.model.config["access-key"],
                    "MINIO_SECRET_KEY": secret_key,
                },
            }],
        })
        self.model.unit.status = ActiveStatus()
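
The self._stored.secret_key fallback implies the charm generates a key once when no secret-key config is provided. A hedged sketch of that one-time initialization, consistent with the 30-character secret asserted in Example #6 (the exact generation code is an assumption):

from random import choices
from string import ascii_letters, digits

# Assumed one-time initialization (e.g. in __init__): generate a 30-character
# secret when none has been stored yet.
self._stored.set_default(
    secret_key="".join(choices(ascii_letters + digits, k=30)),
)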
Example #17
 def test_on_config_changed_when_unit_is_not_leader(self):
     # Given
     self.harness.set_leader(False)
     # When
     self.harness.charm.on.config_changed.emit()
     # Then
     self.assertEqual(self.harness.charm.unit.status, ActiveStatus())
Example #18
    def __init__(self, *args):
        super().__init__(*args)

        if not self.unit.is_leader():
            # We can't do anything useful when not the leader, so do nothing.
            self.model.unit.status = WaitingStatus("Waiting for leadership")
            return

        try:
            self.interfaces = get_interfaces(self)
        except NoVersionsListed as err:
            self.model.unit.status = WaitingStatus(str(err))
            return
        except NoCompatibleVersions as err:
            self.model.unit.status = BlockedStatus(str(err))
            return
        else:
            self.model.unit.status = ActiveStatus()

        self.log = logging.getLogger(__name__)

        # Every lightkube API call will use the model name as the namespace by default
        self.lightkube_client = Client(namespace=self.model.name,
                                       field_manager="lightkube")

        self.framework.observe(self.on.start, self.start)
        self.framework.observe(self.on["istio-pilot"].relation_changed,
                               self.start)
        self.framework.observe(self.on.config_changed, self.start)
        self.framework.observe(self.on.remove, self.remove)
Example #19
    def _write_config_and_restart_slurmdbd(self, event):
        """Check for prereqs before writing config/restart of slurmdbd."""
        # Ensure all pre-conditions are met with _check_status(), if not
        # defer the event.
        if not self._check_status():
            event.defer()
            return

        db_info = self._stored.db_info
        slurmdbd_info = self._slurmdbd_peer.get_slurmdbd_info()
        slurmdbd_stored_config = dict(self._stored.slurmdbd_config)

        slurmdbd_config = {
            **self.config,
            **slurmdbd_info,
            **db_info,
        }

        if slurmdbd_config != slurmdbd_stored_config:
            self._stored.slurmdbd_config = slurmdbd_config
            self._slurm_manager.render_slurm_configs(slurmdbd_config)
            self._slurm_manager.restart_slurm_component()

            # Only the leader can set relation data on the application.
            # Enforce that nothing other than the leader tries to set
            # application relation data.
            if self.model.unit.is_leader():
                self._slurmdbd.set_slurmdbd_info_on_app_relation_data(
                    slurmdbd_config,
                )
        self.unit.status = ActiveStatus("slurmdbd available")
Example #20
    def _on_config_changed(self, event):
        logging.debug('_on_config_changed(): entering')
        conf = self.model.config

        if not Path(".installed").exists():
            self.unit.status = WaitingStatus(
                "Waiting on nhc to finish installing...")
            event.defer()
            return

        config_auto = conf['nhc-config-autodetect']
        logging.debug(f'_on_config_changed(): config_auto={config_auto}')

        # Write the nhc config
        self._nhc_ops_manager.write_nhc_config(config_auto, conf['nhc-config'])
        # Update relation data with config values if we are the leader
        if self.model.unit.is_leader():
            health_check_interval = conf['health-check-interval']
            health_check_node_state = conf['health-check-node-state']
            self._nhc_provides.update_relation_data(
                health_check_interval=health_check_interval,
                health_check_node_state=health_check_node_state,
            )

        self._nhc_ops_manager.set_nhc_debug(conf.get('debug'))
        self.unit.status = ActiveStatus("Config update complete")
Example #21
    def _on_zookeeper_pebble_ready(self, event):
        """Define and start a workload using the Pebble API.

        Learn more about Pebble layers at https://github.com/canonical/pebble
        """
        container = event.workload

        relation = self.model.get_relation('replicas')
        my_ingress_address = self._get_my_ingress_address(relation)
        all_unit_ingress_addresses = self._get_all_unit_ingress_addresses(
            relation)
        self.__push_zookeeper_config(container, my_ingress_address,
                                     all_unit_ingress_addresses)

        pebble_layer = {
            "summary": "zookeeper layer",
            "description": "pebble config layer for zookeeper",
            "services": {
                self.__PEBBLE_SERVICE_NAME: {
                    "override": "replace",
                    "summary": "zookeeper",
                    "command":
                    "/docker-entrypoint.sh zkServer.sh start-foreground",
                    "startup": "enabled",
                }
            },
        }
        container.add_layer("zookeeper", pebble_layer, combine=True)

        # Autostart any services that were defined with startup: enabled
        container.autostart()

        # Learn more about statuses in the SDK docs:
        # https://juju.is/docs/sdk/constructs#heading--statuses
        self.unit.status = ActiveStatus()
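
The __PEBBLE_SERVICE_NAME constant is not shown in this snippet; presumably it is a class-level constant naming the service defined in the layer above, e.g.:

    # Assumed class-level constant used as the service key in the layer above.
    __PEBBLE_SERVICE_NAME = "zookeeper"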
Example #22
    def _on_config_changed(self, event) -> None:
        # Defer the config-changed event if we do not have sufficient privileges
        if not self._k8s_auth():
            event.defer()
            return

        # Default StatefulSet needs patching for extra volume mounts. Ensure that
        # the StatefulSet is patched on each invocation.
        if not self._statefulset_patched:
            self._patch_stateful_set()
            self.unit.status = MaintenanceStatus(
                "waiting for changes to apply")

        try:
            # Configure and start the Metrics Scraper
            self._config_scraper()
            # Configure and start the Kubernetes Dashboard
            self._config_dashboard()
        except ConnectionError:
            logger.info(
                "pebble socket not available, deferring config-changed")
            event.defer()
            return

        self.unit.status = ActiveStatus()
Example #23
 def _on_config_changed(self, _):
     self._stored.configured = False
     self.framework.model.unit.status = MaintenanceStatus("Configuring pod")
     logger.info('Reapplying the updated pod spec')
     self.set_pod_spec()
     self.framework.model.unit.status = ActiveStatus("Ready")
     self._stored.configured = True
Example #24
    def on_start(self, event):
        """Called when the charm is being installed"""
        if not self.peers.is_joined:
            event.defer()
            return

        unit = self.model.unit

        if not SSHProxy.has_ssh_key():
            unit.status = MaintenanceStatus("Generating SSH keys...")
            pubkey = None
            privkey = None
            if self.is_leader:
                if self.peers.is_cluster_initialized:
                    SSHProxy.write_ssh_keys(
                        public=self.peers.ssh_public_key,
                        private=self.peers.ssh_private_key,
                    )
                else:
                    SSHProxy.generate_ssh_key()
                    self.on.ssh_keys_initialized.emit(
                        SSHProxy.get_ssh_public_key(),
                        SSHProxy.get_ssh_private_key())
                unit.status = ActiveStatus()
            else:
                unit.status = WaitingStatus(
                    "Waiting for leader to populate the keys")
Example #25
 def update_status(self):
     logging.info("Updating status")
     if self.state.series_upgrade:
         self.unit.status = BlockedStatus(
             'Ready for do-release-upgrade and reboot. '
             'Set complete when finished.')
         return
     if self.state.is_paused:
         self.unit.status = MaintenanceStatus(
             "Paused. Use 'resume' action to resume normal service.")
         return
     missing_relations = []
     for relation in self.REQUIRED_RELATIONS:
         if not self.model.get_relation(relation):
             missing_relations.append(relation)
     if missing_relations:
         self.unit.status = BlockedStatus('Missing relations: {}'.format(
             ', '.join(missing_relations)))
         return
     try:
         # Custom checks return True if the check passed, else False.
         # If the check failed the custom check will have set the status.
         if not self.custom_status_check():
             return
     except NotImplementedError:
         pass
     if self.state.is_started:
         self.unit.status = ActiveStatus('Unit is ready')
     else:
         self.unit.status = WaitingStatus('Charm configuration in progress')
     logging.info("Status updated")
Example #26
    def _on_check_status_and_write_config(self, event):
        slurm_config = self._check_status()
        if not slurm_config:
            event.defer()
            return

        # if slurm_config['configless']:
        #    slurmctld_hostname = slurm_config['active_controller_hostname']
        #    self._slurm_manager.configure_slurmctld_hostname(
        #        slurmctld_hostname
        #    )
        #    self._slurm_manager.restart_slurm_component()
        # else:

        # Ensure we aren't dealing with a StoredDict before trying
        # to render the slurm.conf.
        slurm_config = dict(slurm_config)
        self._slurm_manager.render_slurm_configs(slurm_config)

        # Only restart slurmd the first time the node is brought up.
        if not self._stored.slurmd_restarted:
            self._slurm_manager.restart_slurm_component()
            self._stored.slurmd_restarted = True

        self.unit.status = ActiveStatus("slurmd available")
Example #27
 def handle(self, event):
     if self._pod.is_ready:
         logger.info('Pod is ready')
         self._framework.unit_status_set(ActiveStatus())
         return
     self._framework.unit_status_set(BlockedStatus('Pod is not ready'))
     logger.info('Pod is not ready')
Example #28
    def refresh(self, event):
        logger.info("Refresh! is leader %s", self.unit.is_leader)
        logger.info("Refresh! is leader() %s", self.unit.is_leader())
        event.set_results({ #FIXME
            'message': "Backup ended ok.",
            "time": 1361.77241278412983472378946237868,
        })
        return  #FIXME

        self.unit.status = MaintenanceStatus("Refreshing blog")
        logger.info("Updating blog files")
        os.chdir(BLOGDIR)
        run(['git', 'pull'])

        logger.info("Rebuilding the blog")
        self._build_blog()

        #FIXME: log, set the active status and answer with the git hash
        git_hash = "FIXME"
        msg = "Blog refreshed to HEAD {}".format(git_hash)
        logger.info(msg)
        self.unit.status = ActiveStatus(msg)
        fail = False  #FIXME
        if fail:
            event.fail("Device error: no space left")
        else:
            event.set_results({"HEAD hash": git_hash})
Example #29
    def _check_status(self):
        """Check that the core components we need exist."""
        slurmctld_available = self._stored.slurmctld_available
        slurmdbd_available = self._stored.slurmdbd_available
        slurmd_available = self._stored.slurmd_available
        slurm_installed = self._stored.slurm_installed

        deps = [
            slurmctld_available,
            slurmdbd_available,
            slurmd_available,
            slurm_installed,
        ]
        logger.debug("############### DEPS #####################")
        logger.debug(deps)

        if not all(deps):
            if not slurmctld_available:
                self.unit.status = BlockedStatus("NEED RELATION TO SLURMCTLD")
            elif not slurmdbd_available:
                self.unit.status = BlockedStatus("NEED RELATION TO SLURMDBD")
            elif not slurmd_available:
                self.unit.status = BlockedStatus("NEED RELATION TO SLURMD")
            else:
                self.unit.status = BlockedStatus("SLURM NOT INSTALLED")
            return False
        else:
            self.unit.status = ActiveStatus("")
            return True
Example #30
    def reconfigure_keepalived(self):
        if not self.keepalived.is_joined:
            return

        # TODO: move the check script source into a separate file.
        vrrp_scripts = []
        for port in self.tcp_backends.frontend_ports:
            vrrp_scripts.append(
                VRRPScript(
                    f'haproxy_port_{port}_check',
                    f'''script "bash -c '</dev/tcp/127.0.0.1/{port}'"'''))
        # TODO: there needs to be a better way to determine an egress-facing network interface
        # on which to configure a virtual IP than this.
        vip_interface = self.model.get_binding(
            'website').network.interfaces[0].name
        virtual_ip = self.model.config.get('virtual-ip')
        if virtual_ip is None:
            self.unit.status = BlockedStatus(
                'Waiting for an administrator to set virtual-ip.')
            return
        vrrp_instance = VRRPInstance(self.app.name,
                                     self.model.config['virtual-router-id'],
                                     [virtual_ip],
                                     vip_interface,
                                     track_interfaces=[vip_interface],
                                     track_scripts=vrrp_scripts)
        self.keepalived.configure_vrrp_instances([vrrp_instance])
        self.unit.status = ActiveStatus()