def start_datanode(namenode):
    hookenv.status_set('maintenance', 'starting datanode')
    # NB: service should be started by install, but we want to verify it is
    # running before we set the .started state and open ports. We always
    # restart here, which may seem heavy-handed. However, restart works
    # whether the service is currently started or stopped. It also ensures the
    # service is using the most current config.
    started = host.service_restart('hadoop-hdfs-datanode')
    if started:
        # Create a /user/ubuntu dir in HDFS (this is safe to run multiple times).
        bigtop = Bigtop()
        if not bigtop.check_hdfs_setup():
            try:
                utils.wait_for_hdfs(30)
                bigtop.setup_hdfs()
            except utils.TimeoutError:
                # HDFS is not yet available or is still in safe mode, so we can't
                # do the initial setup (create dirs); skip setting the .started
                # state below so that we try again on the next hook.
                hookenv.status_set('waiting', 'waiting on hdfs')
                return

        # HDFS is ready. Open ports and set .started, status, and app version
        for port in get_layer_opts().exposed_ports('datanode'):
            hookenv.open_port(port)
        set_state('apache-bigtop-datanode.started')
        hookenv.status_set('maintenance', 'datanode started')
        hookenv.application_version_set(get_hadoop_version())
    else:
        hookenv.log('DataNode failed to start')
        hookenv.status_set('blocked', 'datanode failed to start')
        remove_state('apache-bigtop-datanode.started')
        for port in get_layer_opts().exposed_ports('datanode'):
            hookenv.close_port(port)
Example #2
def stop():
    ''' Juju calls the stop hook before the unit is destroyed.  Clean up. '''
    # Do we need to explicitly call leave here?
    if host.service_running('consul'):
        host.service_stop('consul')
    for p in PORTS:
        hookenv.close_port(p)
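This stop hook assumes a module-level PORTS list defined elsewhere in the charm. A minimal sketch of what it might contain, using consul's conventional defaults purely as an assumption:

# Hypothetical PORTS list for the stop hook above; the real charm defines its
# own. 8500/8600/8301 are consul's usual HTTP API, DNS, and serf LAN ports.
PORTS = [8500, 8600, 8301]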
Example #3
def config_changed():
    config = hookenv.config()
    if config.changed('port'):
        if config.previous('port'):
            # The port is opened as UDP below, so close the old one as UDP too.
            hookenv.close_port(config.previous('port'), protocol='UDP')
        hookenv.open_port(config['port'], protocol='UDP')
    set_state('statsd.configured')
Example #4
def stop_registry(name=None, remove=True):
    '''Stop a registry container.

    Stop and optionally remove the named registry container. If a name is not
    specified, this method will stop the configured 'registry-name' container.

    :param name: Name of the container to stop
    :param remove: True removes the container after stopping
    '''
    charm_config = hookenv.config()
    port = charm_config.get('registry-port')
    if not name:
        name = charm_config.get('registry-name')

    # only try to stop running containers
    if is_container(name, all=False):
        cmd = ['docker', 'container', 'stop', name]
        try:
            subprocess.check_call(cmd)
        except subprocess.CalledProcessError:
            hookenv.log('Could not stop container: {}'.format(name),
                        level=hookenv.ERROR)
            raise

    # only try to remove existing containers
    if remove and is_container(name):
        cmd = ['docker', 'container', 'rm', '--volumes', name]
        try:
            subprocess.check_call(cmd)
        except subprocess.CalledProcessError:
            hookenv.log('Could not remove container: {}'.format(name),
                        level=hookenv.ERROR)
            raise

    hookenv.close_port(port)
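Since the docstring documents name and remove as a small API, a hypothetical call site might look like this (both container names are illustrative):

# Stop and remove the container named by the 'registry-name' config option.
stop_registry()
# Stop a specific container but keep it on disk for later inspection.
stop_registry(name='old-registry', remove=False)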
Example #5
def stop_kafka_waiting_for_zookeeper_ready():
    hookenv.status_set('maintenance', 'zookeeper not ready, stopping kafka')
    kafka = Kafka()
    hookenv.close_port(hookenv.config()['port'])
    kafka.stop()
    remove_state('kafka.started')
    hookenv.status_set('waiting', 'waiting for zookeeper to become ready')
Example #6
def stop():
    ''' Juju calls the stop hook before the unit is destroyed.  Clean up. '''
    # Do we need to explicitly call leave here?
    if host.service_running('consul'):
        host.service_stop('consul')
    for p in PORTS:
        hookenv.close_port(p)
Example #7
def stop_kafka_waiting_for_zookeeper_ready():
    hookenv.status_set('maintenance', 'zookeeper not ready, stopping confluent_kafka_rest')
    kafkarest = ConfluentKafkaRest()
    hookenv.close_port(KAFKA_REST_PORT)
    kafkarest.stop()
    remove_state('confluent_kafka_rest.started')
    hookenv.status_set('waiting', 'waiting for zookeeper to become ready')
Example #8
def build_config(cfg):
    """Build and save the jujushell server config."""
    juju_addrs = (_get_string(cfg, 'juju-addrs')
                  or os.getenv('JUJU_API_ADDRESSES'))
    if not juju_addrs:
        raise ValueError('could not find API addresses')
    juju_cert = _get_string(cfg, 'juju-cert')
    if juju_cert == 'from-unit':
        juju_cert = _get_juju_cert(agent_path())

    current_port = get_port(cfg)
    # TODO: it's very unfortunate that charm helpers do not allow to get the
    # previous config as a dict.
    previous_cfg = getattr(cfg, '_prev_dict', {}) or {}
    previous_port = get_port(previous_cfg)
    hookenv.open_port(current_port)
    if previous_port and previous_port != current_port:
        hookenv.close_port(previous_port)

    data = {
        'juju-addrs': juju_addrs.split(),
        'juju-cert': juju_cert,
        'image-name': IMAGE_NAME,
        'log-level': cfg['log-level'],
        'port': current_port,
        'profiles': (PROFILE_DEFAULT, PROFILE_TERMSERVER),
    }
    if cfg['tls']:
        data.update(_build_tls_config(cfg))
    with open(config_path(), 'w') as stream:
        yaml.safe_dump(data, stream=stream)
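get_port is defined elsewhere in the charm; a hypothetical stand-in consistent with how it is used above, where it receives either the live Config object or a plain dict of previous values:

# Hypothetical helper; the real charm's version may apply a default port.
def get_port(cfg):
    return cfg.get('port')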
Example #9
def install_clustered():
    service_stop('arangodb3')
    if not is_flag_set('arangodb.clustered'):
        if unit_private_ip() == leader_get('master_ip'):
            render(
                source='arangodbcluster.service',
                target='/etc/systemd/system/arangodbcluster.service',
                context={'option': '--starter.data-dir={}'.format(DATA_DIR)})
            subprocess.check_call(['systemctl', 'daemon-reload'])
            subprocess.check_call(
                ['systemctl', 'enable', 'arangodbcluster.service'])
            service_start('arangodbcluster')
            set_flag('arangodb.clustered')
            leader_set({'master_started': True})
        elif leader_get('master_started'):
            render(source='arangodbcluster.service',
                   target='/etc/systemd/system/arangodbcluster.service',
                   context={
                       'option':
                       '--starter.data-dir={} --starter.join {}'.format(
                           DATA_DIR, leader_get('master_ip'))
                   })
            subprocess.check_call(['systemctl', 'daemon-reload'])
            subprocess.check_call(
                ['systemctl', 'enable', 'arangodbcluster.service'])
            service_start('arangodbcluster')
            # Let the charm sleep for 15 seconds so that the setup file is created.
            time.sleep(15)
            set_flag('arangodb.clustered')
    setup_file = Path('{}/setup.json'.format(DATA_DIR))
    if setup_file.exists():
        close_port(kv.get('port'))
        open_coordinater_port()
Example #10
def change_config():
    conf = config()
    if conf.changed('port') or conf.changed('authentication'):
        old_port = conf.previous('port')
        render(source='arangod.conf',
               target='/etc/arangodb3/arangod.conf',
               context={
                   'port': str(conf['port']),
                   'authentication': str(conf['authentication']).lower()
               })
        if old_port is not None:
            close_port(old_port)
        open_port(conf['port'])
    if (conf['root_password'] != kv.get('password')
            and conf['root_password'] != ""):
        password = conf['root_password']
        old_password = kv.get('password')
        kv.set('password', password)
        TCP = 'tcp://' + unit_public_ip() + ':' + str(conf['port'])
        require = "require('@arangodb/users').update('root', '{}', true)".format(
            password)
        subprocess.check_call([
            'arangosh', '--server.endpoint', TCP, '--server.username', 'root',
            '--server.password', old_password, '--javascript.execute-string',
            require
        ])
Example #11
def stop_datanode():
    hookenv.status_set('maintenance', 'stopping datanode')
    for port in get_layer_opts().exposed_ports('datanode'):
        hookenv.close_port(port)
    host.service_stop('hadoop-hdfs-datanode')
    remove_state('datanode.started')
    hookenv.status_set('maintenance', 'datanode stopped')
Example #12
    def __call__(self, manager, service_name, event_name):
        service = manager.get_service(service_name)
        # turn this generator into a list,
        # as we'll be going over it multiple times
        new_ports = list(service.get('ports', []))
        port_file = os.path.join(hookenv.charm_dir(),
                                 '.{}.ports'.format(service_name))
        if os.path.exists(port_file):
            with open(port_file) as fp:
                old_ports = fp.read().split(',')
            for old_port in old_ports:
                if bool(old_port) and not self.ports_contains(
                        old_port, new_ports):
                    hookenv.close_port(old_port)
        with open(port_file, 'w') as fp:
            fp.write(','.join(str(port) for port in new_ports))
        for port in new_ports:
            # A port is either a number or 'ICMP'
            protocol = 'TCP'
            if str(port).upper() == 'ICMP':
                protocol = 'ICMP'
            if event_name == 'start':
                hookenv.open_port(port, protocol)
            elif event_name == 'stop':
                hookenv.close_port(port, protocol)
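This __call__ signature matches the ManagerCallback protocol from the charmhelpers services framework, so a callback like this is normally driven by a ServiceManager definition. A minimal sketch, with the service name and port list as illustrative assumptions:

from charmhelpers.core.services.base import ServiceManager

# Hypothetical service definition. The 'ports' list feeds the callback above:
# numbers open TCP, the literal string 'ICMP' opens ICMP.
manager = ServiceManager([{
    'service': 'myservice',
    'ports': [8080, 'ICMP'],
}])
manager.manage()  # dispatches start/stop events based on the current hook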
Example #13
def config_changed():

    conf = config()
    if not conf.changed('server_port') and not conf.changed('RAM_MAX'):
        return

    log('ftb-infinity: config_changed')
    cur_status = status_get()
    status_set('maintenance', 'configuring')

    port_changed = conf.changed('server_port')
    ram_changed = conf.changed('RAM_MAX')

    # Let's suppose java will rewrite server.properties on exit
    started = is_state(CHARM_STATE_STARTED)
    if started:
        service_stop(CHARM_NAME)
        sleep(2)

    if port_changed:
        if conf.previous('server_port'):  # nothing to close on the first run
            close_port(conf.previous('server_port'))
        ftb_config_server()

    if ram_changed:
        ftb_systemd_install()

    if started:
        service_start(CHARM_NAME)
        if port_changed:
            open_port(conf['server_port'])

    # restore state
    status_set(cur_status[0], cur_status[1])
Example #14
def config_changed():
    if not config('action-managed-upgrade'):
        if openstack_upgrade_available('ceilometer-common'):
            status_set('maintenance', 'Upgrading to new OpenStack release')
            do_openstack_upgrade(CONFIGS)
    install_event_pipeline_setting()
    update_nrpe_config()
    CONFIGS.write_all()
    # NOTE(jamespage): Drop when charm switches to apache2+mod_wsgi
    #                  reload ensures port override is set correctly
    reload_systemd()
    ceilometer_joined()

    cmp_codename = CompareOpenStackReleases(
        get_os_codename_install_source(config('openstack-origin')))
    if cmp_codename < 'queens':
        open_port(CEILOMETER_PORT)
    else:
        close_port(CEILOMETER_PORT)

    configure_https()

    # NOTE(jamespage): Iterate identity-{service,credentials} relations
    #                  to pickup any required databag changes on these
    #                  relations.
    for rid in relation_ids('identity-service'):
        keystone_joined(relid=rid)
    for rid in relation_ids('identity-credentials'):
        keystone_credentials_joined(relid=rid)

    # Define the new ocf resource and use the key delete_resources to delete
    # legacy resource for >= Liberty since the ceilometer-agent-central moved
    # to ceilometer-polling in liberty (see LP: #1606787).
    for rid in relation_ids('ha'):
        ha_joined(rid)
Example #15
def close_old_port():
    config = hookenv.config()
    old_port = config.previous('port')
    if old_port is None:
        # First run: there is no previous port to close.
        return
    try:
        hookenv.close_port(old_port)
    except CalledProcessError:
        hookenv.log('Port %d already closed, skipping.' % old_port)
Example #16
    def __call__(self, manager, service_name, event_name):
        """Open unit ports."""
        service = manager.get_service(service_name)
        new_ports = service.get("ports", [])
        port_file = os.path.join(hookenv.charm_dir(),
                                 ".{}.ports".format(service_name))
        if os.path.exists(port_file):
            with open(port_file) as fp:
                old_ports = fp.read().split(",")
            for old_port in old_ports:
                if bool(old_port) and not self.ports_contains(
                        old_port, new_ports):
                    hookenv.close_port(old_port)
        with open(port_file, "w") as fp:
            fp.write(",".join(str(port) for port in new_ports))
        for port in new_ports:
            # A port is either a number or 'ICMP'
            protocol = "TCP"
            if str(port).upper() == "ICMP":
                protocol = "ICMP"
            if event_name == "start":
                try:
                    hookenv.open_port(port, protocol)
                except subprocess.CalledProcessError as err:
                    if err.returncode == 1:
                        hookenv.log(
                            "open_port returns: {}, ignoring".format(err),
                            level=hookenv.INFO,
                        )
                    else:
                        raise
            elif event_name == "stop":
                hookenv.close_port(port, protocol)
Example #17
def config_changed():
    conf = config()
    db = unitdata.kv()
    changed = False
    if conf.get('proxy_port') != db.get('proxy_port'):
        changed = True
        if db.get('proxy_port'):  # nothing to close on the first run
            close_port(db.get('proxy_port'))
        db.set('proxy_port', conf.get('proxy_port'))
    if conf.get('admin_port') != db.get('admin_port'):
        changed = True
        if db.get('admin_port'):
            close_port(db.get('admin_port'))
        db.set('admin_port', conf.get('admin_port'))
    if changed:
        status_set('maintenance', '(Updating) Adjusting settings')
        context = {
            'host': db.get('host'),
            'proxy_port': db.get('proxy_port'),
            'admin_port': db.get('admin_port'),
            'db_update_propagation': db.get('db_update_propagation'),
            'cass_contact_points': db.get('cass_cp'),
            'cass_port': db.get('cass_port'),
            'cass_username': db.get('cass_username'),
            'cass_password': db.get('cass_password'),
        }
        render('kong.conf', '/etc/kong/kong.conf', context)
        subprocess.call(['kong', 'restart'])
        open_port(db.get('proxy_port'))
        open_port(db.get('admin_port'))
        status_set('active', '(Ready) Kong running.')
Example #18
    def update_ports(self):
        """Update open ports based on configuration so that Juju can expose them."""
        opened_ports = str(subprocess.check_output(["opened-ports"]),
                           "utf-8").split("/tcp\n")
        hookenv.log("Opened ports {}".format(opened_ports), "DEBUG")

        for frontend in self.proxy_config.frontends:
            if frontend.port in opened_ports:
                if (self.charm_config["enable-stats"]
                        and self.charm_config["stats-local"] and
                        self.charm_config["stats-port"] == int(frontend.port)):
                    hookenv.log(
                        "Stats port set to be closed {}".format(frontend.port),
                        "DEBUG")
                else:
                    hookenv.log("Port already open {}".format(frontend.port),
                                "DEBUG")
                    opened_ports.remove(frontend.port)
            else:
                if (self.charm_config["enable-stats"]
                        and self.charm_config["stats-local"] and
                        self.charm_config["stats-port"] == int(frontend.port)):
                    hookenv.log(
                        "Not opening stats port {}".format(frontend.port),
                        "DEBUG")
                else:
                    hookenv.log("Opening {}".format(frontend.port), "DEBUG")
                    hookenv.open_port(frontend.port)

        for port in opened_ports:
            if port:
                hookenv.log("Closing port {}".format(port), "DEBUG")
                hookenv.close_port(port)
Example #19
def configure_rabbit_ssl():
    """
    The legacy config support adds some additional complications.

    ssl_enabled = True, ssl = off -> ssl enabled
    ssl_enabled = False, ssl = on -> ssl enabled
    """
    ssl_mode, external_ca = _get_ssl_mode()

    if ssl_mode == 'off':
        if os.path.exists(rabbit.RABBITMQ_CONF):
            os.remove(rabbit.RABBITMQ_CONF)
        close_port(config('ssl_port'))
        reconfigure_client_ssl()
        return
    ssl_key = _convert_from_base64(config('ssl_key'))
    ssl_cert = _convert_from_base64(config('ssl_cert'))
    ssl_ca = _convert_from_base64(config('ssl_ca'))
    ssl_port = config('ssl_port')

    # If external managed certs then we need all the fields.
    if (ssl_mode in ('on', 'only') and any((ssl_key, ssl_cert)) and
            not all((ssl_key, ssl_cert))):
        log('If ssl_key or ssl_cert are specified both are required.',
            level=ERROR)
        sys.exit(1)

    if not external_ca:
        ssl_cert, ssl_key, ssl_ca = ServiceCA.get_service_cert()

    rabbit.enable_ssl(
        ssl_key, ssl_cert, ssl_port, ssl_ca,
        ssl_only=(ssl_mode == "only"), ssl_client=False)
    reconfigure_client_ssl(True)
    open_port(ssl_port)
Example #20
def check_ports(new_port):
    kv = unitdata.kv()
    if kv.get('grafana.port') != new_port:
        hookenv.open_port(new_port)
        if kv.get('grafana.port'):  # Don't try to close non-existing ports
            hookenv.close_port(kv.get('grafana.port'))
        kv.set('grafana.port', new_port)
Example #21
def stop_scope():
    compose = Compose('files/scope')
    compose.kill()
    compose.rm()
    hookenv.close_port(4040)
    reactive.remove_state('scope.started')
    hookenv.status_set('maintenance', 'Weave stopped.')
Example #22
    def close_ports(self):
        '''
        Close off communication from the outside world.

        '''
        for port in self.dist_config.exposed_ports('zookeeper'):
            close_port(port)
Example #23
    def close_ports(self):
        '''
        Close off communication from the outside world.

        '''
        for port in self.dist_config.exposed_ports('zookeeper'):
            close_port(port)
Example #24
    def update_ports(self):
        opened_ports = str(subprocess.check_output(["opened-ports"]),
                           'utf-8').split('/tcp\n')
        hookenv.log("Opened ports {}".format(opened_ports), "DEBUG")
        for frontend in self.proxy_config.frontends:
            if frontend.port in opened_ports:
                if (self.charm_config['enable-stats']
                        and self.charm_config['stats-local']
                        and self.charm_config['stats-port'] == int(frontend.port)):
                    hookenv.log(
                        "Stats port set to be closed {}".format(frontend.port),
                        "DEBUG")
                else:
                    hookenv.log("Port already open {}".format(frontend.port),
                                "DEBUG")
                    opened_ports.remove(frontend.port)
            else:
                if (self.charm_config['enable-stats']
                        and self.charm_config['stats-local']
                        and self.charm_config['stats-port'] == int(frontend.port)):
                    hookenv.log(
                        "Not opening stats port {}".format(frontend.port),
                        "DEBUG")
                else:
                    hookenv.log("Opening {}".format(frontend.port), "DEBUG")
                    hookenv.open_port(frontend.port)
        for port in opened_ports:
            if port:
                hookenv.log("Closing port {}".format(port), "DEBUG")
                hookenv.close_port(port)
Example #25
def close_old_port():
    config = hookenv.config()
    old_port = config.previous('port')
    if old_port is None:
        # First run: there is no previous port to close.
        return
    try:
        hookenv.close_port(old_port)
    except CalledProcessError:
        hookenv.log('Port %d already closed, skipping.' % old_port)
Example #26
def check_ports(new_port):
    kv = unitdata.kv()
    if kv.get('grafana.port') != new_port:
        hookenv.open_port(new_port)
        if kv.get('grafana.port'):  # Don't try to close non-existing ports
            hookenv.close_port(kv.get('grafana.port'))
        kv.set('grafana.port', new_port)
Example #27
def stop():
    """ Stop hook """
    log('ftb-infinity: stop')
    remove_state(CHARM_STATE_STARTED)
    conf = config()
    close_port(conf['server_port'])
    service_stop(CHARM_NAME)
    ftb_systemd_remove()
Example #28
def remove_zookeepers_config():
    """When the user removes the relation with zookeeper then the
    zookeepers must be removed from config file. OpenTSDB must be restarted."""
    DB.set('zookeepers', [])
    render_config()
    service_stop('opentsdb')
    close_port(config()['port'])
    remove_state('layer-opentsdb.zookeeper-configured')
Example #29
def config_changed():
    conf = hookenv.config()
    for port in ('http_port', 'ssh_port'):
        if conf.changed(port) and conf.previous(port):
            hookenv.close_port(conf.previous(port))
        if conf.get(port):
            hookenv.open_port(conf[port])
    setup()
Example #30
    def configure_ports(self):
        """Configure listening ports."""
        listen_port = self.charm_config["listen-port"]
        for open_port in hookenv.opened_ports():
            port, protocol = open_port.split("/")
            # opened_ports() yields entries like '80/tcp'; compare as strings.
            if protocol != "udp" and port != str(listen_port):
                hookenv.close_port(port, protocol=protocol.upper())
        hookenv.open_port(self.charm_config["listen-port"], protocol="UDP")
Example #31
def update_service_ports(old_service_ports=None, new_service_ports=None):
    if old_service_ports is None or new_service_ports is None:
        return None
    for port in old_service_ports:
        if port not in new_service_ports:
            close_port(port)
    for port in new_service_ports:
        open_port(port)
Example #32
def hbase_rel_removed():
    """When the user removes the relation with HBase then
    OpenTSDB must be restarted. The data is not automatically removed because
    this could lead to unpleasant scenario's. For example: user accidentally removing
    the relation with HBase would result in complete data loss."""
    service_stop('opentsdb')
    close_port(config()['port'])
    remove_state('layer-opentsdb.hbase-configured')
Example #33
def update_service_ports(old_service_ports=None, new_service_ports=None):
    if old_service_ports is None or new_service_ports is None:
        return None
    for port in old_service_ports:
        if port not in new_service_ports:
            close_port(port)
    for port in new_service_ports:
        open_port(port)
Example #34
def check_port(key, new_port):
    unitdata_key = '{}.port'.format(key)
    kv = unitdata.kv()
    if kv.get(unitdata_key) != new_port:
        hookenv.open_port(new_port)
        if kv.get(unitdata_key):  # Don't try to close non-existing ports
            hookenv.close_port(kv.get(unitdata_key))
        kv.set(unitdata_key, new_port)
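A hypothetical call site for the helper above, assuming a charm that tracks a single HTTP port in its config:

# Opens the new value and closes whatever port was last recorded under
# 'web.port' in unitdata.
check_port('web', hookenv.config()['port'])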
Example #35
def config_changed():
    conf = hookenv.config()
    for port in ('http_port', 'ssh_port'):
        if conf.changed(port) and conf.previous(port):
            hookenv.close_port(conf.previous(port))
        if conf.get(port):
            hookenv.open_port(conf[port])
    setup()
Example #36
def config_changed():
    config = hookenv.config()
    for port_name in ('api_port', 'admin_port', 'graphite_port'):
        if config.changed(port_name):
            if config.previous(port_name):
                hookenv.close_port(config.previous(port_name))
            hookenv.open_port(config[port_name])
    set_state('influxdb.configured')
Example #37
def wants_etcd():
    if host.service_available('zetcd') and host.service_running('zetcd'):
        host.service_stop('zetcd')
    hookenv.close_port(2181)
    try:
        os.unlink('/etc/systemd/system/zetcd.service')
    except FileNotFoundError:
        # The unit file may never have been installed; nothing to do.
        pass
    hookenv.status_set('blocked', 'waiting for relation to etcd')
Example #38
def close_open_ports():
    """Close the previous port and open the port from configuration."""
    configuration = hookenv.config()
    previous_port = configuration.previous("port")
    port = configuration.get("port")
    if previous_port is not None and previous_port != port:
        log("The port changed; closing {0} opening {1}".format(previous_port, port))
        close_port(previous_port)
        open_port(port)
Example #39
def configure_webserver():
    render_plugin_config()

    if hookenv.config('enable_webhooks'):
        hookenv.open_port(WEBHOOKS_PORT)
        set_state('errbot.webhooks-enabled')
    else:
        hookenv.close_port(WEBHOOKS_PORT)
        remove_state('errbot.webhooks-enabled')
Example #40
def configure_webserver():
    render_plugin_config()

    if hookenv.config('enable_webhooks'):
        hookenv.open_port(WEBHOOKS_PORT)
        set_state('errbot.webhooks-enabled')
    else:
        hookenv.close_port(WEBHOOKS_PORT)
        remove_state('errbot.webhooks-enabled')
Example #41
def prometheus_scape_broken():
    """The scrape relation is broken: it has been completely removed.

    Disable the prometheus plugin and close the port.
    """
    rabbit.disable_plugin(PROM_PLUGIN)
    close_port(RMQ_MON_PORT)
    log("scrape relation broken, disabled plugin and closed port", level=INFO)
Example #42
def stop_kafka_waiting_for_zookeeper_ready():
    hookenv.status_set(
        'maintenance',
        'zookeeper not ready, stopping confluent_schema_registry')
    schemareg = ConfluentSchemaRegistry()
    hookenv.close_port(SCHEMA_REG_PORT)
    schemareg.stop()
    remove_state('confluent_schema_registry.started')
    hookenv.status_set('waiting', 'waiting for zookeeper to become ready')
Example #43
def open_port():
    """
    Open the port that is requested for the service and close the others.
    """
    config = hookenv.config()
    port_config = PORTS.get(config['service_type'])
    if port_config:
        hookenv.open_port(port_config['open'], protocol='TCP')
        for port in port_config['close']:
            hookenv.close_port(port, protocol='TCP')
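This function relies on a module-level PORTS mapping keyed by service_type. A plausible shape, purely illustrative:

# Hypothetical PORTS table: for each service_type, the port to open and the
# alternatives to close so that only one endpoint stays exposed.
PORTS = {
    'http': {'open': 80, 'close': [443, 8080]},
    'https': {'open': 443, 'close': [80, 8080]},
}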
Example #44
def close_open_ports():
    ''' Close the previous port and open the port from configuration. '''
    configuration = hookenv.config()
    previous_port = configuration.previous('port')
    port = configuration.get('port')
    if previous_port is not None and previous_port != port:
        log('The port changed; closing {0} opening {1}'.format(previous_port,
            port))
        close_port(previous_port)
        open_port(port)
Example #45
def wsgi_relation_broken():
    """
    When WSGI relation (e.g.: gunicorn) goes away
    """

    log('Hook function: wsgi_relation_broken')

    config_data = ansible_config()

    close_port(config_data['listen_port'])
Example #46
def close_open_ports():
    ''' Close the previous port and open the port from configuration. '''
    configuration = hookenv.config()
    previous_port = configuration.previous('port')
    port = configuration.get('port')
    if previous_port is not None and previous_port != port:
        log('The port changed; closing {0} opening {1}'.format(
            previous_port, port))
        close_port(previous_port)
        open_port(port)
Example #47
def config_changed():

    if config('prefer-ipv6'):
        rabbit.assert_charm_supports_ipv6()

    # Add archive source if provided
    add_source(config('source'), config('key'))
    apt_update(fatal=True)
    # Copy in defaults file for updated ulimits
    shutil.copyfile(
        'templates/rabbitmq-server',
        '/etc/default/rabbitmq-server')
    # Install packages to ensure any changes to source
    # result in an upgrade if applicable.
    status_set('maintenance', 'Installing/upgrading RabbitMQ packages')
    apt_install(rabbit.PACKAGES, fatal=True)

    open_port(5672)

    chown(RABBIT_DIR, rabbit.RABBIT_USER, rabbit.RABBIT_USER)
    chmod(RABBIT_DIR, 0o775)

    configure_nodename()

    if config('management_plugin') is True:
        rabbit.enable_plugin(MAN_PLUGIN)
        open_port(55672)
    else:
        rabbit.disable_plugin(MAN_PLUGIN)
        close_port(55672)

    rabbit.set_all_mirroring_queues(config('mirroring-queues'))
    rabbit.ConfigRenderer(
        rabbit.CONFIG_FILES).write_all()

    if is_relation_made("ha"):
        ha_is_active_active = config("ha-vip-only")

        if ha_is_active_active:
            update_nrpe_checks()
        else:
            if is_elected_leader('res_rabbitmq_vip'):
                update_nrpe_checks()
            else:
                log("hacluster relation is present but this node is not active"
                    " skipping update nrpe checks")
    else:
        update_nrpe_checks()

    # NOTE(jamespage)
    # trigger amqp_changed to pickup and changes to network
    # configuration via the access-network config option.
    for rid in relation_ids('amqp'):
        for unit in related_units(rid):
            amqp_changed(relation_id=rid, remote_unit=unit)
Example #48
File: oostore.py Project: cmars/oo
def config_changed():
    config = hookenv.config()
    if config.changed('http_port'):
        if config.previous('http_port'):
            hookenv.close_port(config.previous('http_port'))
        hookenv.open_port(config['http_port'])
    if config.changed('https_port'):
        if config.previous('https_port'):
            hookenv.close_port(config.previous('https_port'))
        hookenv.open_port(config['https_port'])
    set_state('oostore.configured')
Example #49
def zookeeper_removed():
    hookenv.status_set('maintenance', 'Removing Apache NiFi from cluster')
    re_edit_in_place('{}/files/nifi-1.1.1/conf/nifi.properties'.format(hookenv.charm_dir()), {
        r'.*nifi.cluster.is.node.*': 'nifi.cluster.is.node=false'
    })
    hookenv.close_port(hookenv.config()['cluster-port'])
    if service_restart('nifi'):
        remove_state('apache-nifi.cluster')
        hookenv.status_set('active', 'Running: standalone mode')
    else:
        # 'error' is not a settable workload status; use 'blocked' instead.
        hookenv.status_set('blocked', 'Failed to restart')
Example #50
def install_openvpn_xenial():
    puppet = Puppet()
    try:
        os.makedirs('/opt/openvpn-puppet')
    except OSError as exception:
        if exception.errno != errno.EEXIST:
            raise
    conf = config()
    dns_info = get_dns_info()
    clients = conf['clients'].split()
    eipndict = get_extip_and_networks()
    ext_ip = eipndict['external-ip']
    pub_ip = eipndict['external-ip']
    # If public-address is different from private-address, we're probably in a
    # juju-supported cloud that we can trust to give us the right address that
    # clients need to use to connect to us.
    if unit_get('private-address') != unit_get('public-address'):
        pub_ip = unit_get('public-address')
    internal_networks = eipndict['internal-networks']
    context = {
        'servername': SERVERNAME,
        'country': conf['key-country'],
        'province': conf['key-province'],
        'city': conf['key-city'],
        'organization': conf['key-org'],
        'email': conf['key-email'],
        'protocol': conf['protocol'],
        'port': conf['port'],
        'duplicate_cn': conf['duplicate-cn'],
        'push_dns': conf['push-dns'],
        'push_default_gateway': conf['push-default-gateway'],
        'dns_server': dns_info.get('nameserver', "8.8.8.8"),
        'dns_search_domains': dns_info.get('search', []),
        'clients': clients,
        'ext_ip': ext_ip,
        'pub_ip': pub_ip,
        'internal_networks': internal_networks,
    }
    templating.render(
        source='init.pp',
        target='/opt/openvpn-puppet/init.pp',
        context=context
    )
    kv_store = unitdata.kv()
    if kv_store.get('previous-port') and kv_store.get('previous-protocol'):
        close_port(kv_store.get('previous-port'),
                   protocol=kv_store.get('previous-protocol'))
    puppet.apply('/opt/openvpn-puppet/init.pp')
    copy_client_configs_to_home(clients)
    status_set('active', 'Ready')
    open_port(conf['port'], protocol=conf['protocol'].upper())
    kv_store.set('previous-port', conf['port'])
    kv_store.set('previous-protocol', conf['protocol'].upper())
Example #51
def config_changed():
    config = hookenv.config()
    if not reactive.is_state('apache.available') or not config.changed('port'):
        return
    with open('apache.yaml') as fp:
        workload = yaml.safe_load(fp)
    for name, site in workload['sites'].items():
        configure_site(name, site)
    if reactive.is_state('apache.started'):
        if config.previous('port'):  # guard the first run, which has no previous port
            hookenv.close_port(config.previous('port'))
        assert host.service_reload('apache2'), 'Failed to reload Apache'
        hookenv.open_port(config['port'])
    hookenv.status_set('maintenance', '')
Example #52
def fiche_server_port_changed():

    """ React to fiche-server-port changed
    """
    status_set('maintenance', 'Reconfiguring fiche-server-port')
    conf = config()
    # Check and change open port, close prev port
    if conf.previous('fiche-server-port') and \
       conf.previous('fiche-server-port') != config('fiche-server-port'):
        close_port(conf.previous('fiche-server-port'))
    # Remove state to re-render systemd conf
    remove_state('fiche.systemd.configured')
    remove_state('fiche.available')
Example #53
def fiche_port_changed():

    """ React to fiche front end port changed
    """
    status_set('maintenance', 'Reconfiguring fiche front end port')
    conf = config()
    # Close prev port
    if conf.previous('port') and \
       conf.previous('port') != config('port'):
        close_port(conf.previous('port'))
        # Remove state to re-render nginx conf
        remove_state('fiche.web.configured')
        remove_state('fiche.available')
Example #54
def launch_default_ingress_controller():
    ''' Launch the Kubernetes ingress controller & default backend (404) '''
    context = {}
    context['arch'] = arch()
    addon_path = '/root/cdk/addons/{}'

    # Render the default http backend (404) replicationcontroller manifest
    manifest = addon_path.format('default-http-backend.yaml')
    render('default-http-backend.yaml', manifest, context)
    hookenv.log('Creating the default http backend.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log('Failed to create default-http-backend. Will attempt again next update.')  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return

    # Render the ingress replication controller manifest
    manifest = addon_path.format('ingress-replication-controller.yaml')
    render('ingress-replication-controller.yaml', manifest, context)
    hookenv.log('Creating the ingress replication controller.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log('Failed to create ingress controller. Will attempt again next update.')  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return

    set_state('kubernetes-worker.ingress.available')
    hookenv.open_port(80)
    hookenv.open_port(443)
Example #55
def recycle_networking_and_app():
    cfg = config()
    # guard on first run, no previous values, so do nothing.
    if not cfg.previous('vote_port') or not cfg.previous('result_port'):
        return

    # Close previously configured ports
    status_set('maintenance', 'Re-configuring port bindings.')
    close_port(cfg.previous('vote_port'))
    close_port(cfg.previous('result_port'))
    # as the open port and app spinup are in another method, consume
    # that and tell juju to re-execute that method body by removing
    # the idempotency state
    remove_state('voting-app.standalone.running')
    remove_state('voting-app.running')
Example #56
def render_and_launch_ingress():
    ''' If configuration has ingress daemon set enabled, launch the ingress load
    balancer and default http backend. Otherwise attempt deletion. '''
    config = hookenv.config()
    # If ingress is enabled, launch the ingress controller
    if config.get('ingress'):
        launch_default_ingress_controller()
    else:
        hookenv.log('Deleting the http backend and ingress.')
        kubectl_manifest('delete',
                         '/root/cdk/addons/default-http-backend.yaml')
        kubectl_manifest('delete',
                         '/root/cdk/addons/ingress-daemon-set.yaml')  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
Example #57
def stop_nodemanager():
    hookenv.status_set('maintenance', 'stopping nodemanager')
    stopped = host.service_stop('hadoop-yarn-nodemanager')
    if stopped:
        hookenv.status_set('maintenance', 'nodemanager stopped')
    else:
        hookenv.log('NodeManager failed to stop')
        hookenv.status_set('blocked', 'nodemanager failed to stop')

    # Even if the service failed to stop, we want to treat it as stopped so
    # other apps do not attempt to interact with it. Remove .started and
    # close our ports.
    remove_state('apache-bigtop-nodemanager.started')
    for port in get_layer_opts().exposed_ports('nodemanager'):
        hookenv.close_port(port)
Example #58
def update_general_config():
    config = hookenv.config()
    target = path.join(node_dist_dir(), 'config.js')
    render(source='config.js.template',
           target=target,
           context=config)

    if config.changed('port'):
        hookenv.log('Changing ports: {} -> {}'.format(
            config.previous('port'),
            config['port']
        ))
        if config.previous('port'):
            hookenv.close_port(config.previous('port'))
        hookenv.open_port(config['port'])