Example #1
 def __call__(self, manager, service_name, event_name):
     service = manager.get_service(service_name)
     # turn this generator into a list,
     # as we'll be going over it multiple times
     new_ports = list(service.get('ports', []))
     port_file = os.path.join(hookenv.charm_dir(),
                              '.{}.ports'.format(service_name))
     if os.path.exists(port_file):
         with open(port_file) as fp:
             old_ports = fp.read().split(',')
         for old_port in old_ports:
             if bool(old_port) and not self.ports_contains(
                     old_port, new_ports):
                 hookenv.close_port(old_port)
     with open(port_file, 'w') as fp:
         fp.write(','.join(str(port) for port in new_ports))
     for port in new_ports:
         # A port is either a number or 'ICMP'
         protocol = 'TCP'
         if str(port).upper() == 'ICMP':
             protocol = 'ICMP'
         if event_name == 'start':
             hookenv.open_port(port, protocol)
         elif event_name == 'stop':
             hookenv.close_port(port, protocol)
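Example #1's diffing depends on a `ports_contains` helper that the excerpt does not show; it decides whether a previously opened port is still wanted. A minimal sketch, assuming entries may be integers, numeric strings, 'start-end' range strings, or 'ICMP' (this is a hypothetical reconstruction, not the charmhelpers original):

 def ports_contains(self, port, ports):
     # Hypothetical sketch: report whether `port` matches any entry in
     # `ports`. Entries may be ints, numeric strings, 'start-end' range
     # strings, or the literal 'ICMP'.
     try:
         port = int(port)
     except ValueError:
         # non-numeric ports ('ICMP') compare as strings
         return str(port).upper() in (str(p).upper() for p in ports)
     for entry in ports:
         entry = str(entry)
         if '-' in entry:
             start, end = (int(x) for x in entry.split('-', 1))
             if start <= port <= end:
                 return True
         elif entry.isdigit() and int(entry) == port:
             return True
     return False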
Example #2
def install_presentation():
    """Install presentation."""

    opts = layer.options('git-deploy')

    # Set status while we build and install
    hookenv.status_set('maintenance',
                       'Installing and building the presentation.')

    # Build and install
    with chdir(opts.get('target')):
        with open('requirements.txt', 'r') as f:
            for requirement in f:
                pip_install(requirement.strip())

        sphinx_build_cmd = 'sphinx-build -b html source %s' % opts.get('target')
        subprocess.call(sphinx_build_cmd.split(), shell=False)
    present_chown_cmd = 'chown -R www-data:www-data %s' % opts.get('target')
    subprocess.call(present_chown_cmd.split(), shell=False)   
    
    # Configure nginx vhost
    configure_site('present', 'present.vhost', app_path=opts.get('target'))

    # Open presentation front-end port
    hookenv.open_port(config['port'])

    # Set status
    hookenv.status_set('active', 
                       'Presentation is active on port %s' % config['port'])
    # Set state
    set_state('presentation.available')
Example #3
def install_load_balancer(apiserver, tls):
    ''' Create the default vhost template for load balancing '''
    # Get the tls paths from the layer data.
    layer_options = layer.options('tls-client')
    server_cert_path = layer_options.get('server_certificate_path')
    cert_exists = server_cert_path and os.path.isfile(server_cert_path)
    server_key_path = layer_options.get('server_key_path')
    key_exists = server_key_path and os.path.isfile(server_key_path)
    # Do both the key and certificate exist?
    if cert_exists and key_exists:
        # At this point the cert and key exist, and they are owned by root.
        chown = ['chown', 'www-data:www-data', server_cert_path]
        # Change the owner to www-data so the nginx process can read the cert.
        subprocess.call(chown)
        chown = ['chown', 'www-data:www-data', server_key_path]
        # Change the owner to www-data so the nginx process can read the key.
        subprocess.call(chown)

        hookenv.open_port(hookenv.config('port'))
        services = apiserver.services()
        nginx.configure_site(
                'apilb',
                'apilb.conf',
                server_name='_',
                services=services,
                port=hookenv.config('port'),
                server_certificate=server_cert_path,
                server_key=server_key_path,
        )
        hookenv.status_set('active', 'Loadbalancer ready.')
Example #4
def install_load_balancer(apiserver, tls):
    ''' Create the default vhost template for load balancing '''
    # Get the tls paths from the layer data.
    layer_options = layer.options('tls-client')
    server_cert_path = layer_options.get('server_certificate_path')
    cert_exists = server_cert_path and os.path.isfile(server_cert_path)
    server_key_path = layer_options.get('server_key_path')
    key_exists = server_key_path and os.path.isfile(server_key_path)
    # Do both the key and certificate exist?
    if cert_exists and key_exists:
        # At this point the cert and key exist, and they are owned by root.
        chown = ['chown', 'www-data:www-data', server_cert_path]

        # Change the owner to www-data so the nginx process can read the cert.
        subprocess.call(chown)
        chown = ['chown', 'www-data:www-data', server_key_path]

        # Change the owner to www-data so the nginx process can read the key.
        subprocess.call(chown)

        port = hookenv.config('port')
        hookenv.open_port(port)
        services = apiserver.services()
        nginx.configure_site(
            'apilb',
            'apilb.conf',
            server_name='_',
            services=services,
            port=port,
            server_certificate=server_cert_path,
            server_key=server_key_path,
            proxy_read_timeout=hookenv.config('proxy_read_timeout'))

        maybe_write_apilb_logrotate_config()
        hookenv.status_set('active', 'Loadbalancer ready.')
Example #5
def launch_default_ingress_controller():
    ''' Launch the Kubernetes ingress controller & default backend (404) '''
    context = {}
    context['arch'] = arch()
    addon_path = '/root/cdk/addons/{}'

    # Render the default http backend (404) replicationcontroller manifest
    manifest = addon_path.format('default-http-backend.yaml')
    render('default-http-backend.yaml', manifest, context)
    hookenv.log('Creating the default http backend.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log('Failed to create default-http-backend. Will attempt again next update.')  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return

    # Render the ingress replication controller manifest
    manifest = addon_path.format('ingress-replication-controller.yaml')
    render('ingress-replication-controller.yaml', manifest, context)
    hookenv.log('Creating the ingress replication controller.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log('Failed to create ingress controller. Will attempt again next update.')  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return

    set_state('kubernetes-worker.ingress.available')
    hookenv.open_port(80)
    hookenv.open_port(443)
Example #6
def initialize_new_leader():
    ''' Create an initial cluster string to bring up a single member cluster of
    etcd, and set the leadership data so the followers can join this one. '''
    bag = EtcdDatabag()
    bag.token = bag.token
    bag.cluster_state = 'new'
    address = get_ingress_address('cluster')
    cluster_connection_string = get_connection_string([address],
                                                      bag.management_port)
    bag.cluster = "{}={}".format(bag.unit_name, cluster_connection_string)

    render_config(bag)
    host.service_restart(bag.etcd_daemon)

    # sorry, some hosts need this. The charm races with systemd and wins.
    time.sleep(2)

    # Check health status before we say we are good
    etcdctl = EtcdCtl()
    status = etcdctl.cluster_health()
    if 'unhealthy' in status:
        status_set('blocked', 'Cluster not healthy.')
        return
    # We have a healthy leader, broadcast initial data-points for followers
    open_port(bag.port)
    leader_connection_string = get_connection_string([address],
                                                     bag.port)
    leader_set({'token': bag.token,
                'leader_address': leader_connection_string,
                'cluster': bag.cluster})

    # finish bootstrap delta and set configured state
    set_state('etcd.leader.configured')
Example #7
def configure_vault(context):
    log("Running configure_vault", level=DEBUG)
    context['disable_mlock'] = is_container() or config('disable-mlock')

    context['ssl_available'] = is_state('vault.ssl.available')

    if is_flag_set('etcd.tls.available'):
        etcd = endpoint_from_flag('etcd.available')
        log("Etcd detected, adding to context", level=DEBUG)
        context['etcd_conn'] = etcd.connection_string()
        context['etcd_tls_ca_file'] = '/var/snap/vault/common/etcd-ca.pem'
        context['etcd_tls_cert_file'] = '/var/snap/vault/common/etcd-cert.pem'
        context['etcd_tls_key_file'] = '/var/snap/vault/common/etcd.key'
        save_etcd_client_credentials(etcd,
                                     key=context['etcd_tls_key_file'],
                                     cert=context['etcd_tls_cert_file'],
                                     ca=context['etcd_tls_ca_file'])
        context['api_addr'] = vault.get_api_url()
        context['cluster_addr'] = vault.get_cluster_url()
        log("Etcd detected, setting api_addr to {}".format(
            context['api_addr']))
    else:
        log("Etcd not detected", level=DEBUG)
    log("Rendering vault.hcl.j2", level=DEBUG)
    render('vault.hcl.j2', VAULT_CONFIG, context, perms=0o600)
    log("Rendering vault systemd configuation", level=DEBUG)
    render('vault.service.j2', VAULT_SYSTEMD_CONFIG, {}, perms=0o644)
    service('enable', 'vault')
    log("Opening vault port", level=DEBUG)
    open_port(8200)
    set_flag('configured')
    if any_file_changed([VAULT_CONFIG, VAULT_SYSTEMD_CONFIG]):
        # force a restart if config has changed
        clear_flag('started')
Example #8
def config_changed():
    config = hookenv.config()
    if config.changed('port'):
        if config.previous('port'):
            hookenv.close_port(config.previous('port'))
        hookenv.open_port(config['port'], protocol='UDP')
    set_state('statsd.configured')
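The changed()/previous() pair comes from the hookenv.Config dict, which remembers the values committed in the previous hook run. The same diff can be factored into a reusable helper (a sketch; the name and signature are hypothetical):

def reopen_port(config, key='port', protocol='TCP'):
    # Hypothetical helper: close the port recorded in the previous hook
    # run, then open the currently configured one.
    if not config.changed(key):
        return
    previous = config.previous(key)
    if previous:
        hookenv.close_port(previous, protocol)
    hookenv.open_port(config[key], protocol)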
Example #9
def config_changed_postupgrade():
    # Ensure ssl dir exists and is unison-accessible
    ensure_ssl_dir()

    check_call(["chmod", "-R", "g+wrx", "/var/lib/keystone/"])

    ensure_ssl_dirs()

    save_script_rc()
    if run_in_apache():
        # Need to ensure mod_wsgi is installed and apache2 is reloaded
        # immediately as the charm queries its local keystone before the
        # restart decorator can fire
        apt_install(filter_installed_packages(determine_packages()))
        # when deployed from source, init scripts aren't installed
        if not git_install_requested():
            service_pause("keystone")
        CONFIGS.write(WSGI_KEYSTONE_API_CONF)
        if not is_unit_paused_set():
            restart_pid_check("apache2")
    configure_https()
    open_port(config("service-port"))

    update_nrpe_config()
    CONFIGS.write_all()

    initialise_pki()

    update_all_identity_relation_units()

    # Ensure sync request is sent out (needed for any/all ssl change)
    send_ssl_sync_request()

    for r_id in relation_ids("ha"):
        ha_joined(relation_id=r_id)
Example #10
def install_layer_openmano():
    status_set('maintenance', 'Installing')

    cfg = config()

    # TODO change user home
    # XXX security issue!
    host.adduser(USER, password=USER)

    # TODO check out a branch
    dest_dir = install_remote(
        cfg['source'],
        dest=INSTALL_PATH,
        depth='1',
        branch='master',
    )
    os.mkdir(os.path.join(dest_dir, 'logs'))
    host.chownr(dest_dir, USER, USER)
    kvdb.set('repo', dest_dir)

    os.mkdir('/home/{}/bin'.format(USER))

    os.symlink(
        "{}/openmano".format(dest_dir),
        "/home/{}/bin/openmano".format(USER))
    os.symlink(
        "{}/scripts/openmano-report.sh".format(dest_dir),
        "/home/{}/bin/openmano-report.sh".format(USER))
    os.symlink(
        "{}/scripts/service-openmano.sh".format(dest_dir),
        "/home/{}/bin/service-openmano".format(USER))

    open_port(9090)
    set_state('openmano.installed')
Example #11
def start_nodemanager():
    hookenv.status_set('maintenance', 'starting nodemanager')
    host.service_start('hadoop-yarn-nodemanager')
    for port in get_layer_opts().exposed_ports('nodemanager'):
        hookenv.open_port(port)
    set_state('nodemanager.started')
    hookenv.status_set('active', 'ready')
Example #12
def start_datanode(namenode):
    hookenv.status_set('maintenance', 'starting datanode')
    # NB: service should be started by install, but we want to verify it is
    # running before we set the .started state and open ports. We always
    # restart here, which may seem heavy-handed. However, restart works
    # whether the service is currently started or stopped. It also ensures the
    # service is using the most current config.
    started = host.service_restart('hadoop-hdfs-datanode')
    if started:
        # Create a /user/ubuntu dir in HDFS (this is safe to run multiple times).
        bigtop = Bigtop()
        if not bigtop.check_hdfs_setup():
            try:
                utils.wait_for_hdfs(30)
                bigtop.setup_hdfs()
            except utils.TimeoutError:
                # HDFS is not yet available or is still in safe mode, so we can't
                # do the initial setup (create dirs); skip setting the .started
                # state below so that we try again on the next hook.
                hookenv.status_set('waiting', 'waiting on hdfs')
                return

        # HDFS is ready. Open ports and set .started, status, and app version
        for port in get_layer_opts().exposed_ports('datanode'):
            hookenv.open_port(port)
        set_state('apache-bigtop-datanode.started')
        hookenv.status_set('maintenance', 'datanode started')
        hookenv.application_version_set(get_hadoop_version())
    else:
        hookenv.log('DataNode failed to start')
        hookenv.status_set('blocked', 'datanode failed to start')
        remove_state('apache-bigtop-datanode.started')
        for port in get_layer_opts().exposed_ports('datanode'):
            hookenv.close_port(port)
Example #13
def install():
    config = hookenv.config()

    host.adduser(USER, password='')
    host.mkdir(BASE_DIR, owner=USER, group=USER)

    # Meteor install script needs this
    os.environ['HOME'] = os.path.expanduser('~' + USER)

    hookenv.log('Installing dependencies')
    fetch.add_source(NODEJS_REPO)
    fetch.apt_update()
    fetch.apt_install(PACKAGES)

    hookenv.log('Installing Meteor')
    subprocess.check_call(DOWNLOAD_CMD.split())
    subprocess.check_call(INSTALL_CMD.split())
    subprocess.check_call('npm install -g meteorite'.split())

    init_code(config)
    init_bundle(config)
    init_dependencies(config)

    hookenv.open_port(config['port'])
    subprocess.check_call(
        ['chown', '-R', '{user}:{user}'.format(user=USER), BASE_DIR])

    config['mongo_url'] = ''
    write_upstart(config)
Example #14
def oostore_started():
    config = hookenv.config()
    hookenv.open_port(config['http_port'])
    if config.get('cert') and config.get('key'):
        hookenv.open_port(config['https_port'])
    
    hookenv.status_set('active', 'Ready')
Example #15
def install():
    """Install http-test"""
    subprocess.check_call(['hostnamectl', 'set-hostname', 'http-test'])
    try:
        # update needed because of weird error
        hookenv.log("Installing dependencies")
        subprocess.check_output(['apt-get', 'update'])
        subprocess.check_output([
            'pip3',
            'install',
            'Flask',
        ])
    except subprocess.CalledProcessError as exception:
        hookenv.log(exception.output)
        exit(1)
    mergecopytree(charm_dir() + '/files/http-test', "/opt/http-test")
    hookenv.log("Extracting and moving required files and folders")
    hookenv.log("Generating upstart file")
    with open(charm_dir() + '/templates/upstart.conf', 'r') as upstart_t_file:
        upstart_template = upstart_t_file.read()
    with open('/etc/init/http-test.conf', 'w') as upstart_file:
        upstart_file.write(upstart_template)
    hookenv.log("Starting http-test service")
    try:
        subprocess.check_output(['service', 'http-test', 'start'])
    except subprocess.CalledProcessError as exception:
        hookenv.log(exception.output)
        exit(1)
    open_port(5000)
    status_set('active', 'Ready')
    set_state('http-test.installed')
Example #16
def start_rest2kafka():
    success = host.service_start('rest2kafka')
    if not success:
        print("starting rest2kafka failed!")
        exit(1)
    open_port('5000')
    set_state('rest2kafka.started')
Example #17
def install():
    status_set('maintenance', 'Executing pre-install')
    execd_preinstall()
    openstack_origin = config('openstack-origin')
    configure_installation_source(openstack_origin)
    neutron_plugin = config('neutron-plugin')
    additional_install_locations(neutron_plugin, openstack_origin)

    add_source(config('extra-source'), config('extra-key'))
    status_set('maintenance', 'Installing apt packages')
    apt_update(fatal=True)
    packages = determine_packages(openstack_origin)
    apt_install(packages, fatal=True)

    for port in determine_ports():
        open_port(port)

    if neutron_plugin == 'midonet':
        mkdir('/etc/neutron/plugins/midonet',
              owner='neutron',
              group='neutron',
              perms=0o755,
              force=False)
    # call the policy overrides handler which will install any policy overrides
    maybe_do_policyd_overrides(
        os_release('neutron-server'),
        'neutron',
        restart_handler=lambda: service_restart('neutron-server'))
Example #18
def start_datanode():
    hookenv.status_set('maintenance', 'starting datanode')
    host.service_start('hadoop-hdfs-datanode')
    for port in get_layer_opts().exposed_ports('datanode'):
        hookenv.open_port(port)
    set_state('datanode.started')
    hookenv.status_set('active', 'ready')
Example #19
def configure_webapp():
    context = {'hostname': HOST, 'user': USER, 'rootdir': API_DIR}
    render('http.conf', '/etc/nginx/sites-enabled/sojobo.conf', context)
    open_port(config()['port'])
    service_restart('nginx')
    set_state('api.configured')
    status_set('blocked', 'Waiting for a connection with Redis')
Example #20
def render_systemd_conf():
    """Render fiche systemd conf
    """

    if config('fqdn'):
        server_name = config('fqdn')
    else:
        server_name = unit_public_ip()

    # Systemd vars
    SYSTEMD_CTXT = {
        'fiche_server_address': server_name,
        'fiche_server_port': config('fiche-server-port'),
        'slug_size': config('slug-size'),
        'buffer_size': config('buffer-size')
    }

    if os.path.exists('/etc/systemd/system/fiche.service'):
        os.remove('/etc/systemd/system/fiche.service')

    # Render systemd template
    render(source="fiche.service.tmpl",
           target="/etc/systemd/system/fiche.service",
           perms=0o644,
           owner="root",
           context=SYSTEMD_CTXT)

    # Open fiche server port
    open_port(config('fiche-server-port'))

    # Set 'fiche.systemd.configured'
    set_state('fiche.systemd.configured')
Example #21
def install():
    """Install http-test"""
    #subprocess.check_call(['hostnamectl', 'set-hostname', 'http-test'])
    try:
        # update needed because of weird error
        hookenv.log("Installing dependencies")
        subprocess.check_output(['apt-get', 'update'])
        subprocess.check_output(['pip3', 'install', 'Flask',])
    except subprocess.CalledProcessError as exception:
        hookenv.log(exception.output)
        exit(1)
    mergecopytree(charm_dir() + '/files/http-test', "/opt/http-test")
    hookenv.log("Extracting and moving required files and folders")
    hookenv.log("Generating upstart file")
    with open(charm_dir()+'/templates/upstart.conf', 'r') as upstart_t_file:
        upstart_template = upstart_t_file.read()
    with open('/etc/init/http-test.conf', 'w') as upstart_file:
        upstart_file.write(upstart_template)
    hookenv.log("Starting http-test service")
    try:
        subprocess.check_output(['service', 'http-test', 'start'])
    except subprocess.CalledProcessError as exception:
        hookenv.log(exception.output)
        exit(1)
    open_port(5000)
    status_set('active', 'Ready')
    set_state('http-test.installed')
Example #22
def config_changed_postupgrade():
    # Ensure ssl dir exists and is unison-accessible
    ensure_ssl_dir()

    check_call(['chmod', '-R', 'g+wrx', '/var/lib/keystone/'])

    ensure_ssl_dirs()

    save_script_rc()
    if run_in_apache():
        # Need to ensure mod_wsgi is installed and apache2 is reloaded
        # immediately as the charm queries its local keystone before the
        # restart decorator can fire
        apt_install(filter_installed_packages(determine_packages()))
        service_pause('keystone')
        CONFIGS.write(WSGI_KEYSTONE_CONF)
        restart_pid_check('apache2')
    configure_https()
    open_port(config('service-port'))

    update_nrpe_config()
    CONFIGS.write_all()

    initialise_pki()

    update_all_identity_relation_units()

    # Ensure sync request is sent out (needed for any/all ssl change)
    send_ssl_sync_request()

    for r_id in relation_ids('ha'):
        ha_joined(relation_id=r_id)
Example #23
def config_changed():
    if not config('action-managed-upgrade'):
        if openstack_upgrade_available('ceilometer-common'):
            status_set('maintenance', 'Upgrading to new OpenStack release')
            do_openstack_upgrade(CONFIGS)
    install_event_pipeline_setting()
    update_nrpe_config()
    CONFIGS.write_all()
    # NOTE(jamespage): Drop when charm switches to apache2+mod_wsgi
    #                  reload ensures port override is set correctly
    reload_systemd()
    ceilometer_joined()

    cmp_codename = CompareOpenStackReleases(
        get_os_codename_install_source(config('openstack-origin')))
    if cmp_codename < 'queens':
        open_port(CEILOMETER_PORT)
    else:
        close_port(CEILOMETER_PORT)

    configure_https()

    # NOTE(jamespage): Iterate identity-{service,credentials} relations
    #                  to pickup any required databag changes on these
    #                  relations.
    for rid in relation_ids('identity-service'):
        keystone_joined(relid=rid)
    for rid in relation_ids('identity-credentials'):
        keystone_credentials_joined(relid=rid)

    # Define the new ocf resource and use the key delete_resources to delete
    # legacy resource for >= Liberty since the ceilometer-agent-central moved
    # to ceilometer-polling in liberty (see LP: #1606787).
    for rid in relation_ids('ha'):
        ha_joined(rid)
Example #24
def restart():
    open_port(port())
    if service_running('omnivector-challenge'):
        service('restart', 'omnivector-challenge')
    else:
        service('start', 'omnivector-challenge')
    status_set("active", "")
Example #25
def install_layer_openmano():
    status_set('maintenance', 'Installing')

    cfg = config()

    # TODO change user home
    # XXX security issue!
    host.adduser(USER, password=USER)

    # TODO check out a branch
    dest_dir = install_remote(
        cfg['source'],
        dest=INSTALL_PATH,
        depth='1',
        branch='master',
    )
    os.mkdir(os.path.join(dest_dir, 'logs'))
    host.chownr(dest_dir, USER, USER)
    kvdb.set('repo', dest_dir)

    os.mkdir('/home/{}/bin'.format(USER))

    os.symlink("{}/openmano".format(dest_dir),
               "/home/{}/bin/openmano".format(USER))
    os.symlink("{}/scripts/openmano-report.sh".format(dest_dir),
               "/home/{}/bin/openmano-report.sh".format(USER))
    os.symlink("{}/scripts/service-openmano.sh".format(dest_dir),
               "/home/{}/bin/service-openmano".format(USER))

    open_port(9090)
    set_state('openmano.installed')
Example #26
 def update_ports(self):
     opened_ports = str(subprocess.check_output(["opened-ports"]),
                        'utf-8').split('/tcp\n')
     hookenv.log("Opened ports {}".format(opened_ports), "DEBUG")
     for frontend in self.proxy_config.frontends:
         if frontend.port in opened_ports:
             if self.charm_config['enable-stats'] \
                     and self.charm_config['stats-local'] and \
                self.charm_config['stats-port'] == int(frontend.port):
                 hookenv.log(
                     "Stats port set to be closed {}".format(frontend.port),
                     "DEBUG")
             else:
                 hookenv.log("Port already open {}".format(frontend.port),
                             "DEBUG")
                 opened_ports.remove(frontend.port)
         else:
             if self.charm_config['enable-stats'] and \
                     self.charm_config['stats-local'] and \
                self.charm_config['stats-port'] == int(frontend.port):
                 hookenv.log(
                     "Not opening stats port {}".format(frontend.port),
                     "DEBUG")
             else:
                 hookenv.log("Opening {}".format(frontend.port), "DEBUG")
                 hookenv.open_port(frontend.port)
     for port in opened_ports:
         if port:
             hookenv.log("Closing port {}".format(port), "DEBUG")
             hookenv.close_port(port)
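The parsing in update_ports relies on the opened-ports hook tool printing one entry per line (e.g. 80/tcp). Splitting on '/tcp\n' therefore yields the bare port numbers as strings, plus a trailing empty string that the final `if port:` guard skips. A quick illustration, assuming TCP-only output:

raw = '80/tcp\n443/tcp\n'    # sample `opened-ports` output
ports = raw.split('/tcp\n')  # -> ['80', '443', '']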
Example #27
def config_changed():
    if config('prefer-ipv6'):
        setup_ipv6()
        status_set('maintenance', 'Sync DB')
        sync_db_with_multi_ipv6_addresses(config('database'),
                                          config('database-user'))

    if git_install_requested():
        if config_value_changed('openstack-origin-git'):
            status_set('maintenance', 'Running Git install')
            git_install(config('openstack-origin-git'))
    elif not config('action-managed-upgrade'):
        if openstack_upgrade_available('glance-common'):
            status_set('maintenance', 'Upgrading OpenStack release')
            do_openstack_upgrade(CONFIGS)

    open_port(9292)
    configure_https()

    update_nrpe_config()

    # Pick up any changes due to the network reference architecture
    # configuration
    [keystone_joined(rid) for rid in relation_ids('identity-service')]
    [image_service_joined(rid) for rid in relation_ids('image-service')]
    [cluster_joined(rid) for rid in relation_ids('cluster')]
    for r_id in relation_ids('ha'):
        ha_relation_joined(relation_id=r_id)
Example #28
def install():
    """Install REST2JFed"""
    try:
        # update needed because of weird error
        hookenv.log("Installing dependencies")
        subprocess.check_output(['apt-get', 'update'])
        subprocess.check_output([
            'pip2', 'install', 'Jinja2', 'Flask', 'pyyaml', 'click',
            'python-dateutil'
        ])
    except subprocess.CalledProcessError as exception:
        hookenv.log(exception.output)
        exit(1)
    hookenv.log("Extracting and moving required files and folders")
    mergecopytree(charm_dir() + '/files/jfedS4', "/opt/jfedS4")
    mergecopytree(charm_dir() + '/files/rest2jfed', "/opt/rest2jfed")
    hookenv.log("Generating upstart file")
    with open(charm_dir() + '/templates/upstart.conf', 'r') as upstart_t_file:
        upstart_template = upstart_t_file.read()
    with open('/etc/init/rest2jfed.conf', 'w') as upstart_file:
        upstart_file.write(upstart_template)
    hookenv.log("Starting rest2jfed service")
    try:
        subprocess.check_output(['service', 'rest2jfed', 'start'])
    except subprocess.CalledProcessError as exception:
        hookenv.log(exception.output)
        exit(1)
    open_port(5000)
    status_set('active', 'Ready')
    set_state('rest2jfed.installed')
Example #29
def install():
    hookenv.log('Installing neo4j')
    config = hookenv.config()
    hookenv.open_port(config['port'])
    fetch.configure_sources(True)
    fetch.apt_install(fetch.filter_installed_packages(['neo4j']))
    set_state('neo4j.installed')
Example #30
def configured_devpi():
    status.maintenance('Configuring devpi')

    DEVPI_PATH.mkdir(mode=0o755, parents=True, exist_ok=True)
    devpi_server_bin = DEVPI_ENV_BIN / 'devpi-server'

    # initialize devpi
    adduser('devpi')
    chownr(str(DEVPI_PATH), 'devpi', 'devpi', chowntopdir=True)
    check_call([
        'sudo', '-u', 'devpi',
        str(devpi_server_bin), '--init', '--serverdir',
        str(DEVPI_PATH)
    ])

    # render service
    render('devpi.service',
           '/etc/systemd/system/devpi.service',
           context={
               'devpi_server_bin': devpi_server_bin,
               'devpi_path': str(DEVPI_PATH)
           })

    open_port(3141)

    # enable service
    check_call(['systemctl', 'enable', 'devpi.service'])

    # start service
    check_call(['systemctl', 'start', 'devpi.service'])

    status.active('devpi running')
    set_flag('devpi.configured')
Example #31
 def __call__(self, manager, service_name, event_name):
     """Open unit ports."""
     service = manager.get_service(service_name)
     new_ports = service.get("ports", [])
     port_file = os.path.join(hookenv.charm_dir(),
                              ".{}.ports".format(service_name))
     if os.path.exists(port_file):
         with open(port_file) as fp:
             old_ports = fp.read().split(",")
         for old_port in old_ports:
             if bool(old_port) and not self.ports_contains(
                     old_port, new_ports):
                 hookenv.close_port(old_port)
     with open(port_file, "w") as fp:
         fp.write(",".join(str(port) for port in new_ports))
     for port in new_ports:
         # A port is either a number or 'ICMP'
         protocol = "TCP"
         if str(port).upper() == "ICMP":
             protocol = "ICMP"
         if event_name == "start":
             try:
                 hookenv.open_port(port, protocol)
             except subprocess.CalledProcessError as err:
                 if err.returncode == 1:
                     hookenv.log(
                         "open_port returns: {}, ignoring".format(err),
                         level=hookenv.INFO,
                     )
                 else:
                     raise
         elif event_name == "stop":
             hookenv.close_port(port, protocol)
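Catching subprocess.CalledProcessError here works because hookenv.open_port shells out to the open-port hook tool; roughly (a simplified sketch of the charmhelpers implementation):

import subprocess

def open_port(port, protocol='TCP'):
    # Simplified sketch: the hook tool takes PORT/PROTOCOL, and a
    # non-zero exit status surfaces as CalledProcessError.
    subprocess.check_call(['open-port', '{}/{}'.format(port, protocol)])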
Example #32
def initialize_new_leader():
    ''' Create an initial cluster string to bring up a single member cluster of
    etcd, and set the leadership data so the followers can join this one. '''
    bag = EtcdDatabag()
    bag.token = bag.token
    bag.cluster_state = 'new'
    cluster_connection_string = get_connection_string([bag.private_address],
                                                      bag.management_port)
    bag.cluster = "{}={}".format(bag.unit_name, cluster_connection_string)
    render('defaults', '/etc/default/etcd', bag.__dict__, owner='root',
           group='root')
    host.service_restart('etcd')

    # sorry, some hosts need this. The charm races with systemd and wins.
    time.sleep(2)

    # Check health status before we say we are good
    etcdctl = EtcdCtl()
    status = etcdctl.cluster_health()
    if 'unhealthy' in status:
        status_set('blocked', 'Cluster not healthy')
        return
    # We have a healthy leader, broadcast initial data-points for followers
    open_port(bag.port)
    leader_connection_string = get_connection_string([bag.private_address],
                                                     bag.port)
    leader_set({'token': bag.token,
                'leader_address': leader_connection_string,
                'cluster': bag.cluster})

    # finish bootstrap delta and set configured state
    set_state('etcd.leader.configured')
Example #33
def install():
    if config()['offline'] is False:
        apt_update(fatal=True)
        apt_install(packages=[
            'bind9',
            'dnsutils',
            ], fatal=True)
    else:
        log("Installing offline debian packages")
        install_packages('files/bind')
        # rerun because it's buggy
        install_packages('files/bind')
        log("Installing Python packages")
        pip_install('files/bind/pip')
    ## use the nameserver in /etc/resolv.conf as a forwarder ...
    import DNS
    DNS.ParseResolvConf("/etc/resolv.conf")
    nameserver = DNS.defaults['server'][0]
    log('Setting DNS forwarder to: ' + nameserver)
    import jinja2
    templateLoader = jinja2.FileSystemLoader(
        searchpath=os.environ['CHARM_DIR'])
    # use a Jinja2 template to enable bind forwarding
    templateEnv = jinja2.Environment(loader=templateLoader)
    template = templateEnv.get_template(
        'contrib/bind/templates/named.conf.options.jinja2')
    output_from_parsed_template = template.render(forwarder=nameserver)
    # save the results ('w', not 'wb': render() returns text)
    with open("/etc/bind/named.conf.options", "w") as fh:
        fh.write(output_from_parsed_template)


    if not os.path.exists('/etc/bind/zone-backup'):
        os.makedirs('/etc/bind/zone-backup')
    open_port(53, "TCP")
    open_port(53, "UDP")
Example #34
def renew_cert():
    remove_state('lets-encrypt.renew.requested')
    # We don't want to stop the webserver if no renew is needed.
    if no_renew_needed():
        return
    print("Renewing certificate...")
    configs = config()
    fqdn = configs.get('fqdn')
    needs_start = stop_running_web_service()
    open_port(80)
    open_port(443)
    try:
        output = check_output(['letsencrypt', 'renew', '--agree-tos'],
                              universal_newlines=True,
                              stderr=STDOUT)
        print(output)  # So output shows up in logs
        status_set('active', 'registered %s' % (fqdn))
        set_state('lets-encrypt.renewed')
    except CalledProcessError as err:
        status_set('blocked',
                   'letsencrypt renewal failed: \n{}'.format(err.output))
        print(err.output)  # So output shows up in logs
    finally:
        if needs_start:
            start_web_service()
Example #35
def install():
    hookenv.status_set('maintenance', 'Executing pre-install')
    execd.execd_preinstall()
    ch_utils.configure_installation_source(hookenv.config('openstack-origin'))

    hookenv.status_set('maintenance', 'Installing apt packages')
    ch_fetch.apt_update()
    ch_fetch.apt_install(ncc_utils.determine_packages(), fatal=True)

    ncc_utils.disable_package_apache_site()
    ncc_utils.stop_deprecated_services()

    _files = os.path.join(hookenv.charm_dir(), 'files')
    if os.path.isdir(_files):
        for f in os.listdir(_files):
            f = os.path.join(_files, f)
            if os.path.isfile(f):
                hookenv.log('Installing %s to /usr/bin' % f)
                shutil.copy2(f, '/usr/bin')
    for port in ncc_utils.determine_ports():
        hookenv.open_port(port)
    msg = 'Disabling services into db relation joined'
    hookenv.log(msg)
    hookenv.status_set('maintenance', msg)
    if not ch_utils.is_unit_paused_set():
        for svc in ncc_utils.services():
            ch_host.service_pause(svc)
    else:
        hookenv.log('Unit is in paused state, not issuing stop/pause '
                    'to all services')
Example #36
def check_ports(new_port):
    kv = unitdata.kv()
    if kv.get('grafana.port') != new_port:
        hookenv.open_port(new_port)
        if kv.get('grafana.port'):  # Don't try to close nonexistent ports
            hookenv.close_port(kv.get('grafana.port'))
        kv.set('grafana.port', new_port)
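A typical call site (hypothetical wiring) passes the freshly configured value, so the kv store always records the last port actually opened:

def config_changed():
    # Hypothetical caller for check_ports
    check_ports(hookenv.config('port'))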
Example #37
def config_changed():
    if config('prefer-ipv6'):
        setup_ipv6()
        status_set('maintenance', 'Sync DB')
        sync_db_with_multi_ipv6_addresses(config('database'),
                                          config('database-user'))

    if git_install_requested():
        if config_value_changed('openstack-origin-git'):
            status_set('maintenance', 'Running Git install')
            git_install(config('openstack-origin-git'))
    elif not config('action-managed-upgrade'):
        if openstack_upgrade_available('glance-common'):
            status_set('maintenance', 'Upgrading OpenStack release')
            do_openstack_upgrade(CONFIGS)

    open_port(9292)
    configure_https()

    update_nrpe_config()

    # Pick up any changes due to the network reference architecture
    # configuration
    [keystone_joined(rid) for rid in relation_ids('identity-service')]
    [image_service_joined(rid) for rid in relation_ids('image-service')]
    [cluster_joined(rid) for rid in relation_ids('cluster')]
    for r_id in relation_ids('ha'):
        ha_relation_joined(relation_id=r_id)
Example #38
def configure_http(prometheus_target):
    job_name = 'virtfs-exporter'
    log('Register target {}: {}:{}'.format(job_name,
                                           get_ip()[1], config.get('port')))
    open_port(config.get('port'))
    prometheus_target.configure(private_address=get_ip()[1],
                                port=config.get('port'))
Example #39
def start_limeds():
    success = host.service_start('limeds')
    if not success:
        print("starting limeds failed!")
        exit(1)
    open_port('8080')  # LimeDS console at localhost:8080/system/console
    set_state('limeds.started')
Example #40
def run_container(port=8080):
    '''
    Wrapper method to launch a docker container under the direction of Juju,
    and provide feedback/notifications to the end user.
    '''
    # TODO: options, such as port etc...
    # Run the DeepDetect docker container.
    hookenv.status_set('maintenance', 'Stopping DeepDetect container')
    # Stop and remove any existing container, ignoring the failure when
    # no such container exists yet.
    try:
        check_call(['docker', 'stop', 'docker-deepdetect'])
    except CalledProcessError:
        pass
    try:
        check_call(['docker', 'rm', 'docker-deepdetect'])
    except CalledProcessError:
        pass
    run_command = [
        'docker',
        'run',
        '--name',
        'docker-deepdetect',
        '-p',
        '{}:8080'.format(config.get('port')),
        '-d',
        'beniz/deepdetect_cpu'
    ]
    check_call(run_command)
    hookenv.open_port(config.get('port'))  # mark the port so `juju expose` opens it in the cloud firewall
    reactive.remove_state('deepdetect.stopped')
    reactive.set_state('deepdetect.started')
    hookenv.status_set('active', 'DeepDetect container started')
Example #41
def start_mattermost_nginx():
    print("Starting NGINX reverseproxy and https endpoint.")
    service_restart('nginx')
    open_port(config().get('port'))
    open_port(443)
    status_set('active', 'Ready (https://{})'.format(config().get('fqdn')))
    set_state('mattermost.nginx.started')
Example #42
def zookeeper_config(zookeeper):
    hookenv.status_set('maintenance',
                       'Changing Apache NiFi to run as a cluster')
    hookenv.log(
        'Adding Apache Zookeeper -- Changing Apache NiFi to run as a cluster')
    conf = hookenv.config()
    zookeeper_servers_string = ''
    for zk_unit in zookeeper.zookeepers():
        zookeeper_servers_string += '{}:{},'.format(zk_unit['host'],
                                                    zk_unit['port'])
    re_edit_in_place(
        '%s/files/nifi-1.3.0/conf/nifi.properties' % hookenv.charm_dir(), {
            r'.*nifi.cluster.is.node.*':
            'nifi.cluster.is.node=true',
            r'.*nifi.cluster.node.address.*':
            'nifi.cluster.node.address={}'.format(hookenv.unit_private_ip()),
            r'.*nifi.web.http.port.*':
            'nifi.web.http.port={}'.format(conf['nifi-port']),
            r'.*nifi.cluster.node.protocol.port.*':
            'nifi.cluster.node.protocol.port={}'.format(conf['cluster-port']),
            r'.*nifi.zookeeper.connect.string.*':
            'nifi.zookeeper.connect.string={}'.format(zookeeper_servers_string)
        })
    hookenv.open_port(conf['cluster-port'])
    filesdir = '{}/files'.format(hookenv.charm_dir())
    try:
        subprocess.check_call(
            ['bash', '{}/nifi-1.3.0/bin/nifi.sh'.format(filesdir), 'restart'])
        hookenv.status_set('active', 'Running: cluster mode with Zookeeper')
        set_state('apache-nifi.cluster')
    except subprocess.CalledProcessError:
        hookenv.status_set('blocked', 'Failed to restart')
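re_edit_in_place is imported from the charm's helpers and is not shown here. A minimal sketch of the behaviour the call above assumes (regex-keyed, line-by-line substitution; the implementation details are hypothetical):

import re

def re_edit_in_place(path, subs):
    # Hypothetical sketch: rewrite any line matching one of the regex
    # keys with its replacement string, leaving other lines untouched.
    with open(path) as f:
        lines = f.read().splitlines()
    with open(path, 'w') as f:
        for line in lines:
            for pattern, repl in subs.items():
                if re.match(pattern, line):
                    line = repl
                    break
            f.write(line + '\n')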
Example #43
    def update_ports(self):
        """Update open ports based on configuration so that Juju can expose them."""
        opened_ports = str(subprocess.check_output(["opened-ports"]),
                           "utf-8").split("/tcp\n")
        hookenv.log("Opened ports {}".format(opened_ports), "DEBUG")

        for frontend in self.proxy_config.frontends:
            if frontend.port in opened_ports:
                if (self.charm_config["enable-stats"]
                        and self.charm_config["stats-local"] and
                        self.charm_config["stats-port"] == int(frontend.port)):
                    hookenv.log(
                        "Stats port set to be closed {}".format(frontend.port),
                        "DEBUG")
                else:
                    hookenv.log("Port already open {}".format(frontend.port),
                                "DEBUG")
                    opened_ports.remove(frontend.port)
            else:
                if (self.charm_config["enable-stats"]
                        and self.charm_config["stats-local"] and
                        self.charm_config["stats-port"] == int(frontend.port)):
                    hookenv.log(
                        "Not opening stats port {}".format(frontend.port),
                        "DEBUG")
                else:
                    hookenv.log("Opening {}".format(frontend.port), "DEBUG")
                    hookenv.open_port(frontend.port)

        for port in opened_ports:
            if port:
                hookenv.log("Closing port {}".format(port), "DEBUG")
                hookenv.close_port(port)
Example #44
def open_ports():
    '''
    Open port 9200 and 9300
    '''
    open_port(ES_HTTP_PORT)
    open_port(ES_TRANSPORT_PORT)
    set_flag('elasticsearch.ports.available')
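ES_HTTP_PORT and ES_TRANSPORT_PORT are defined elsewhere in the layer; given the docstring, presumably:

ES_HTTP_PORT = 9200       # Elasticsearch REST API
ES_TRANSPORT_PORT = 9300  # inter-node transport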
Example #45
def config_changed():

    if not conf.changed('server_port') and not conf.changed('RAM_MAX'):
        return

    log('ftb-infinity: config_changed')
    cur_status = status_get()
    status_set('maintenance', 'configuring')

    port_changed = conf.changed('server_port')
    ram_changed = conf.changed('RAM_MAX')

    # Let's suppose java will rewrite server.properties on exit
    started = is_state(CHARM_STATE_STARTED)
    if started:
        service_stop(CHARM_NAME)
        sleep(2)

    if port_changed:
        close_port(conf.previous('server_port'))
        ftb_config_server()

    if ram_changed:
        ftb_systemd_install()

    if started:
        service_start(CHARM_NAME)
        if port_changed:
            open_port(conf['server_port'])

    # restore state
    status_set(cur_status[0], cur_status[1])
Example #46
def start_mattermost_nginx():
    print("Starting NGINX reverseproxy and https endpoint.")
    service_restart('nginx')
    open_port(config().get('port'))
    open_port(443)
    status_set('active', 'Ready (https://{})'.format(config().get('fqdn')))
    set_state('mattermost.nginx.started')
Example #47
def run_che():
    status_set('maintenance', 'Installing Eclipse Che')
    # Start and stop Che so che's config is generated
    start_che()
    stop_che()
    # Add Juju stuff to Che config
    json_add_object_to_array(
        "{}/templates/stack-juju-charm.json".format(charm_dir()),
        "/home/ubuntu/instance/data/stacks/stacks.json")
    copyfile("{}/templates/type-juju.svg".format(charm_dir()),
             "/home/ubuntu/instance/data/stacks/images/type-juju.svg")
    json_add_object_to_array(
        "{}/templates/project-template-charms.json".format(charm_dir()),
        "/home/ubuntu/instance/data/templates/samples.json")
    json_add_object_to_array(
        "{}/templates/project-template-interface.json".format(charm_dir()),
        "/home/ubuntu/instance/data/templates/samples.json")
    json_add_object_to_array(
        "{}/templates/project-template-layer.json".format(charm_dir()),
        "/home/ubuntu/instance/data/templates/samples.json")
    # Start Che for real
    start_che()
    # opened ports are used by `juju expose` so it's important to open all
    # ports a user connects to.
    open_port('8080', protocol="TCP")  # Port to the UI
    open_port('32768-65535', protocol="TCP")  # Ports to the workspaces
    status_set('active', 'Ready')
    set_state('che.available')
Example #48
def run_container(webroot=None):
    '''
    Wrapper method to launch a docker container under the direction of Juju,
    and provide feedback/notifications to the end user.
    '''
    if not webroot:
        webroot = config.get('webroot')
    # Run the nginx docker container.
    run_command = [
        'docker',
        'run',
        '--restart',
        'on-failure',
        '--name',
        'docker-nginx',
        '-v',
        '{}:/usr/share/nginx/html:ro'.format(webroot),
        '-p',
        '{}:80'.format(config.get('port')),
        '-d',
        'nginx'
    ]
    check_call(run_command)
    hookenv.open_port(config.get('port'))
    reactive.remove_state('nginx.stopped')
    reactive.set_state('nginx.started')
    hookenv.status_set('active', 'Nginx container started')
Example #49
def config_changed():
    if config('prefer-ipv6'):
        setup_ipv6()
        status_set('maintenance', 'Sync DB')
        sync_db_with_multi_ipv6_addresses(config('database'),
                                          config('database-user'))

    if not config('action-managed-upgrade'):
        if openstack_upgrade_available('glance-common'):
            status_set('maintenance', 'Upgrading OpenStack release')
            do_openstack_upgrade(CONFIGS)

    open_port(9292)
    configure_https()

    update_nrpe_config()

    # Pick up any changes due to the network reference architecture
    # configuration
    [keystone_joined(rid) for rid in relation_ids('identity-service')]
    [image_service_joined(rid) for rid in relation_ids('image-service')]
    [cluster_joined(rid) for rid in relation_ids('cluster')]
    for r_id in relation_ids('ha'):
        ha_relation_joined(relation_id=r_id)

    # NOTE(jamespage): trigger any configuration related changes
    #                  for cephx permissions restrictions
    ceph_changed()
    update_image_location_policy()
Example #50
def configure_website():
    config = hookenv.config()
    configure_site('uosci_dashboard', 'uosci.conf', app_path='/var/www/html')
    subprocess.check_call(['uosci-dashboard', '--path', '/var/www/html'])
    hookenv.open_port('80')
    hookenv.status_set('active', 'UOSCI Dashboard is now available')
    set_state('dashboard.init')
Example #51
def run_che():
    status_set('maintenance', 'Installing Eclipse Che')
    # Start and stop Che so che's config is generated
    start_che()
    stop_che()
    # Add Juju stuff to Che config
    json_add_object_to_array(
        "{}/templates/stack-juju-charm.json".format(charm_dir()),
        "/home/ubuntu/instance/data/stacks/stacks.json"
    )
    copyfile(
        "{}/templates/type-juju.svg".format(charm_dir()),
        "/home/ubuntu/instance/data/stacks/images/type-juju.svg"
    )
    json_add_object_to_array(
        "{}/templates/project-template-charms.json".format(charm_dir()),
        "/home/ubuntu/instance/data/templates/samples.json"
    )
    json_add_object_to_array(
        "{}/templates/project-template-interface.json".format(charm_dir()),
        "/home/ubuntu/instance/data/templates/samples.json"
    )
    json_add_object_to_array(
        "{}/templates/project-template-layer.json".format(charm_dir()),
        "/home/ubuntu/instance/data/templates/samples.json"
    )
    # Start Che for real
    start_che()
    # opened ports are used by `juju expose` so it's important to open all
    # ports a user connects to.
    open_port('8080', protocol="TCP")           # Port to the UI
    open_port('32768-65535', protocol="TCP")    # Ports to the workspaces
    status_set('active', 'Ready (eclipse/che)')
    set_state('che.available')
Example #52
def update_charm_status(update_config=True):
    update_config_func = render_config if update_config else None
    result = check_run_prerequisites(CONTAINER_NAME, CONFIG_NAME,
                                     update_config_func, SERVICES_TO_CHECK)
    if not result:
        return

    ctx = get_context()
    missing_relations = []
    if not ctx.get("db_user"):
        # NOTE: Charms don't allow deploying cassandra in AllowAll mode
        missing_relations.append("contrail-controller-cluster")
    if not ctx.get("analytics_servers"):
        missing_relations.append("contrail-analytics")
    if get_ip() not in ctx.get("controller_servers", []):
        missing_relations.append("contrail-cluster")
    if missing_relations:
        status_set('blocked',
                   'Missing relations: ' + ', '.join(missing_relations))
        return
    if not ctx.get("cloud_orchestrator"):
        status_set('blocked', 'Missing cloud orchestrator info in relations.')
        return
    if not ctx.get("keystone_ip"):
        status_set('blocked',
                   'Missing auth info in relation with contrail-auth.')
        return
    # TODO: what should happen if the relation departs?

    render_config(ctx)
    for port in ("8082", "8080", "8143"):
        open_port(port, "TCP")

    run_container(CONTAINER_NAME, "contrail-control")
Example #53
def check_ports(new_port):
    kv = unitdata.kv()
    if kv.get('grafana.port') != new_port:
        hookenv.open_port(new_port)
        if kv.get('grafana.port'):  # Don't try to close nonexistent ports
            hookenv.close_port(kv.get('grafana.port'))
        kv.set('grafana.port', new_port)
Example #54
def configure_rabbit_ssl():
    """
    The legacy config support adds some additional complications.

    ssl_enabled = True, ssl = off -> ssl enabled
    ssl_enabled = False, ssl = on -> ssl enabled
    """
    ssl_mode, external_ca = _get_ssl_mode()

    if ssl_mode == 'off':
        if os.path.exists(rabbit.RABBITMQ_CONF):
            os.remove(rabbit.RABBITMQ_CONF)
        close_port(config('ssl_port'))
        reconfigure_client_ssl()
        return
    ssl_key = _convert_from_base64(config('ssl_key'))
    ssl_cert = _convert_from_base64(config('ssl_cert'))
    ssl_ca = _convert_from_base64(config('ssl_ca'))
    ssl_port = config('ssl_port')

    # If external managed certs then we need all the fields.
    if (ssl_mode in ('on', 'only') and any((ssl_key, ssl_cert)) and
            not all((ssl_key, ssl_cert))):
        log('If ssl_key or ssl_cert are specified both are required.',
            level=ERROR)
        sys.exit(1)

    if not external_ca:
        ssl_cert, ssl_key, ssl_ca = ServiceCA.get_service_cert()

    rabbit.enable_ssl(
        ssl_key, ssl_cert, ssl_port, ssl_ca,
        ssl_only=(ssl_mode == "only"), ssl_client=False)
    reconfigure_client_ssl(True)
    open_port(ssl_port)
Example #55
    def open_ports(self):
        '''
        Expose the ports in the configuration to the outside world.

        '''
        for port in self.dist_config.exposed_ports('zookeeper'):
            open_port(port)
Example #56
def install():
    hookenv.status_set('maintenance', 'Executing pre-install')
    execd.execd_preinstall()
    ch_utils.configure_installation_source(hookenv.config('openstack-origin'))

    hookenv.status_set('maintenance', 'Installing apt packages')
    ch_fetch.apt_update()
    ch_fetch.apt_install(ncc_utils.determine_packages(), fatal=True)

    ncc_utils.disable_package_apache_site()
    ncc_utils.stop_deprecated_services()

    _files = os.path.join(hookenv.charm_dir(), 'files')
    if os.path.isdir(_files):
        for f in os.listdir(_files):
            f = os.path.join(_files, f)
            if os.path.isfile(f):
                hookenv.log('Installing %s to /usr/bin' % f)
                shutil.copy2(f, '/usr/bin')
    for port in ncc_utils.determine_ports():
        hookenv.open_port(port)
    msg = 'Disabling services into db relation joined'
    hookenv.log(msg)
    hookenv.status_set('maintenance', msg)
    if not ch_utils.is_unit_paused_set():
        for svc in ncc_utils.services():
            ch_host.service_pause(svc)
    else:
        hookenv.log('Unit is in paused state, not issuing stop/pause '
                    'to all services')
Example #57
def install():
    """Install REST2JFed"""
    try:
        # update needed because of weird error
        hookenv.log("Installing dependencies")
        subprocess.check_output(['apt-get', 'update'])
        subprocess.check_output(['pip2', 'install', 'Jinja2', 'Flask', 'pyyaml', 'click', 'python-dateutil'])
    except subprocess.CalledProcessError as exception:
        hookenv.log(exception.output)
        exit(1)
    hookenv.log("Extracting and moving required files and folders")
    mergecopytree(charm_dir() + '/files/jfedS4', "/opt/jfedS4")
    mergecopytree(charm_dir() + '/files/rest2jfed', "/opt/rest2jfed")
    hookenv.log("Generating upstart file")
    with open(charm_dir()+'/templates/upstart.conf', 'r') as upstart_t_file:
        upstart_template = upstart_t_file.read()
    with open('/etc/init/rest2jfed.conf', 'w') as upstart_file:
        upstart_file.write(upstart_template)
    hookenv.log("Starting rest2jfed service")
    try:
        subprocess.check_output(['service', 'rest2jfed', 'start'])
    except subprocess.CalledProcessError as exception:
        hookenv.log(exception.output)
        exit(1)
    open_port(5000)
    status_set('active', 'Ready')
    set_state('rest2jfed.installed')
Example #58
def config_changed():
    if config('prefer-ipv6'):
        setup_ipv6()
        status_set('maintenance', 'Sync DB')
        sync_db_with_multi_ipv6_addresses(config('database'),
                                          config('database-user'))

    if not config('action-managed-upgrade'):
        if openstack_upgrade_available('glance-common'):
            status_set('maintenance', 'Upgrading OpenStack release')
            do_openstack_upgrade(CONFIGS)

    open_port(9292)
    configure_https()

    update_nrpe_config()

    # Pick up any changes due to the network reference architecture
    # configuration
    [keystone_joined(rid) for rid in relation_ids('identity-service')]
    [image_service_joined(rid) for rid in relation_ids('image-service')]
    [cluster_joined(rid) for rid in relation_ids('cluster')]
    for r_id in relation_ids('ha'):
        ha_relation_joined(relation_id=r_id)

    # NOTE(jamespage): trigger any configuration related changes
    #                  for cephx permissions restrictions
    ceph_changed()
    update_image_location_policy()