Example #1
def configure_jupyter_notebook():
    conf = hookenv.config()
    jupyter_dir = '/opt/jupyter'
    port = conf['open-port']
    # Fetch the stored password, or generate and persist a new one
    kv_store = unitdata.kv()
    password = kv_store.get('password')
    if not password:
        password = generate_password()
        kv_store.set('password', password)
    # Convert to string because some functions can't handle kv object type.
    password = str(password)
    password_hash = generate_hash(password)
    context = {
        'port': port,
        'password_hash': password_hash,
    }
    if data_changed('jupyter-conf', context):
        # Create config directory and render config file
        host.mkdir(jupyter_dir)
        templating.render(
            source='jupyter_notebook_config.py.jinja2',
            target=jupyter_dir + '/jupyter_notebook_config.py',
            context=context
        )
        # Generate upstart template / service file
        context = {}
        if lsb_release.get_lsb_information()['RELEASE'] == "14.04":
            render_api_upstart_template(context)
        else:
            render_api_systemd_template(context)
        restart_notebook()
    chownr(jupyter_dir, 'ubuntu', 'ubuntu', chowntopdir=True)
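Note: every example on this page revolves around charmhelpers.core.host.chownr, a recursive chown helper. As a reading aid, here is a minimal sketch of its behavior; this is an illustration of the idea, not the actual charmhelpers source.

import grp
import os
import pwd

def chownr_sketch(path, owner, group, follow_links=True, chowntopdir=False):
    # Resolve names to numeric ids once.
    uid = pwd.getpwnam(owner).pw_uid
    gid = grp.getgrnam(group).gr_gid
    # follow_links=False changes ownership of symlinks themselves.
    chown = os.chown if follow_links else os.lchown
    if chowntopdir:
        chown(path, uid, gid)
    for root, dirs, files in os.walk(path):
        for name in dirs + files:
            chown(os.path.join(root, name), uid, gid)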
Example #2
    def configure_events_dir(self, mode):
        """
        Create directory for spark event data.

        This directory is used by workers to store event data. It is also read
        by the history server when displaying event information.

        :param string mode: Spark execution mode to determine the dir location.
        """
        dc = self.dist_config

        # Directory needs to be 777 so non-spark users can write job history
        # there. It needs to be g+s (HDFS is g+s by default) so all entries
        # are readable by spark (in the spark group). It needs to be +t so
        # users cannot remove files they don't own.
        if mode.startswith('yarn'):
            events_dir = 'hdfs://{}'.format(dc.path('spark_events'))
            utils.run_as('hdfs', 'hdfs', 'dfs', '-mkdir', '-p', events_dir)
            utils.run_as('hdfs', 'hdfs', 'dfs', '-chmod', '1777', events_dir)
            utils.run_as('hdfs', 'hdfs', 'dfs', '-chown', '-R', 'ubuntu:spark',
                         events_dir)
        else:
            events_dir = dc.path('spark_events')
            events_dir.makedirs_p()
            events_dir.chmod(0o3777)
            host.chownr(events_dir, 'ubuntu', 'spark', chowntopdir=True)
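The mode bits described in the comment can be verified with the stat module; 0o3777 is exactly g+s (setgid) plus +t (sticky) on top of 777:

import stat

mode = stat.S_ISGID | stat.S_ISVTX | 0o777  # g+s, +t, rwxrwxrwx
assert mode == 0o3777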
Example #3
def install_packages():
    # Add the TrilioVault repository for the required package and
    # configure the OpenStack origin for the nova libraries
    if not add_user():
        log("Adding dmapi user failed!")
        return

    os.system('sudo echo "{}" > '
              '/etc/apt/sources.list.d/trilio-gemfury-sources.list'.format(
                  config('triliovault-pkg-source')))

    new_src = config('openstack-origin')
    configure_installation_source(new_src)

    if config('python-version') == 2:
        dmapi_pkg = 'dmapi'
    else:
        dmapi_pkg = 'python3-dmapi'

    apt_update()
    dmapi.install()
    # Placing the service file
    os.system('sudo cp files/trilio/tvault-datamover-api.service '
              '/etc/systemd/system/')
    chownr('/var/log/dmapi', DMAPI_USR, DMAPI_GRP)
    mkdir('/var/cache/dmapi', DMAPI_USR, DMAPI_GRP, perms=0o755)
    os.system('sudo systemctl enable tvault-datamover-api')
    service_restart('tvault-datamover-api')

    application_version_set(get_new_version(dmapi_pkg))
    reactive.set_state('charm.installed')
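A caution on the os.system lines above: the shell redirection in 'sudo echo ... >' runs in the charm's own shell, not under sudo, and charm hooks already execute as root, so the sudo is redundant. A sketch of the same steps without os.system (config here is the same charmhelpers helper the example imports):

import shutil
import subprocess

with open('/etc/apt/sources.list.d/trilio-gemfury-sources.list', 'w') as f:
    f.write('{}\n'.format(config('triliovault-pkg-source')))
shutil.copy('files/trilio/tvault-datamover-api.service',
            '/etc/systemd/system/')
subprocess.check_call(['systemctl', 'enable', 'tvault-datamover-api'])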
Example #4
def install(sojobo):
    api_dir = list(sojobo.connection())[0]['api-dir']
    copyfile('{}/files/controller_aws.py'.format(charm_dir()), '{}/controllers/controller_aws.py'.format(api_dir))
    chownr(api_dir, 'sojobo', 'www-data', chowntopdir=True)
    service_restart('nginx')
    status_set('active', 'data copied')
    set_state('controller-aws.installed')
Example #5
def setup_images_folder():
    status_set("maintenance", "setting up VM images folder")
    mkdir('/opt/VNF', owner='ubuntu', group='ubuntu', perms=0o775, force=False)
    symlink('/var/lib/libvirt/images', '/opt/VNF/images')
    chownr('/opt/VNF', owner='ubuntu', group='ubuntu', follow_links=False, chowntopdir=True)
    chownr('/var/lib/libvirt/images', owner='root', group='ubuntu', follow_links=False, chowntopdir=True)
    chmod('/var/lib/libvirt/images', 0o775)
Example #6
def configured_devpi():
    status.maintenance('Configuring devpi')

    DEVPI_PATH.mkdir(mode=0o755, parents=True, exist_ok=True)
    devpi_server_bin = DEVPI_ENV_BIN / 'devpi-server'

    # initialize devpi
    adduser('devpi')
    chownr(str(DEVPI_PATH), 'devpi', 'devpi', chowntopdir=True)
    check_call([
        'sudo', '-u', 'devpi',
        str(devpi_server_bin), '--init', '--serverdir',
        str(DEVPI_PATH)
    ])

    # render service
    render('devpi.service',
           '/etc/systemd/system/devpi.service',
           context={
               'devpi_server_bin': devpi_server_bin,
               'devpi_path': str(DEVPI_PATH)
           })

    open_port(3141)

    # enable service
    check_call(['systemctl', 'enable', 'devpi.service'])

    # start service
    check_call(['systemctl', 'start', 'devpi.service'])

    status.active('devpi running')
    set_flag('devpi.configured')
Example #7
def install():
    conf = hookenv.config()
    context = get_install_context()
    gogs_bdist = hookenv.resource_get('bdist')
    check_call(["tar", "xzf", gogs_bdist], cwd="/opt")

    # Create gogs user & group
    add_group(context['group'])
    adduser(context['user'], system_user=True)

    for dir in ('.ssh', 'repositories', 'data', 'logs'):
        os.makedirs(
            os.path.join(context['home'], dir), mode=0o700, exist_ok=True)
    os.makedirs(os.path.join(context['home'], 'custom', 'conf'),
                mode=0o755, exist_ok=True)
    chownr(context['home'], context['user'], context['group'], True, True)

    render(source='upstart',
           target="/etc/init/gogs.conf",
           perms=0o644,
           context=context)
    render(source='gogs.service',
           target="/lib/systemd/system/gogs.service",
           perms=0o644,
           context=context)
    hookenv.status_set('maintenance', 'installation complete')
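The two bare True arguments in the chownr call above map onto its follow_links and chowntopdir keywords (assuming the usual charmhelpers signature); the keyword form is self-documenting:

chownr(context['home'], context['user'], context['group'],
       follow_links=True, chowntopdir=True)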
Example #8
def install_review_queue():
    status_set('maintenance', 'Installing Review Queue')

    with tempfile.TemporaryDirectory() as tmp_dir:
        install_dir = install_remote(config['repo'], dest=tmp_dir)
        contents = os.listdir(install_dir)
        if install_dir == tmp_dir and len(contents) == 1:
            # unlike the git handler, the archive handler just returns tmp_dir
            # even if the archive contents are nested in a folder as they
            # should be, so we have to normalize for that here
            install_dir = os.path.join(install_dir, contents[0])
        shutil.rmtree(APP_DIR, ignore_errors=True)
        log('Moving app source from {} to {}'.format(
            install_dir, APP_DIR))
        shutil.move(install_dir, APP_DIR)
    subprocess.check_call('make .venv'.split(), cwd=APP_DIR)
    if init_is_systemd():
        shutil.copyfile(SYSTEMD_SRC, SYSTEMD_DEST)
        shutil.copyfile(SYSTEMD_TASK_SRC, SYSTEMD_TASK_DEST)
        subprocess.check_call(['systemctl', 'daemon-reload'])
    else:
        shutil.copyfile(UPSTART_SRC, UPSTART_DEST)
        shutil.copyfile(UPSTART_TASK_SRC, UPSTART_TASK_DEST)
        subprocess.check_call(['initctl', 'reload-configuration'])
    shutil.copyfile(LP_CREDS_SRC, LP_CREDS_DEST)
    shutil.copyfile(APP_INI_SRC, APP_INI_DEST)
    chownr(APP_DIR, APP_USER, APP_GROUP)

    set_state('reviewqueue.installed')
    change_config()
    update_db()
    update_amqp()
    update_secret()
    set_state('reviewqueue.restart')
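init_is_systemd() picks between the systemd and upstart branches above. A minimal sketch of how such a check is commonly made, if you need one outside charmhelpers:

import os

def init_is_systemd_sketch():
    # systemd mounts /run/systemd/system at boot; upstart systems lack it.
    return os.path.isdir('/run/systemd/system')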
Example #9
def prepare_env():
    saved_path = os.getcwd()
    os.chdir('{}'.format(CHARM_LIB_DIR + "oracle_neutron"))
    cmd = "useradd --home-dir /var/lib/neutron --create-home \
--system --shell /bin/false neutron"
    os.system(cmd)
    mkdir("/var/log/neutron")
    mkdir("/etc/neutron")
    mkdir("/etc/neutron/rootwrap.d")
    mkdir("/etc/neutron/plugins/ml2")

    chownr("/var/log/neutron", 'neutron', 'neutron')
    chownr("/var/lib/neutron", 'neutron', 'neutron')
    chownr("/etc/neutron", 'neutron', 'neutron')
    chownr("/etc/neutron", 'neutron', 'neutron')
    chownr("/etc/neutron/plugins", 'neutron', 'neutron')    
    os.chmod("/var/log/neutron", 0o766)
    os.system(cmd)

    
    cmd = "./tools/generate_config_file_samples.sh"
    os.system(cmd)


    cmd = "cp etc/api-paste.ini /etc/neutron/api-paste.ini"
    os.system(cmd)
    cmd = "cp etc/policy.json /etc/neutron/policy.json"
    os.system(cmd)
    cmd = "cp etc/rootwrap.conf /etc/rootwrap.conf"
    os.system(cmd)
    cmd = "cp -R etc/neutron/rootwrap.d/* /etc/neutron/rootwrap.d/"
    os.system(cmd)
    cmd = "cp etc/rootwrap.conf /etc/neutron/rootwrap.conf"
    os.system(cmd)
    os.chdir(saved_path)
Example #10
def _install_mattermost():
    # Backup existing installation if it exists
    backup_path = None
    if os.path.isdir('/opt/mattermost'):
        backup_path = "/opt/mattermost.back{}".format(str(datetime.datetime.now()))
        shutil.move('/opt/mattermost', backup_path)
    # Create mattermost user & group if not exists
    if not group_exists('mattermost'):
        add_group("mattermost")
    if not user_exists('mattermost'):
        adduser("mattermost", system_user=True)
    # Get and unpack resource
    mattermost_bdist = resource_get('bdist')
    extract_tarfile(mattermost_bdist, destpath="/opt")

    # Render systemd template
    render(source="mattermost.service.tmpl",
           target="/etc/systemd/system/mattermost.service",
           perms=0o644,
           owner="root",
           context={})
    check_call(['systemctl', 'daemon-reload'])
    if backup_path:
        shutil.move(
            '{}/config/config.json'.format(backup_path),
            '/opt/mattermost/config/config.json')
        shutil.move(
            '{}/data'.format(backup_path),
            '/opt/mattermost/')
    # Create dirs that don't exist yet
    for folder in ("data", "logs", "config"):
        os.makedirs("/opt/mattermost/{}".format(folder),
                    mode=0o700,
                    exist_ok=True)
    chownr("/opt/mattermost", "mattermost", "mattermost", chowntopdir=True)
Example #11
def install_packages():
    # Add TrilioVault repository to install required package
    # and add queens repo to install nova libraries
    if not validate_ip(config('triliovault-ip')):
        log("Invalid IP address !")
        status_set('blocked',
                   'Invalid IP address, please provide correct IP address')
        return

    if not add_user():
        log("Adding dmapi user failed!")
        return

    add_source('deb http://{}:8085 deb-repo/'.format(config('triliovault-ip')))
    os.system('sudo add-apt-repository cloud-archive:queens')
    apt_update()
    dmapi.install()
    apt_install(['dmapi'], options=['--allow-unauthenticated'], fatal=True)
    # Placing the service file
    os.system('sudo cp files/trilio/tvault-datamover-api.service '
              '/etc/systemd/system/')
    chownr('/var/log/dmapi', DMAPI_USR, DMAPI_GRP)
    os.system('sudo systemctl enable tvault-datamover-api')
    service_restart('tvault-datamover-api')

    reactive.set_state('charm.installed')
Example #12
    def configure_events_dir(self, mode):
        """
        Create directory for spark event data.

        This directory is used by workers to store event data. It is also read
        by the history server when displaying event information.

        :param string mode: Spark execution mode to determine the dir location.
        """
        dc = self.dist_config

        # Directory needs to be 777 so non-spark users can write job history
        # there. It needs to be g+s (HDFS is g+s by default) so all entries
        # are readable by spark (in the spark group). It needs to be +t so
        # users cannot remove files they don't own.
        if mode.startswith('yarn'):
            events_dir = 'hdfs://{}'.format(dc.path('spark_events'))
            utils.run_as('hdfs', 'hdfs', 'dfs', '-mkdir', '-p', events_dir)
            utils.run_as('hdfs', 'hdfs', 'dfs', '-chmod', '1777', events_dir)
            utils.run_as('hdfs', 'hdfs', 'dfs', '-chown', '-R', 'ubuntu:spark',
                         events_dir)
        else:
            events_dir = dc.path('spark_events')
            events_dir.makedirs_p()
            events_dir.chmod(0o3777)
            host.chownr(events_dir, 'ubuntu', 'spark', chowntopdir=True)
Example #13
def install():
    """ Install Hook """
    log('ftb-infinity: install')
    status_set('maintenance', 'installing FTB modpack')

    # Add user
    adduser(FTB_USER)
    mkdir(FTB_HOME, owner=FTB_USER, group=FTB_USER, perms=0o750)
    check_call(['usermod', '-s', '/bin/bash', '-d', FTB_HOME, FTB_USER])

    # Download ftb
    ArchiveUrlFetchHandler().install(FTB_DL_URL, FTB_HOME)

    # Sanitize permissions, zip!
    chownr(FTB_HOME, FTB_USER, FTB_USER)
    path = os.path.join(FTB_HOME, 'FTBInstall.sh')
    s = os.stat(path)
    os.chmod(path, s.st_mode | stat.S_IXUSR | stat.S_IXGRP)

    # Accept EULA
    sed(os.path.join(FTB_HOME, 'eula.txt'), 'eula=false', 'eula=true')

    # Download minecraft jars
    with chdir(FTB_HOME):
        check_call(['sudo', '-u', FTB_USER, '-H', os.path.join(FTB_HOME, 'FTBInstall.sh')])

    # Render server.properties
    ftb_config_server()

    # Deploy systemd service
    ftb_systemd_install()

    set_state(CHARM_STATE_AVAILABLE)
    status_set('waiting', 'ftb downloaded')
Example #14
def install_crowd():
    host.adduser('crowd')
    fetch.install_remote(
        source=CROWD_URL.format(hookenv.config('crowd-version')),  # version
        dest=CROWD_INSTALL,
        # checksum=None,
        # hash_type='sha1'
    )
    host.lchownr(
        CROWD_INSTALL,
        owner='crowd',
        group='crowd',
    )
    for dir in [
            '{}/atlassian-crowd-{}'.format(CROWD_INSTALL,
                                           hookenv.config('crowd-version')),
            '/var/crowd-home',
            '/var/crowd-home/shared/',
    ]:
        try:
            mkdir(dir)
        except OSError:
            # directory may already exist
            pass
        host.chownr(
            dir,
            owner='crowd',
            group='crowd',
            chowntopdir=True,
        )
    set_flag('crowd.installed')
Example #15
def setup_images_folder():
    status_set("maintenance", "Setting up VM images folder")
    mkdir('/opt/VNF', owner='openvim', group='openvim', perms=0o775, force=False)
    symlink('/var/lib/libvirt/images', '/opt/VNF/images')
    chownr('/opt/VNF', owner='openvim', group='openvim', follow_links=False, chowntopdir=True)
    chownr('/var/lib/libvirt/images', owner='root', group='openvim', follow_links=False, chowntopdir=True)
    chmod('/var/lib/libvirt/images', 0o775)
Example #16
def install_layer_openmano():
    status_set('maintenance', 'Installing')

    cfg = config()

    # TODO change user home
    # XXX security issue!
    host.adduser(USER, password=USER)

    # TODO check out a branch
    dest_dir = install_remote(
        cfg['source'],
        dest=INSTALL_PATH,
        depth='1',
        branch='master',
    )
    os.mkdir(os.path.join(dest_dir, 'logs'))
    host.chownr(dest_dir, USER, USER)
    kvdb.set('repo', dest_dir)

    os.mkdir('/home/{}/bin'.format(USER))

    os.symlink(
        "{}/openmano".format(dest_dir),
        "/home/{}/bin/openmano".format(USER))
    os.symlink(
        "{}/scripts/openmano-report.sh".format(dest_dir),
        "/home/{}/bin/openmano-report.sh".format(USER))
    os.symlink(
        "{}/scripts/service-openmano.sh".format(dest_dir),
        "/home/{}/bin/service-openmano".format(USER))

    open_port(9090)
    set_state('openmano.installed')
Example #17
def install():
    hookenv.status_set('maintenance', 'Installing CABS')
    fetch.apt_update()
    fetch.apt_install(fetch.filter_installed_packages([
        'graphite-carbon',
        'graphite-web',
        'apache2',
        'apache2-mpm-worker',
        'libapache2-mod-wsgi',
        'postgresql',
        'python-virtualenv',
        'python-dev',
        'python-requests',
    ]))

    touch('/etc/apache2/sites-available/cabs-graphite.conf')
    shutil.copyfile('files/graphite.conf',
                    '/etc/apache2/sites-available/cabs-graphite.conf')
    shutil.copyfile('files/graphite-carbon', '/etc/default/graphite-carbon')
    apache2.enable_site('cabs-graphite')

    host.chownr('/var/lib/graphite', '_graphite', '_graphite')
    subprocess.check_call('sudo -u _graphite graphite-manage syncdb --noinput',
                          shell=True)

    extract_tar('payload/collector-web.tar.gz', '/opt/collector-web')
    config = hookenv.config()
    try:
        env = None
        if config.get('proxy'):
            env = dict(os.environ)
            env.update({'http_proxy': config.get('proxy'),
                        'https_proxy': config.get('proxy')})
        subprocess.check_call(['make', '.venv'], cwd='/opt/collector-web',
                              env=env)
    except subprocess.CalledProcessError as e:
        logging.exception(e)
        hookenv.status_set(
            'blocked', 'Failed to create venv - do you require a proxy?')
        return

    # setup postgres for collector-web
    subprocess.check_call(['scripts/ensure_db_user.sh'])
    subprocess.check_call(['scripts/ensure_db.sh'])

    # Install upstart config for collector-web
    shutil.copyfile('/opt/collector-web/conf/upstart/collectorweb.conf',
                    '/etc/init/collectorweb.conf')

    host.chownr('/opt/collector-web', 'ubuntu', 'ubuntu')

    host.service_restart('apache2')
    host.service_restart('carbon-cache')
    host.service_restart('collectorweb')

    # Install cron, vhost for gui, etc
    hookenv.open_port(9000)
    hookenv.open_port(9001)
    hookenv.open_port(2003)
Example #18
def setup_repository():
    config = hookenv.config()
    if config['git-repo'] != "":
        pull_repository()
    else:
        create_repository()
    chownr('/srv', owner='ubuntu', group='ubuntu')
    status_set('active', '')
Example #19
    def init_zkrest(self):
        # Zookeeper user needs to compile the rest contrib server.
        # So zookeeper needs to:
        # 1. Have a home dir for ant cache to exist
        # 2. Write to /usr/lib/zookeeper
        chownr(self.dist_config.path('zookeeper'), 'zookeeper', 'zookeeper', chowntopdir=True)
        with chdir(self.dist_config.path('zookeeper')):
            utils.run_as('zookeeper', 'ant')
        unitdata.kv().set('rest.initialised', True)
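utils.run_as here is a helper from the charm's own library, not charmhelpers. If you need the idea standalone, a minimal sketch is sudo -u via subprocess (the real helper also manages the environment):

import subprocess

def run_as_sketch(user, *cmd):
    # Run cmd as `user`, with that user's home directory (-H).
    subprocess.check_call(['sudo', '-u', user, '-H'] + list(cmd))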
Example #20
def create_certs_dir():
    if not ES_CERTS_DIR.exists():
        ES_CERTS_DIR.mkdir()
    chownr(path=str(ES_CERTS_DIR),
           owner='elasticsearch',
           group='elasticsearch',
           follow_links=True,
           chowntopdir=True)
    set_flag('cert.dir.available')
Example #21
def provision_certs_all_nodes():
    certs = charms.leadership.leader_get('elasticsearch_certs')
    ES_CERTS.write_bytes(b64decode(certs))
    chownr(path=str(ES_CERTS),
           owner='elasticsearch',
           group='elasticsearch',
           follow_links=True,
           chowntopdir=True)
    set_flag('elasticsearch.certs.provisioned')
Example #22
    def check_dir(self, path):
        """Check if dir has been provided and is configured correctly for usage."""
        if path:
            if not os.path.isdir(path):
                os.mkdir(path)
            chownr(path=path,
                   owner=self.opts.get("owner"),
                   group=self.opts.get("group"))
        # NB: as written this always returns False, even after a successful setup.
        return False
Example #23
    def init_zkrest(self):
        # Zookeeper user needs to compile the rest contrib server.
        # So zookeeper needs to:
        # 1. Have a home dir for ant cache to exist
        # 2. Write to /usr/lib/zookeeper
        chownr(self.dist_config.path('zookeeper'), 'zookeeper', 'zookeeper', chowntopdir=True)
        with chdir(self.dist_config.path('zookeeper')):
            utils.run_as('zookeeper', 'ant')
        unitdata.kv().set('rest.initialised', True)
Example #24
def install_tengu():
    if not os.path.isdir('/home/{}'.format(USER)):
        os.mkdir('/home/{}'.format(USER))
        adduser(USER)
        chownr('/home/{}'.format(USER), USER, USER, chowntopdir=True)
    if not os.path.isdir(API_DIR):
        os.mkdir(API_DIR)
    mergecopytree('files/tengu_ui', API_DIR)
    chownr(API_DIR, USER, GROUP, chowntopdir=True)
Example #25
def create_conf_dir():
    """Ensure config dir exists
    """
    status_set('maintenance', "Creating application directories")
    for directory in [SU_CONF_DIR, LOG_DIR]:
        if not os.path.isdir(directory):
            os.makedirs(directory, mode=0o755, exist_ok=True)
        chownr(directory, owner='www-data', group='www-data')
    status_set('active', "Application directories created")
    set_flag('conf.dirs.available')
Example #26
    def install(self):
        jujuresources.install(self.resources['hue'],
                              destination=self.dist_config.path('hue'),
                              skip_top_level=True)

        self.dist_config.add_users()
        self.dist_config.add_dirs()
        self.dist_config.add_packages()
        chownr(self.dist_config.path('hue'), 'hue', 'hadoop')
        unitdata.kv().set('hue.installed', True)
Example #27
def download_openvim():
    status_set("maintenance", "Downloading OpenVIM")
    if os.path.isdir("/opt/openmano"):
        rmtree("/opt/openmano")
    gitrepo.clone_from('https://github.com/tvansteenburgh/openmano.git',
                       '/opt/openmano')
    chownr('/opt/openmano',
           owner='openvim',
           group='openvim',
           follow_links=False,
           chowntopdir=True)
Example #28
    def install(self, force=False):
        if not force and self.is_installed():
            return
        jujuresources.install(self.resources['hue'],
                              destination=self.dist_config.path('hue'),
                              skip_top_level=True)

        self.dist_config.add_users()
        self.dist_config.add_dirs()
        self.dist_config.add_packages()
        chownr(self.dist_config.path('hue'), 'hue', 'hadoop')
        unitdata.kv().set('hue.installed', True)
Example #29
    def install(self, force=False):
        if not force and self.is_installed():
            return
        jujuresources.install(self.resources['hue'],
                              destination=self.dist_config.path('hue'),
                              skip_top_level=True)

        self.dist_config.add_users()
        self.dist_config.add_dirs()
        self.dist_config.add_packages()
        chownr(self.dist_config.path('hue'), 'hue', 'hadoop')
        unitdata.kv().set('hue.installed', True)
Example #30
def install(sojobo):
    api_dir = list(sojobo.connection())[0]['api-dir']
    copyfile('{}/files/controller_azure.py'.format(charm_dir()),
             '{}/controllers/controller_azure.py'.format(api_dir))
    copyfile('{}/files/bootstrap_azure_controller.py'.format(charm_dir()),
             '{}/scripts/bootstrap_azure_controller.py'.format(api_dir))
    copyfile('{}/files/add_azure_credential.py'.format(charm_dir()),
             '{}/scripts/add_azure_credential.py'.format(api_dir))
    chownr(api_dir, 'sojobo', 'www-data', chowntopdir=True)
    service_restart('nginx')
    status_set('active', 'data copied')
    set_flag('controller-azure.installed')
Example #31
    def configure_installdir(self):
        '''Create and fix permissions on install dir'''
        if not os.path.exists(self.installdir):
            os.makedirs(self.installdir)
        hookenv.log("Fixing data dir permissions: {}".format(
            self.installdir), 'DEBUG')
        if host.user_exists(self.user) and host.group_exists(self.user):
            host.chownr(self.installdir, self.user,
                        self.user, chowntopdir=True)
        else:
            hookenv.log("Skipping chown because user/group {} is missing".format(
                self.user), 'DEBUG')
Example #32
    def run_smoke_tests(self, smoke_components=None, smoke_env=None):
        """
        Run the Bigtop smoke tests for given components using the gradle
        wrapper script.

        :param list smoke_components: Bigtop components to smoke test
        :param dict smoke_env: Dict of required environment variables (merged
            with /etc/environment)
        """
        if not is_state('bigtop.available'):
            hookenv.log('Bigtop is not ready to run smoke tests')
            return None
        if not smoke_components:
            hookenv.log('Missing Bigtop smoke test component list')
            return None

        # We always need TERM and JAVA_HOME; merge with any user provided dict
        subprocess_env = {'TERM': 'dumb', 'JAVA_HOME': java_home()}
        if isinstance(smoke_env, dict):
            subprocess_env.update(smoke_env)

        # Ensure the base dir is owned by ubuntu so we can create a .gradle dir.
        chownr(self.bigtop_base, 'ubuntu', 'ubuntu', chowntopdir=True)

        # Gradle doesn't honor env proxies; check for either http* or HTTP* and
        # set cli args as needed.
        http_url = os.environ.get('http_proxy', os.environ.get('HTTP_PROXY'))
        https_url = os.environ.get('https_proxy', os.environ.get('HTTPS_PROXY'))
        proxy_args = []
        if http_url:
            parsed_url = urlparse(http_url)
            proxy_args += ['-Dhttp.proxyHost={}'.format(parsed_url.hostname),
                           '-Dhttp.proxyPort={}'.format(parsed_url.port)]
        if https_url:
            parsed_url = urlparse(https_url)
            proxy_args += ['-Dhttps.proxyHost={}'.format(parsed_url.hostname),
                           '-Dhttps.proxyPort={}'.format(parsed_url.port)]

        # Bigtop can run multiple smoke tests at once; construct the right args.
        comp_args = ['bigtop-tests:smoke-tests:%s:test' % c for c in smoke_components]
        gradlew_args = ['-Psmoke.tests', '--info'] + proxy_args + comp_args

        hookenv.log('Bigtop smoke test environment: {}'.format(subprocess_env))
        hookenv.log('Bigtop smoke test args: {}'.format(gradlew_args))
        with chdir(self.bigtop_base):
            try:
                utils.run_as('ubuntu', './gradlew', *gradlew_args,
                             env=subprocess_env)
                smoke_out = 'success'
            except subprocess.CalledProcessError as e:
                smoke_out = e.output
        return smoke_out
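A hypothetical invocation of the method above; the component names and environment variable are illustrative, not taken from the source:

result = bigtop.run_smoke_tests(
    smoke_components=['hdfs', 'yarn'],
    smoke_env={'HADOOP_CONF_DIR': '/etc/hadoop/conf'})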
Example #33
    def run_smoke_tests(self, smoke_components=None, smoke_env=None):
        """
        Run the Bigtop smoke tests for given components using the gradle
        wrapper script.

        :param list smoke_components: Bigtop components to smoke test
        :param dict smoke_env: Dict of required environment variables (merged
            with /etc/environment)
        """
        if not is_state('bigtop.available'):
            hookenv.log('Bigtop is not ready to run smoke tests')
            return None
        if not smoke_components:
            hookenv.log('Missing Bigtop smoke test component list')
            return None

        # We always need TERM and JAVA_HOME; merge with any user provided dict
        subprocess_env = {'TERM': 'dumb', 'JAVA_HOME': java_home()}
        if isinstance(smoke_env, dict):
            subprocess_env.update(smoke_env)

        # Ensure the base dir is owned by ubuntu so we can create a .gradle dir.
        chownr(self.bigtop_base, 'ubuntu', 'ubuntu', chowntopdir=True)

        # Gradle doesn't honor env proxies; check for either http* or HTTP* and
        # set cli args as needed.
        http_url = os.environ.get('http_proxy', os.environ.get('HTTP_PROXY'))
        https_url = os.environ.get('https_proxy', os.environ.get('HTTPS_PROXY'))
        proxy_args = []
        if http_url:
            parsed_url = urlparse(http_url)
            proxy_args += ['-Dhttp.proxyHost={}'.format(parsed_url.hostname),
                           '-Dhttp.proxyPort={}'.format(parsed_url.port)]
        if https_url:
            parsed_url = urlparse(https_url)
            proxy_args += ['-Dhttps.proxyHost={}'.format(parsed_url.hostname),
                           '-Dhttps.proxyPort={}'.format(parsed_url.port)]

        # Bigtop can run multiple smoke tests at once; construct the right args.
        comp_args = ['bigtop-tests:smoke-tests:%s:test' % c for c in smoke_components]
        gradlew_args = ['-Psmoke.tests', '--info'] + proxy_args + comp_args

        hookenv.log('Bigtop smoke test environment: {}'.format(subprocess_env))
        hookenv.log('Bigtop smoke test args: {}'.format(gradlew_args))
        with chdir(self.bigtop_base):
            try:
                utils.run_as('ubuntu', './gradlew', *gradlew_args,
                             env=subprocess_env)
                smoke_out = 'success'
            except subprocess.CalledProcessError as e:
                smoke_out = e.output
        return smoke_out
Example #34
    def _copy_resources(self):
        self._ensure_folders()
        for f in self.FILE_MAP:
            if os.path.isfile(f) is False:
                raise ValueError(
                    "Missing required file %s. Package not installed?" % f)
            shutil.copy(f,
                        os.path.join(self.TFTP_ROOT, self.FILE_MAP[f]),
                        follow_symlinks=True)
        ch_host.chownr(self.TFTP_ROOT,
                       _IRONIC_USER,
                       _IRONIC_GROUP,
                       chowntopdir=True)
Example #35
def generate_ssh_key(user):
    if ssh_priv_key(user).exists():
        return
    sshdir = ssh_key_dir(user)
    if not sshdir.exists():
        host.mkdir(sshdir, owner=user, group='hadoop', perms=0o755)
    keyfile = ssh_priv_key(user)
    (sshdir / 'config').write_lines([
        'Host *',
        '    StrictHostKeyChecking no'
    ], append=True)
    check_call(['ssh-keygen', '-t', 'rsa', '-P', '', '-f', keyfile])
    host.chownr(sshdir, user, 'hadoop')
Example #36
def install_solr():
    version = config('solr_version')
    adduser('solr')
    au.download(
        "http://archive.apache.org/dist/lucene/solr/{0}/solr-{0}.tgz".format(
            version), "/tmp/solr.tgz")
    mkdir('/opt/solr')
    check_output([
        'tar', 'xvfz', "/tmp/solr.tgz", '-C', "/opt/solr",
        '--strip-components=1'
    ])
    chownr('/opt/solr', 'solr', 'solr', chowntopdir=True)
    set_state('solr.installed')
Example #37
def fix_perms(data_dir):
    # The path to data_dir must be world readable, so the postgres user
    # can traverse to it.
    p = data_dir
    while p != '/':
        p = os.path.dirname(p)
        subprocess.check_call(['chmod', 'a+rX', p], universal_newlines=True)

    # data_dir and all of its contents should be owned by the postgres
    # user and group.
    host.chownr(data_dir, 'postgres', 'postgres', follow_links=False)

    # data_dir should not be world readable.
    os.chmod(data_dir, 0o700)
Example #38
def render_ini(pgsql):
    db_uri = "postgresql://{}:{}@{}:{}/{}".format(
        pgsql.user(), pgsql.password(), pgsql.host(), pgsql.port(), pgsql.database()
    )

    ini = ""
    with open(APP_INI_SRC, "r") as f:
        ini = f.read()
        ini = re.sub(r"(sqlalchemy.url\s*=)(.*)", r"\1 " + db_uri, ini)
        ini = re.sub(r"(port\s*=)(.*)", r"\1 " + str(config["port"]), ini)

    with open(APP_INI_DEST, "w") as f:
        f.write(ini)
    chownr(APP_INI_DEST, APP_USER, APP_GROUP)
Example #39
def install_whelp():
    '''
    Reactive hook to install whelp
    '''
    hookenv.status_set('maintenance', 'Installing whelp')

    whelp = Whelp()

    whelp_tar = hookenv.resource_get('webapp')

    hookenv.status_set('maintenance', 'installing webapp')

    # Extract tar resource
    tar = tarfile.open(whelp_tar)
    tar.extractall(WHELP_HOME)

    # Install pip3 reqs
    with chdir('/srv/whelp/whelp'):
        with open('requirements.txt', 'r') as f:
            for i in list(map(lambda b: b.strip('\n'), f.readlines())):
                pip_install(i)

    # Set permissions
    chownr(WHELP_HOME, 'www-data', 'www-data')

    # Get state files for whelp to run
    whelp.get_whelp_bucket_files()

    # Configure NGINX
    configure_site('whelp',
                   'whelp.vhost',
                   port=config['port'],
                   whelp_port=config['whelp-port'])

    # Start Supervisor
    subprocess.call('supervisord -c /etc/supervisor/supervisord.conf'.split(),
                    shell=False)

    # Render whelp supervisor.conf
    whelp.render_whelp_supervisor_conf()

    # Open port 80
    hookenv.open_port(config['port'])

    # Set status to active
    hookenv.status_set('active', 'Whelp is active on port %s' % config['port'])

    # Set whelp.available state
    set_state('whelp.installed')
Example #40
    def perms():
        for p in paths:
            makedirs(p[0], exist_ok=True)

            try:
                getgrnam(p[2])
            except KeyError:
                add_group(p[2], system_group=True)

            if not user_exists(p[1]):
                adduser(p[1], shell='/bin/false', system_user=True,
                        primary_group=p[2])

            # Ensure path is owned appropriately
            chownr(path=p[0], owner=p[1], group=p[2], chowntopdir=True)
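perms() closes over a paths sequence of (path, owner, group) triples defined in the enclosing scope. A hypothetical shape, with illustrative values:

paths = [
    ('/var/lib/myapp', 'myapp', 'myapp'),   # (path, owner, group)
    ('/var/log/myapp', 'myapp', 'adm'),
]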
Example #41
def get_ssh_key(user):
    sshdir = Path("/home/%s/.ssh" % user)
    if not sshdir.exists():
        host.mkdir(sshdir, owner=user, group="hadoop", perms=0o755)
    keyfile = sshdir / "id_rsa"
    pubfile = sshdir / "id_rsa.pub"
    authfile = sshdir / "authorized_keys"
    if not pubfile.exists():
        (sshdir / "config").write_lines(["Host *", "    StrictHostKeyChecking no"], append=True)
        check_call(["ssh-keygen", "-t", "rsa", "-P", "", "-f", keyfile])
        host.chownr(sshdir, user, "hadoop")
    # allow ssh'ing to localhost; useful for things like start_dfs.sh
    if not authfile.exists():
        Path.copy(pubfile, authfile)
    return pubfile.text()
Example #42
def prepare_es_data_dir():
    '''
    Create (if not exists) and set perms on elasticsearch data dir.
    '''

    if not ES_DATA_DIR.exists():
        ES_DATA_DIR.mkdir(parents=True, exist_ok=True)

    chownr(path=str(ES_DATA_DIR),
           owner='elasticsearch',
           group='elasticsearch',
           follow_links=True,
           chowntopdir=True)

    set_flag('elasticsearch.storage.dir.prepared')
Example #43
    def set_indexers(self, status):
        '''Enable or disable all indexer searching based on provided status
        status: True will turn on indexers
        status: False will turn off indexers'''
        conn = sqlite3.connect(self.database_file)
        c = conn.cursor()
        if status:
            c.execute(
                '''UPDATE Indexers SET EnableRss = 1, EnableSearch = 1''')
        else:
            c.execute(
                '''UPDATE Indexers SET EnableRss = 0, EnableSearch = 0''')
        conn.commit()
        host.chownr(self.home_dir,
                    owner=self.charm_config['radarr-user'],
                    group=self.charm_config['radarr-user'])
Example #44
def check_for_and_mount_direct_attached_storage():
    direct_attached_device = Path('/dev/nvme0n1')
    if direct_attached_device.exists():
        sp.call(['mkfs.ext4', str(direct_attached_device)])
        sp.call(['mount', str(direct_attached_device), str(ES_DATA_DIR)])

        chownr(path=str(ES_DATA_DIR),
               owner='elasticsearch',
               group='elasticsearch',
               follow_links=True,
               chowntopdir=True)

        with open('/etc/fstab', 'a') as f:
            f.write(f'/dev/nvme0n1 {str(ES_DATA_DIR)} '
                    'ext4 defaults,nofail 0 2\n')

    set_flag('direct.attached.storage.check.complete')
Example #45
def osdize_dir(path):
    if os.path.exists(os.path.join(path, 'upstart')):
        log('Path {} is already configured as an OSD - bailing'.format(path))
        return

    if cmp_pkgrevno('ceph', "0.56.6") < 0:
        log('Unable to use directories for OSDs with ceph < 0.56.6',
            level=ERROR)
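        # NB: a bare 'raise' with no active exception surfaces as a RuntimeError.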
        raise

    mkdir(path, owner=ceph_user(), group=ceph_user(), perms=0o755)
    chownr('/var/lib/ceph', ceph_user(), ceph_user())
    cmd = [
        'sudo', '-u',
        ceph_user(), 'ceph-disk', 'prepare', '--data-dir', path
    ]
    subprocess.check_call(cmd)
Example #46
    def perms():
        for p in paths:
            makedirs(p[0], exist_ok=True)

            try:
                getgrnam(p[2])
            except KeyError:
                add_group(p[2], system_group=True)

            if not user_exists(p[1]):
                adduser(p[1],
                        shell='/bin/false',
                        system_user=True,
                        primary_group=p[2])

            # Ensure path is owned appropriately
            chownr(path=p[0], owner=p[1], group=p[2], chowntopdir=True)
Example #47
def install():
    if db.get("repo") != config["repo"]:
        status_set("maintenance", "Installing app")
        apt_install(APT_PKGS)
        tmp_dir = install_remote(config["repo"], dest="/tmp", depth=1)
        shutil.rmtree(APP_DIR, ignore_errors=True)
        log("Moving app source from {} to {}".format(tmp_dir, APP_DIR))
        shutil.move(tmp_dir, APP_DIR)
        subprocess.check_call("make .venv".split(), cwd=APP_DIR)
        shutil.copyfile(UPSTART_SRC, UPSTART_DEST)
        chownr(APP_DIR, APP_USER, APP_GROUP)
        db.set("repo", config["repo"])

    if config.changed("port"):
        open_port(config["port"])
        if config.previous("port"):
            close_port(config.previous("port"))
Example #48
def get_ssh_key(user):
    sshdir = Path('/home/%s/.ssh' % user)
    if not sshdir.exists():
        host.mkdir(sshdir, owner=user, group='hadoop', perms=0o755)
    keyfile = sshdir / 'id_rsa'
    pubfile = sshdir / 'id_rsa.pub'
    authfile = sshdir / 'authorized_keys'
    if not pubfile.exists():
        (sshdir / 'config').write_lines([
            'Host *',
            '    StrictHostKeyChecking no'
        ], append=True)
        check_call(['ssh-keygen', '-t', 'rsa', '-P', '', '-f', keyfile])
        host.chownr(sshdir, user, 'hadoop')
    # allow ssh'ing to localhost; useful for things like start_dfs.sh
    if not authfile.exists():
        Path.copy(pubfile, authfile)
    return pubfile.text()
Example #49
def bootstrap_monitor_cluster(secret):
    hostname = get_unit_hostname()
    path = '/var/lib/ceph/mon/ceph-{}'.format(hostname)
    done = '{}/done'.format(path)
    if systemd():
        init_marker = '{}/systemd'.format(path)
    else:
        init_marker = '{}/upstart'.format(path)

    keyring = '/var/lib/ceph/tmp/{}.mon.keyring'.format(hostname)

    if os.path.exists(done):
        log('bootstrap_monitor_cluster: mon already initialized.')
    else:
        # Ceph >= 0.61.3 needs this for ceph-mon fs creation
        mkdir('/var/run/ceph', owner=ceph_user(),
              group=ceph_user(), perms=0o755)
        mkdir(path, owner=ceph_user(), group=ceph_user())
        mkdir("/var/lib/ceph/tmp", owner=ceph_user(), group=ceph_user())
        # end changes for Ceph >= 0.61.3
        try:
            subprocess.check_call(['ceph-authtool', keyring,
                                   '--create-keyring', '--name=mon.',
                                   '--add-key={}'.format(secret),
                                   '--cap', 'mon', 'allow *'])

            subprocess.check_call(['ceph-mon', '--mkfs',
                                   '-i', hostname,
                                   '--keyring', keyring])
            chownr(path, ceph_user(), ceph_user())
            with open(done, 'w'):
                pass
            with open(init_marker, 'w'):
                pass

            if systemd():
                subprocess.check_call(['systemctl', 'enable', 'ceph-mon'])
                service_restart('ceph-mon')
            else:
                service_restart('ceph-mon-all')
        finally:
            os.unlink(keyring)
Example #50
def install_resource():
    remove_state('charm-svg.ready')
    hookenv.status_set('maintenance', 'extracting resources')

    svg_bin = hookenv.resource_get('python-jujusvg')
    web_tar = hookenv.resource_get('webapp')

    hookenv.status_set('maintenance', 'installing python-jujusvg')

    shutil.copy(svg_bin, charmsvg.JUJUSVG_PATH)
    os.chmod(charmsvg.JUJUSVG_PATH, 0o755)

    hookenv.status_set('maintenance', 'installing webapp')

    tar = tarfile.open(web_tar)
    tar.extractall(charmsvg.INSTALL_PATH)
    chownr(charmsvg.INSTALL_PATH, 'www-data', 'www-data')

    set_state('charm-svg.installed')
Example #51
def osdize_dir(path):
    if os.path.exists(os.path.join(path, 'upstart')):
        log('Path {} is already configured as an OSD - bailing'.format(path))
        return

    if cmp_pkgrevno('ceph', "0.56.6") < 0:
        log('Unable to use directories for OSDs with ceph < 0.56.6',
            level=ERROR)
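        # NB: a bare 'raise' with no active exception surfaces as a RuntimeError.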
        raise

    mkdir(path, owner=ceph_user(), group=ceph_user(), perms=0o755)
    chownr('/var/lib/ceph', ceph_user(), ceph_user())
    cmd = [
        'sudo', '-u', ceph_user(),
        'ceph-disk',
        'prepare',
        '--data-dir',
        path
    ]
    subprocess.check_call(cmd)
Example #52
def install():
    status_set('maintenance', 'Installing PDI')
    adduser('etl')
    mkdir('/home/etl')
    chownr('/home/etl', 'etl', 'etl', chowntopdir=True)
    os.chmod('/home/etl', 0o755)

    #au = ArchiveUrlFetchHandler()
    #au.install(hookenv.config()['pdi_url'], '/opt/')
    pdiarchive = hookenv.resource_get('pdi-archive')
    tar = tarfile.open(pdiarchive)
    tar.extractall("/opt/")
    chownr('/opt/data-integration', 'etl', 'etl', chowntopdir=True)
    st = os.stat('/opt/data-integration/spoon.sh')
    os.chmod('/opt/data-integration/spoon.sh', st.st_mode | stat.S_IEXEC)
    os.chmod('/opt/data-integration/carte.sh', st.st_mode | stat.S_IEXEC)
    os.chmod('/opt/data-integration/encr.sh', st.st_mode | stat.S_IEXEC)
    os.chmod('/opt/data-integration/kitchen.sh', st.st_mode | stat.S_IEXEC)
    os.chmod('/opt/data-integration/pan.sh', st.st_mode | stat.S_IEXEC)
    status_set('maintenance', 'PDI Installed')
    set_state('pdi.installed')
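The chmod block above stats spoon.sh once and reuses that mode for every script, which is only correct if all five files start with identical permissions. A tightened sketch that stats each file it touches:

import os
import stat

for script in ('spoon.sh', 'carte.sh', 'encr.sh', 'kitchen.sh', 'pan.sh'):
    path = os.path.join('/opt/data-integration', script)
    st = os.stat(path)
    os.chmod(path, st.st_mode | stat.S_IEXEC)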
Example #53
def prepare_env():
    saved_path = os.getcwd()
    os.chdir('{}'.format(CHARM_LIB_DIR + "oracle_keystone"))
    cmd = "useradd --home-dir /var/lib/keystone --create-home \
--system --shell /bin/false keystone"
    os.system(cmd)
    mkdir("/var/log/keystone")
    mkdir("/etc/keystone")
    chownr("/var/log/keystone", 'keystone', 'keystone')
    chownr("/var/lib/keystone", 'keystone', 'keystone')
    chownr("/etc/keystone", 'keystone', 'keystone')

    cmd = "cp ./etc/keystone.conf.sample /etc/keystone/keystone.conf"
    os.system(cmd)
    cmd = "cp ./etc/keystone-paste.ini /etc/keystone/keystone-paste.ini"
    os.system(cmd)
    cmd = "cp ./etc/default_catalog.templates /etc/keystone/\
default_catalog.templates"
    os.system(cmd)
    cmd = "cp ./etc/logging.conf.sample /etc/keystone/logging.conf"
    os.system(cmd)
    cmd = "cp ./etc/policy.v3cloudsample.json /etc/keystone/policy.json"
    os.system(cmd)
    cmd = "cp ./etc/sso_callback_template.html /etc/keystone/\
sso_callback_template.html"
    os.system(cmd)
    cmd = "cp ./httpd/wsgi-keystone.conf /etc/apache2/sites-available/\
keystone.conf"
    os.system(cmd)
    cmd = "cp ./httpd/wsgi-keystone.conf /etc/apache2/sites-enabled/\
keystone.conf"
    os.system(cmd)
    os.chdir(saved_path)
Example #54
def upgrade_monitor():
    current_version = get_version()
    status_set("maintenance", "Upgrading monitor")
    log("Current ceph version is {}".format(current_version))
    new_version = config('release-version')
    log("Upgrading to: {}".format(new_version))

    try:
        add_source(config('source'), config('key'))
        apt_update(fatal=True)
    except subprocess.CalledProcessError as err:
        log("Adding the ceph source failed with message: {}".format(
            err.message))
        status_set("blocked", "Upgrade to {} failed".format(new_version))
        sys.exit(1)
    try:
        if systemd():
            for mon_id in get_local_mon_ids():
                service_stop('ceph-mon@{}'.format(mon_id))
        else:
            service_stop('ceph-mon-all')
        apt_install(packages=PACKAGES, fatal=True)

        # Ensure the ownership of Ceph's directories is correct
        chownr(path=os.path.join(os.sep, "var", "lib", "ceph"),
               owner=ceph_user(),
               group=ceph_user())
        if systemd():
            for mon_id in get_local_mon_ids():
                service_start('ceph-mon@{}'.format(mon_id))
        else:
            service_start('ceph-mon-all')
        status_set("active", "")
    except subprocess.CalledProcessError as err:
        log("Stopping ceph and upgrading packages failed "
            "with message: {}".format(err.message))
        status_set("blocked", "Upgrade to {} failed".format(new_version))
        sys.exit(1)
Ejemplo n.º 55
0
def configure_queue_processors():
    if os.path.isdir(CONSUMER_CONFIG_ROOT) is False:
        host.mkdir(CONSUMER_CONFIG_ROOT, REDDIT_USER, REDDIT_GROUP, 0o755)

    def set_consumer_count(queue, count):
        queueFile = '%s/%s' % (CONSUMER_CONFIG_ROOT, queue)
        if os.path.isfile(queueFile) is False:
            f = open(queueFile, 'w')
            f.write('%s' % count)
            f.close()

    set_consumer_count('log_q', 0)
    set_consumer_count('cloudsearch_q', 0)
    set_consumer_count('scraper_q', 1)
    set_consumer_count('commentstree_q', 1)
    set_consumer_count('newcomments_q', 1)
    set_consumer_count('vote_link_q', 1)
    set_consumer_count('vote_comment_q', 1)

    host.chownr(CONSUMER_CONFIG_ROOT, REDDIT_USER, REDDIT_GROUP)

    return
Ejemplo n.º 56
0
def download_openvim():
    status_set("maintenance", "downloading openvim")
    if os.path.isdir("/opt/openmano"):
        rmtree("/opt/openmano")
    gitrepo.clone_from('https://github.com/wwwtyro/openmano.git', '/opt/openmano')
    chownr('/opt/openmano', owner='openvim', group='openvim', follow_links=False, chowntopdir=True)
Ejemplo n.º 57
0
    def configure_sparkbench(self):
        """
        Install/configure/remove Spark-Bench based on user config.

        If config[spark_bench_enabled], fetch, install, and configure
        Spark-Bench on initial invocation. Subsequent invocations will skip the
        fetch/install, but will reconfigure Spark-Bench since we may need to
        adjust the data dir (eg: benchmark data is stored in hdfs when spark
        is in yarn mode; locally in all other execution modes).
        """
        install_sb = hookenv.config()['spark_bench_enabled']
        sb_dir = '/home/ubuntu/SparkBench'
        if install_sb:
            # Fetch/install on our first go-round, then set unit data so we
            # don't reinstall every time this function is called.
            if not unitdata.kv().get('spark_bench.installed', False):
                sb_url = hookenv.config()['spark_bench_url']

                Path(sb_dir).rmtree_p()
                au = ArchiveUrlFetchHandler()
                au.install(sb_url, '/home/ubuntu')

                # NB: This block is unused when using one of our sb tgzs. It
                # may come in handy if people want a tgz that does not expand
                # to our expected sb_dir.
                # #####
                # Handle glob if we use a .tgz that doesn't expand to sb_dir
                # sb_archive_dir = glob('/home/ubuntu/SparkBench*')[0]
                # SparkBench expects to live in ~/SparkBench, so put it there
                # Path(sb_archive_dir).rename(sb_dir)
                # #####

                # Ensure users in the spark group can write to any subdirectory
                # of sb_dir (spark needs to write benchmark output there when
                # running in local modes).
                host.chownr(Path(sb_dir), 'ubuntu', 'spark', chowntopdir=True)
                for r, d, f in os.walk(sb_dir):
                    os.chmod(r, 0o2775)

                unitdata.kv().set('spark_bench.installed', True)
                unitdata.kv().flush(True)

            # Configure the SB env every time this function is called.
            sb_conf = '{}/conf'.format(sb_dir)
            sb_env = Path(sb_conf) / 'env.sh'
            if not sb_env.exists():
                (Path(sb_conf) / 'env.sh.template').copy(sb_env)

            # NB: A few notes on configuring SparkBench:
            # 1. Input data has been pregenerated and packed into the tgz. All
            # spark cluster members will have this data locally, which enables
            # us to execute benchmarks in the absence of HDFS. When spark is in
            # yarn mode, we'll need to generate and store this data in HDFS
            # so nodemanagers can access it (NMs obviously won't have SB
            # installed locally). Set DATA_HDFS to a local dir or common HDFS
            # location depending on our spark execution mode.
            #
            # 2. SB tries to SSH to spark workers to purge vmem caches. This
            # isn't possible in containers, nor is it possible in our env
            # because we don't distribute ssh keys among cluster members.
            # Set MC_LIST to an empty string to prevent this behavior.
            #
            # 3. Throughout SB, HADOOP_HOME/bin is used as the prefix for the
            # hdfs command. Bigtop's hdfs lives at /usr/bin/hdfs, so set the
            # SB HADOOP_HOME accordingly (it's not used for anything else).
            #
            # 4. Use our MASTER envar to set the SparkBench SPARK_MASTER url.
            # It is updated every time we (re)configure spark.
            mode = hookenv.config()['spark_execution_mode']
            if mode.startswith('yarn'):
                sb_data_dir = "hdfs:///user/ubuntu/SparkBench"
            else:
                sb_data_dir = "file://{}".format(sb_dir)

            utils.re_edit_in_place(sb_env, {
                r'^DATA_HDFS *=.*': 'DATA_HDFS="{}"'.format(sb_data_dir),
                r'^DATASET_DIR *=.*': 'DATASET_DIR="{}/dataset"'.format(sb_dir),
                r'^MC_LIST *=.*': 'MC_LIST=""',
                r'.*HADOOP_HOME *=.*': 'HADOOP_HOME="/usr"',
                r'.*SPARK_HOME *=.*': 'SPARK_HOME="/usr/lib/spark"',
                r'^SPARK_MASTER *=.*': 'SPARK_MASTER="$MASTER"',
            })
        else:
            # config[spark_bench_enabled] is false; remove it
            Path(sb_dir).rmtree_p()
            unitdata.kv().set('spark_bench.installed', False)
            unitdata.kv().flush(True)
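utils.re_edit_in_place above is another charm-library helper; a minimal sketch of the idea (a stand-in, not the library's implementation):

import re

def re_edit_in_place_sketch(path, subs):
    # Apply each {pattern: replacement} to every line of the file, in place.
    with open(path) as f:
        lines = f.read().splitlines()
    with open(path, 'w') as f:
        for line in lines:
            for pattern, repl in subs.items():
                line = re.sub(pattern, repl, line)
            f.write(line + '\n')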
Example #58
def install_ssh_key(user, ssh_key):
    sshdir = Path('/home/%s/.ssh' % user)
    if not sshdir.exists():
        host.mkdir(sshdir, owner=user, group='hadoop', perms=0o755)
    Path(sshdir / 'authorized_keys').write_text(ssh_key, append=True)
    host.chownr(sshdir, user, 'hadoop')