Example #1
def install_keycloak():
    status_set(
        'maintenance',
        'Downloading and installing Keycloak distribution ({})'.format(
            KEYCLOAK_VERSION))
    handler = ArchiveUrlFetchHandler()
    os.makedirs(KEYCLOAK_BASE_DIR, exist_ok=True)
    handler.install(KEYCLOAK_DOWNLOAD, KEYCLOAK_BASE_DIR)
    log('Keycloak binary downloaded and extracted in {}'.format(KEYCLOAK_HOME))

    module_dir = '{}/modules/system/layers/keycloak/org/postgresql/main'.format(
        KEYCLOAK_HOME)
    os.makedirs(module_dir, exist_ok=True)
    os.symlink('{}/files/module.xml'.format(charm_dir()),
               '{}/module.xml'.format(module_dir))
    os.symlink('{}/files/postgresql-42.2.5.jar'.format(charm_dir()),
               '{}/postgresql-42.2.5.jar'.format(module_dir))
    log('PostgreSQL module copied.')

    standalone_context = {
        'name': service_name(),
        'script': '{}/bin/standalone.sh'.format(KEYCLOAK_HOME)
    }
    templating.render(source='keycloak.service.jinja2',
                      target='/etc/systemd/system/keycloak.service',
                      context=standalone_context)
    check_call(['systemctl', 'enable', 'keycloak.service'])
    log('Keycloak service enabled.')

    application_version_set(KEYCLOAK_VERSION)
    set_flag('keycloak.installed')
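A note on Example #1: os.symlink() raises FileExistsError if the link already exists, so re-running the hook would fail. A minimal idempotent helper, purely illustrative and not part of the original charm, might look like this:

import os

def ensure_symlink(src, dst):
    # Re-create the link only when it is missing or points somewhere else.
    if os.path.islink(dst):
        if os.readlink(dst) == src:
            return
        os.remove(dst)
    os.symlink(src, dst)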
Example #2
def install():

    h = ArchiveUrlFetchHandler()

    # download, validate, install
    for deb, sha in DEBS.items():
        install_deb(h.download_and_validate(full_url(deb), sha, validate="sha1"))
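Example #2 relies on DEBS, full_url() and install_deb(), which are not shown. A self-contained sketch of how those pieces could fit together, with placeholder names and a placeholder checksum, assuming the download_and_validate() call used above:

from subprocess import check_call
from charmhelpers.fetch.archiveurl import ArchiveUrlFetchHandler

BASE_URL = 'https://example.com/debs/'   # assumed mirror, not from the charm
DEBS = {
    # deb filename -> expected sha1 (placeholder digest)
    'foo_1.0_amd64.deb': 'da39a3ee5e6b4b0d3255bfef95601890afd80709',
}

def full_url(deb):
    return BASE_URL + deb

def install_deb(path):
    # download_and_validate() returns the path of the verified local file
    check_call(['dpkg', '-i', path])

handler = ArchiveUrlFetchHandler()
for deb, sha in DEBS.items():
    install_deb(handler.download_and_validate(full_url(deb), sha, validate='sha1'))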
Example #3
def install_puppet_agent():

    """ Install puppet-agent
    """

    # Download and install trusty puppet deb
    hookenv.status_set('maintenance', 
                       'Configuring trusty puppet apt sources')

    aufh = ArchiveUrlFetchHandler()
    aufh.download(TRUSTY_PUPPET_DEB_URL, TRUSTY_PUPPET_DEB_TEMP)
    dpkg_trusty_puppet_deb = 'dpkg -i %s' % TRUSTY_PUPPET_DEB_TEMP
    call(dpkg_trusty_puppet_deb.split(), shell=False)
    apt_update()
    # Clean up
    rm_trusty_puppet_deb = 'rm %s' % TRUSTY_PUPPET_DEB_TEMP
    call(rm_trusty_puppet_deb.split(), shell=False)

    # Install puppet-agent
    hookenv.status_set('maintenance', 
                       'Installing puppet-agent %s' % PUPPET_VERSION)

    apt_install(PUPPET_AGENT_PKGS)
    render_puppet_conf(PUPPET_CONF_CTXT)
    hookenv.status_set('active', 'Puppet-agent: %s installed.' % PUPPET_VERSION)
Example #4
    def install_benchmark(self):
        install_sb = hookenv.config()['spark_bench_enabled']
        sb_dir = '/home/ubuntu/spark-bench'
        if install_sb:
            if not unitdata.kv().get('spark_bench.installed', False):
                if utils.cpu_arch() == 'ppc64le':
                    sb_url = hookenv.config()['spark_bench_ppc64le']
                else:
                    # TODO: may need more arch cases (go with x86 sb for now)
                    sb_url = hookenv.config()['spark_bench_x86_64']

                Path(sb_dir).rmtree_p()
                au = ArchiveUrlFetchHandler()
                au.install(sb_url, '/home/ubuntu')

                # #####
                # Handle glob if we use a .tgz that doesn't expand to sb_dir
                # sb_archive_dir = glob('/home/ubuntu/spark-bench-*')[0]
                # SparkBench expects to live in ~/spark-bench, so put it there
                # Path(sb_archive_dir).rename(sb_dir)
                # #####

                unitdata.kv().set('spark_bench.installed', True)
                unitdata.kv().flush(True)
        else:
            Path(sb_dir).rmtree_p()
            unitdata.kv().set('spark_bench.installed', False)
            unitdata.kv().flush(True)
Example #5
def download_from_upstream():
    if not config('fallback_url') or not config('fallback_sum'):
        status_set('blocked', 'Missing configuration: fallback_url or fallback_sum')
        return None
    client = ArchiveUrlFetchHandler()
    return client.download_and_validate(config('fallback_url'),
                                        config('fallback_sum'))
Example #6
def download_default_image():
    status_set("maintenance", "Downloading default image")
    fetcher = ArchiveUrlFetchHandler()
    fetcher.download(
        source="https://cloud-images.ubuntu.com/releases/16.04/release/ubuntu-16.04-server-cloudimg-amd64-disk1.img",
        dest="/opt/VNF/images/ubuntu-16.04-server-cloudimg-amd64-disk1.img"
        # TODO: add checksum
    )
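Example #6 leaves checksum verification as a TODO. One way to close it, following the download-then-check_hash pattern from Example #14 below (the digest here is a placeholder, not the real image checksum):

from charmhelpers.core.host import check_hash
from charmhelpers.fetch.archiveurl import ArchiveUrlFetchHandler

IMAGE_URL = ("https://cloud-images.ubuntu.com/releases/16.04/release/"
             "ubuntu-16.04-server-cloudimg-amd64-disk1.img")
IMAGE_DEST = "/opt/VNF/images/ubuntu-16.04-server-cloudimg-amd64-disk1.img"
IMAGE_SHA256 = "<expected-sha256>"  # placeholder

def download_default_image_checked():
    fetcher = ArchiveUrlFetchHandler()
    fetcher.download(source=IMAGE_URL, dest=IMAGE_DEST)
    # check_hash() raises ChecksumError if the digest does not match
    check_hash(IMAGE_DEST, IMAGE_SHA256, 'sha256')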
Example #7
def install_layer_opentsdb():
    status_set('maintenance', 'Installing OpenTSDB...')
    fetcher = ArchiveUrlFetchHandler()
    fetcher.download(
        'https://github.com/OpenTSDB/opentsdb/releases/download/v2.3.0/opentsdb-2.3.0_all.deb',
        '/opt/opentsdb-2.3.0_all.deb')
    subprocess.check_call(['dpkg', '-i', '/opt/opentsdb-2.3.0_all.deb'])
    set_state('layer-opentsdb.installed')
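In Example #7, dpkg -i does not resolve dependencies on its own; if the OpenTSDB package declares any, a common follow-up (an assumption about this charm, not shown in the original) is to let apt fix them up:

import subprocess

subprocess.check_call(['dpkg', '-i', '/opt/opentsdb-2.3.0_all.deb'])
# Pull in any missing dependencies declared by the package
subprocess.check_call(['apt-get', 'install', '-f', '-y'])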
Example #8
def download_default_image():
    status_set("maintenance", "Downloading default image")
    fetcher = ArchiveUrlFetchHandler()
    fetcher.download(
        source="https://cloud-images.ubuntu.com/releases/16.04/release/ubuntu-16.04-server-cloudimg-amd64-disk1.img",
        dest="/opt/VNF/images/ubuntu-16.04-server-cloudimg-amd64-disk1.img"
        # TODO: add checksum
    )
Example #9
    def test_hashes_and_files(self):

        h = ArchiveUrlFetchHandler()

        for deb, sha in DEBS.items():
            try:
                h.download_and_validate(full_url(deb), sha, validate="sha1")
            except Exception as e:
                self.fail("download and validate failed: " + str(e))
Example #10
def install_from_archive_url(url, dest):
    dlog("Trying to install from archive url: {}".format(url))
    try:
        handler = ArchiveUrlFetchHandler()
        ddir = handler.install(url)
        if os.path.exists(dest):
            dlog("Removing existing directory at {}".format(dest))
            shutil.rmtree(dest)
        src = os.path.join(ddir, "src", "cinder", "volume", "drivers", "datera")
        dlog("Copying tree. src [{}] dst [{}]".format(src, dest))
        shutil.copytree(src, dest)
    except Exception as e:
        raise DateraException(
            "Could not install from archive url: {}".format(e))
Example #11
    def fetch_install_puppet_deb(self, puppet):
        '''Fetch and install the puppet deb
        '''
        hookenv.status_set('maintenance',
                           'Configuring Puppetlabs apt sources')
        aufh = ArchiveUrlFetchHandler()
        aufh.download(self.puppet_deb_url(), self.puppet_deb_temp())
        dpkg_puppet_deb = 'dpkg -i %s' % self.puppet_deb_temp()
        call(dpkg_puppet_deb.split(), shell=False)
        apt_update()

        # Clean up
        rm_trusty_puppet_deb = 'rm %s' % self.puppet_deb_temp()
        call(rm_trusty_puppet_deb.split(), shell=False)
        self.puppet_active()
Example #12
def download_tomcat():
    '''Downloads Tomcat from Apache archive and extracts the tarball.'''
    status_set('maintenance', 'Downloading Tomcat...')

    if not os.path.isfile('/opt/apache-tomcat-9.0.1.tar.gz'):
        fetcher = ArchiveUrlFetchHandler()
        fetcher.download(
            'https://archive.apache.org/dist/tomcat/tomcat-9/v9.0.1/bin/apache-tomcat-9.0.1.tar.gz',
            '/opt/apache-tomcat-9.0.1.tar.gz')

    if not os.path.isdir(TOMCAT_DIR):
        subprocess.check_call(
            ['tar', 'xvzf', '/opt/apache-tomcat-9.0.1.tar.gz', '-C', '/opt'])

    set_state('layer-tomcat.downloaded')
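Example #12 downloads the tarball and extracts it with tar in two steps. ArchiveUrlFetchHandler.install() can fetch and unpack in one call, as other examples on this page do; a sketch of that alternative (whether the extracted directory matches TOMCAT_DIR depends on the tarball layout):

from charmhelpers.fetch.archiveurl import ArchiveUrlFetchHandler

TOMCAT_URL = ('https://archive.apache.org/dist/tomcat/tomcat-9/v9.0.1/bin/'
              'apache-tomcat-9.0.1.tar.gz')

def download_tomcat_via_install():
    fetcher = ArchiveUrlFetchHandler()
    # install() downloads the archive and extracts it under /opt
    fetcher.install(TOMCAT_URL, '/opt')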
Example #13
def install():
    """ Install Hook """
    log('ftb-infinity: install')
    status_set('maintenance', 'installing FTB modpack')

    # Add user
    adduser(FTB_USER)
    mkdir(FTB_HOME, owner=FTB_USER, group=FTB_USER, perms=0o750)
    check_call(['usermod', '-s', '/bin/bash', '-d', FTB_HOME, FTB_USER])

    # Download ftb
    ArchiveUrlFetchHandler().install(FTB_DL_URL, FTB_HOME)

    # Sanitize permissions, zip!
    chownr(FTB_HOME, FTB_USER, FTB_USER)
    path = os.path.join(FTB_HOME, 'FTBInstall.sh')
    s = os.stat(path)
    os.chmod(path, s.st_mode | stat.S_IXUSR | stat.S_IXGRP)

    # Accept EULA
    sed(os.path.join(FTB_HOME, 'eula.txt'), 'eula=false', 'eula=true')

    # Download minecraft jars
    with chdir(FTB_HOME):
        check_call(['sudo', '-u', FTB_USER, '-H', os.path.join(FTB_HOME, 'FTBInstall.sh')])

    # Render server.properties
    ftb_config_server()

    # Deploy systemd service
    ftb_systemd_install()

    set_state(CHARM_STATE_AVAILABLE)
    status_set('waiting', 'ftb downloaded')
Example #14
    def download_platform(self):
        hookenv.status_set('maintenance', 'Downloading platform')
        url = self.get_platform_package_url()
        package = self.get_platform_package_name()
        log("Platform package url: " + url, INFO)
        aufh = ArchiveUrlFetchHandler()
        dest_file = "/tmp/" + package
        aufh.download(url, dest_file)
        fetch.apt_update()
        checksum = self.config['package-checksum']
        if checksum:
            hash_type = self.config['hash-type']
            if not hash_type:
                hash_type = 'md5'
            host.check_hash(dest_file, checksum, hash_type)
        return dest_file
Example #15
def install():
    status_set('maintenance', 'Executing pre-install')
    execd_preinstall()
    openstack_origin = config('openstack-origin')
    configure_installation_source(openstack_origin)
    neutron_plugin = config('neutron-plugin')
    additional_install_locations(neutron_plugin, openstack_origin)

    add_source(config('extra-source'), config('extra-key'))
    status_set('maintenance', 'Installing apt packages')
    apt_update(fatal=True)
    packages = determine_packages(openstack_origin)
    apt_install(packages, fatal=True)

    if neutron_plugin == 'vsp':
        source = config('nuage-tarball-url')
        if source is not None:
            try:
                handler = ArchiveUrlFetchHandler()
                packages = ['nuage-neutron']
                path = handler.install(source)
                for package in packages:
                    package_path = os.path.join(path, package)
                    if os.path.exists(package_path):
                        log('install {0} from: {1}'.format(
                            package, package_path))
                        check_output([
                            'bash', '-c',
                            'cd {}; sudo python setup.py install'.format(
                                package_path)
                        ])
            except Exception as e:
                log('install failed with error: {}'.format(e))
                raise Exception(e)

    status_set('maintenance', 'Git install')
    git_install(config('openstack-origin-git'))

    [open_port(port) for port in determine_ports()]

    if neutron_plugin == 'midonet':
        mkdir('/etc/neutron/plugins/midonet',
              owner='neutron',
              group='neutron',
              perms=0o755,
              force=False)
Example #16
def install():
    status_set('maintenance', 'Executing pre-install')
    execd_preinstall()
    openstack_origin = config('openstack-origin')
    configure_installation_source(openstack_origin)
    neutron_plugin = config('neutron-plugin')
    additional_install_locations(neutron_plugin, openstack_origin)

    add_source(config('extra-source'), config('extra-key'))
    status_set('maintenance', 'Installing apt packages')
    apt_update(fatal=True)
    packages = determine_packages(openstack_origin)
    apt_install(packages, fatal=True)

    if neutron_plugin == 'vsp':
        source = config('nuage-tarball-url')
        if source is not None:
            try:
                handler = ArchiveUrlFetchHandler()
                packages = ['nuage-neutron']
                path = handler.install(source)
                for package in packages:
                    package_path = os.path.join(path, package)
                    if os.path.exists(package_path):
                        log('install {0} from: {1}'.format(package,
                                                           package_path))
                        check_output(
                            [
                                'bash', '-c',
                                'cd {}; sudo python setup.py install'.format(
                                    package_path)
                            ]
                        )
            except Exception as e:
                log('install failed with error: {}'.format(e))
                raise Exception(e)

    status_set('maintenance', 'Git install')
    git_install(config('openstack-origin-git'))

    [open_port(port) for port in determine_ports()]

    if neutron_plugin == 'midonet':
        mkdir('/etc/neutron/plugins/midonet', owner='neutron', group='neutron',
              perms=0o755, force=False)
Example #17
def install_layer_telegraf():
    """Installs the Telegraf software if it is not already installed."""
    if is_telegraf_installed():
        increment_number_telegrafs()
    else:
        status_set('maintenance', 'Installing Telegraf...')
        fetcher = ArchiveUrlFetchHandler()
        if not os.path.isdir('/opt/telegraf'):
            mkdir('/opt/telegraf')
        fetcher.download('https://dl.influxdata.com/telegraf/releases/telegraf_1.4.5-1_amd64.deb',
                         '/opt/telegraf/telegraf_1.4.5-1_amd64.deb')
        subprocess.check_call(['dpkg', '--force-confdef', '-i',
                               '/opt/telegraf/telegraf_1.4.5-1_amd64.deb'])
        shutil.copyfile('files/plugins.json', '/opt/telegraf/plugins.json')
        shutil.copyfile('files/telegraf.json', '/opt/telegraf/telegraf.json')
        increment_number_telegrafs()

    set_flag('layer-telegraf.installed')
Example #18
    def install(self, force=False):
        if not force and self.is_installed():
            return

        # download Bigtop release; unpack the recipes
        bigtop_dir = '/home/ubuntu/bigtop.release'
        if not unitdata.kv().get('bigtop-release.installed', False):
            Path(bigtop_dir).rmtree_p()
            au = ArchiveUrlFetchHandler()
            au.install(bigtop_dir, '/home/ubuntu')

            unitdata.kv().set('bigtop-release.installed', True)
            unitdata.kv().flush(True)

        hiera_dst = self.dist_config.bigtop_hiera('path')
        hiera_conf = self.dist_config.bigtop_hiera('source')
        utils.re_edit_in_place(
            hiera_conf, {
                r'.*:datadir.*': '{0}/{1}'.format(hiera_dst, hiera_conf),
            })

        # generate site.yaml. Something like this would do
        setup_bigtop_config(
            bigtop_dir,
            "{0}/hieradata/site.yaml".format(os.path.dirname(hiera_conf)))

        # install required puppet modules
        try:
            utils.run_as('root', 'puppet', 'module', 'install',
                         'puppetlabs-stdlib', 'puppetlabs-apt')
        except CalledProcessError:
            pass  # All modules are set

        try:
            utils.run_as(
                'root', 'root', 'puppet', 'apply', '-d',
                '--modulepath="bigtop-deploy/puppet/modules:/etc/puppet/modules"',
                'bigtop-deploy/puppet/manifests/site.pp')
        except CalledProcessError:
            pass  # Everything seems to be fine

        unitdata.kv().set('bigtop.installed', True)
        unitdata.kv().flush(True)
Example #19
    def download_plugin(self, plugin_name):
        hookenv.status_set('maintenance', 'Downloading plugin ' + plugin_name)
        url = self.get_plugin_package_url(plugin_name)
        package = self.get_plugin_package_name(plugin_name)
        log("Plugin package url: " + url, INFO)
        aufh = ArchiveUrlFetchHandler()
        dest_file = "/tmp/" + package
        aufh.download(url, dest_file)
        fetch.apt_update()

        hash_key = plugin_name + '-hash'
        checksum = ''
        if hash_key in self.config.keys():
            checksum = self.config[hash_key]
        if checksum:
            hash_type = self.config['hash-type']
            if not hash_type:
                hash_type = 'md5'
            host.check_hash(dest_file, checksum, hash_type)
        return dest_file
Example #20
def install_deps():
    # Pull dependencies
    status_set('maintenance', 'fetching rocket.chat packages')
    log('fetching rocket.chat packages', level='info')
    apt_install(['nodejs', 'build-essential', 'npm'])
    # subprocess.run(['sudo', 'npm', 'install', '-g', 'n'])
    # subprocess.run(['sudo', 'n', '4.5'])
    # Pull latest version of Rocket.Chat
    handler = ArchiveUrlFetchHandler()
    status_set('maintenance', 'fetching rocket.chat')
    log('fetching rocket.chat', level='info')
    handler.install('https://rocket.chat/releases/latest/download',
                    dest=charm_path)

    # Unpack Rocket.Chat to destination folder
    subprocess.run(['mv', charm_path + '/bundle/', '/opt/Rocket.Chat'])
    os.chdir('/opt/Rocket.Chat/programs/server')
    subprocess.run(['sudo', 'npm', 'install'])
    copyfile(charm_path + '/files/rocketchat.service',
             '/etc/systemd/system/rocketchat.service')
    status_set('maintenance', 'packages installed')
    set_state('rocketchat.ready')
Example #21
    def install(self, force=False):
        if not force and self.is_installed():
            return

        # download Bigtop release; unpack the recipes
        bigtop_dir = '/home/ubuntu/bigtop.release'
        if not unitdata.kv().get('bigtop-release.installed', False):
            Path(bigtop_dir).rmtree_p()
            au = ArchiveUrlFetchHandler()
            au.install(bigtop_dir, '/home/ubuntu')

            unitdata.kv().set('bigtop-release.installed', True)
            unitdata.kv().flush(True)

        hiera_dst = self.dist_config.bigtop_hiera('path')
        hiera_conf = self.dist_config.bigtop_hiera('source')
        utils.re_edit_in_place(hiera_conf, {
            r'.*:datadir.*': '{0}/{1}'.format(hiera_dst, hiera_conf),
        })

        # generate site.yaml. Something like this would do
        setup_bigtop_config(bigtop_dir, "{0}/hieradata/site.yaml".format(os.path.dirname(hiera_conf)))

        # install required puppet modules
        try:
            utils.run_as('root', 'puppet', 'module', 'install', 'puppetlabs-stdlib', 'puppetlabs-apt')
        except CalledProcessError:
            pass # All modules are set

        try:
            utils.run_as('root', 'root', 'puppet', 'apply', '-d',
                         '--modulepath="bigtop-deploy/puppet/modules:/etc/puppet/modules"',
                         'bigtop-deploy/puppet/manifests/site.pp')
        except CalledProcessError:
            pass  # Everything seems to be fine

        unitdata.kv().set('bigtop.installed', True)
        unitdata.kv().flush(True)
Example #22
    def setup_simplesamlphp(self):
        if os.path.exists(self.DEST_DIR):
            os.rmdir(self.DEST_DIR)

        version = self.config.get('simple-saml-php-version')
        archive_handler = ArchiveUrlFetchHandler()
        retry_on_error()(archive_handler.install)(
            source='{0}/v{1}/simplesamlphp-{1}.tar.gz'.format(
                self.BASE_DOWNLOAD_URL, version),
            dest=os.path.dirname(self.DEST_DIR))
        os.rename('{0}-{1}'.format(self.DEST_DIR, version), self.DEST_DIR)

        key_file = '{0}/cert/server.pem'.format(self.DEST_DIR)
        cert_file = '{0}/cert/server.crt'.format(self.DEST_DIR)
        ssl.generate_selfsigned(keyfile=key_file,
                                certfile=cert_file,
                                keysize=2048,
                                cn=get_unit_hostname())
        uid = pwd.getpwnam(self.APACHE_USER).pw_uid
        gid = grp.getgrnam(self.APACHE_GROUP).gr_gid
        os.chown(key_file, uid, gid)
        os.chown(cert_file, uid, gid)
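Example #22 wraps archive_handler.install in retry_on_error(), which is not shown in the snippet. A minimal decorator factory with that calling convention could look like the following; it is purely illustrative, not the charm's actual implementation:

import time
from functools import wraps

def retry_on_error(retries=3, delay=5):
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            for attempt in range(1, retries + 1):
                try:
                    return func(*args, **kwargs)
                except Exception:
                    if attempt == retries:
                        raise
                    time.sleep(delay)
        return wrapper
    return decorator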
Example #23
    def configure(self):
        '''
        Configure spark environment for all users
        '''
        spark_home = self.dist_config.path('spark')
        spark_bin = spark_home / 'bin'

        # handle tuning options that may be set as percentages
        driver_mem = '1g'
        req_driver_mem = hookenv.config()['driver_memory']
        executor_mem = '1g'
        req_executor_mem = hookenv.config()['executor_memory']
        if req_driver_mem.endswith('%'):
            if self.is_spark_local():
                mem_mb = host.get_total_ram() / 1024 / 1024
                req_percentage = float(req_driver_mem.strip('%')) / 100
                driver_mem = str(int(mem_mb * req_percentage)) + 'm'
            else:
                hookenv.log("driver_memory percentage in non-local mode. Using 1g default.",
                            level=None)
        else:
            driver_mem = req_driver_mem

        if req_executor_mem.endswith('%'):
            if self.is_spark_local():
                mem_mb = host.get_total_ram() / 1024 / 1024
                req_percentage = float(req_executor_mem.strip('%')) / 100
                executor_mem = str(int(mem_mb * req_percentage)) + 'm'
            else:
                hookenv.log("executor_memory percentage in non-local mode. Using 1g default.",
                            level=None)
        else:
            executor_mem = req_executor_mem

        # update environment variables
        with utils.environment_edit_in_place('/etc/environment') as env:
            if spark_bin not in env['PATH']:
                env['PATH'] = ':'.join([env['PATH'], spark_bin])
            env['MASTER'] = self.get_master()
            env['PYSPARK_DRIVER_PYTHON'] = "ipython"
            env['SPARK_CONF_DIR'] = self.dist_config.path('spark_conf')
            env['SPARK_DRIVER_MEMORY'] = driver_mem
            env['SPARK_EXECUTOR_MEMORY'] = executor_mem
            env['SPARK_HOME'] = spark_home
            env['SPARK_JAR'] = "hdfs:///user/ubuntu/share/lib/spark-assembly.jar"

        # update spark config
        spark_conf = self.dist_config.path('spark_conf') / 'spark-defaults.conf'
        utils.re_edit_in_place(spark_conf, {
            r'.*spark.master *.*': 'spark.master {}'.format(self.get_master()),
            r'.*spark.eventLog.enabled *.*': 'spark.eventLog.enabled true',
            r'.*spark.eventLog.dir *.*': 'spark.eventLog.dir hdfs:///user/ubuntu/directory',
            })
        spark_env = self.dist_config.path('spark_conf') / 'spark-env.sh'
        local_ip = utils.resolve_private_address(hookenv.unit_private_ip())
        utils.re_edit_in_place(spark_env, {
            r'.*SPARK_DRIVER_MEMORY.*': 'SPARK_DRIVER_MEMORY={}'.format(driver_mem),
            r'.*SPARK_EXECUTOR_MEMORY.*': 'SPARK_EXECUTOR_MEMORY={}'.format(executor_mem),
            r'.*SPARK_LOG_DIR.*': 'SPARK_LOG_DIR={}'.format(self.dist_config.path('spark_logs')),
            r'.*SPARK_MASTER_IP.*': 'SPARK_MASTER_IP={}'.format(local_ip),
            r'.*SPARK_WORKER_DIR.*': 'SPARK_WORKER_DIR={}'.format(self.dist_config.path('spark_work')),
            })

        # manage SparkBench
        install_sb = hookenv.config()['spark_bench_enabled']
        sb_dir = '/home/ubuntu/spark-bench'
        if install_sb:
            if utils.cpu_arch() == 'ppc64le':
                sb_url = hookenv.config()['spark_bench_ppc64le']
            else:
                # TODO: may need more arch cases (go with x86 sb for now)
                sb_url = hookenv.config()['spark_bench_x86_64']

            Path(sb_dir).rmtree_p()
            fetcher = ArchiveUrlFetchHandler()
            fetcher.install(sb_url, '/home/ubuntu')

            # #####
            # Handle glob if we use a .tgz that doesn't expand to sb_dir
            # sb_archive_dir = glob('/home/ubuntu/spark-bench-*')[0]
            # SparkBench expects to live in ~/spark-bench, so put it there
            # Path(sb_archive_dir).rename(sb_dir)
            # #####

            # comment out mem tunings (let them come from /etc/environment)
            sb_env = Path(sb_dir) / 'conf/env.sh'
            utils.re_edit_in_place(sb_env, {
                r'^SPARK_DRIVER_MEMORY.*': '# SPARK_DRIVER_MEMORY (use value from environment)',
                r'^SPARK_EXECUTOR_MEMORY.*': '# SPARK_EXECUTOR_MEMORY (use value from environment)',
                })
        else:
            Path(sb_dir).rmtree_p()
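The percentage handling in Example #23 reduces to a small pure function; a standalone sketch (names are illustrative), given that host.get_total_ram() reports bytes, which is why the example divides by 1024 twice:

def resolve_memory(requested, total_ram_bytes, default='1g'):
    """Turn a value like '50%' into an absolute size such as '8192m';
    pass absolute values through unchanged."""
    if requested.endswith('%'):
        mem_mb = total_ram_bytes / 1024 / 1024
        fraction = float(requested.strip('%')) / 100
        return str(int(mem_mb * fraction)) + 'm'
    return requested or default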
Example #24
    def configure_sparkbench(self):
        """
        Install/configure/remove Spark-Bench based on user config.

        If config[spark_bench_enabled], fetch, install, and configure
        Spark-Bench on initial invocation. Subsequent invocations will skip the
        fetch/install, but will reconfigure Spark-Bench since we may need to
        adjust the data dir (eg: benchmark data is stored in hdfs when spark
        is in yarn mode; locally in all other execution modes).
        """
        install_sb = hookenv.config()['spark_bench_enabled']
        sb_dir = '/home/ubuntu/SparkBench'
        if install_sb:
            # Fetch/install on our first go-round, then set unit data so we
            # don't reinstall every time this function is called.
            if not unitdata.kv().get('spark_bench.installed', False):
                sb_url = hookenv.config()['spark_bench_url']

                Path(sb_dir).rmtree_p()
                au = ArchiveUrlFetchHandler()
                au.install(sb_url, '/home/ubuntu')

                # NB: This block is unused when using one of our sb tgzs. It
                # may come in handy if people want a tgz that does not expand
                # to our expected sb_dir.
                # #####
                # Handle glob if we use a .tgz that doesn't expand to sb_dir
                # sb_archive_dir = glob('/home/ubuntu/SparkBench*')[0]
                # SparkBench expects to live in ~/SparkBench, so put it there
                # Path(sb_archive_dir).rename(sb_dir)
                # #####

                # Ensure users in the spark group can write to any subdirectory
                # of sb_dir (spark needs to write benchmark output there when
                # running in local modes).
                host.chownr(Path(sb_dir), 'ubuntu', 'spark', chowntopdir=True)
                for r, d, f in os.walk(sb_dir):
                    os.chmod(r, 0o2775)

                unitdata.kv().set('spark_bench.installed', True)
                unitdata.kv().flush(True)

            # Configure the SB env every time this function is called.
            sb_conf = '{}/conf'.format(sb_dir)
            sb_env = Path(sb_conf) / 'env.sh'
            if not sb_env.exists():
                (Path(sb_conf) / 'env.sh.template').copy(sb_env)

            # NB: A few notes on configuring SparkBench:
            # 1. Input data has been pregenerated and packed into the tgz. All
            # spark cluster members will have this data locally, which enables
            # us to execute benchmarks in the absence of HDFS. When spark is in
            # yarn mode, we'll need to generate and store this data in HDFS
            # so nodemanagers can access it (NMs obviously won't have SB
            # installed locally). Set DATA_HDFS to a local dir or common HDFS
            # location depending on our spark execution mode.
            #
            # 2. SB tries to SSH to spark workers to purge vmem caches. This
            # isn't possible in containers, nor is it possible in our env
            # because we don't distribute ssh keys among cluster members.
            # Set MC_LIST to an empty string to prevent this behavior.
            #
            # 3. Throughout SB, HADOOP_HOME/bin is used as the prefix for the
            # hdfs command. Bigtop's hdfs lives at /usr/bin/hdfs, so set the
            # SB HADOOP_HOME accordingly (it's not used for anything else).
            #
            # 4. Use our MASTER envar to set the SparkBench SPARK_MASTER url.
            # It is updated every time we (re)configure spark.
            mode = hookenv.config()['spark_execution_mode']
            if mode.startswith('yarn'):
                sb_data_dir = "hdfs:///user/ubuntu/SparkBench"
            else:
                sb_data_dir = "file://{}".format(sb_dir)

            utils.re_edit_in_place(
                sb_env, {
                    r'^DATA_HDFS *=.*': 'DATA_HDFS="{}"'.format(sb_data_dir),
                    r'^DATASET_DIR *=.*':
                    'DATASET_DIR="{}/dataset"'.format(sb_dir),
                    r'^MC_LIST *=.*': 'MC_LIST=""',
                    r'.*HADOOP_HOME *=.*': 'HADOOP_HOME="/usr"',
                    r'.*SPARK_HOME *=.*': 'SPARK_HOME="/usr/lib/spark"',
                    r'^SPARK_MASTER *=.*': 'SPARK_MASTER="$MASTER"',
                })
        else:
            # config[spark_bench_enabled] is false; remove it
            Path(sb_dir).rmtree_p()
            unitdata.kv().set('spark_bench.installed', False)
            unitdata.kv().flush(True)
Example #25
    def configure(self):
        '''
        Configure spark environment for all users
        '''
        dc = self.dist_config
        spark_home = self.dist_config.path('spark')
        spark_bin = spark_home / 'bin'

        # handle tuning options that may be set as percentages
        driver_mem = '1g'
        req_driver_mem = hookenv.config()['driver_memory']
        executor_mem = '1g'
        req_executor_mem = hookenv.config()['executor_memory']
        if req_driver_mem.endswith('%'):
            if self.is_spark_local():
                mem_mb = host.get_total_ram() / 1024 / 1024
                req_percentage = float(req_driver_mem.strip('%')) / 100
                driver_mem = str(int(mem_mb * req_percentage)) + 'm'
            else:
                hookenv.log("driver_memory percentage in non-local mode. Using 1g default.",
                            level=None)
        else:
            driver_mem = req_driver_mem

        if req_executor_mem.endswith('%'):
            if self.is_spark_local():
                mem_mb = host.get_total_ram() / 1024 / 1024
                req_percentage = float(req_executor_mem.strip('%')) / 100
                executor_mem = str(int(mem_mb * req_percentage)) + 'm'
            else:
                hookenv.log("executor_memory percentage in non-local mode. Using 1g default.",
                            level=None)
        else:
            executor_mem = req_executor_mem

        # update environment variables
        with utils.environment_edit_in_place('/etc/environment') as env:
            if spark_bin not in env['PATH']:
                env['PATH'] = ':'.join([env['PATH'], spark_bin])
            env['MASTER'] = self.get_master()
            env['PYSPARK_DRIVER_PYTHON'] = "ipython"
            env['SPARK_CONF_DIR'] = self.dist_config.path('spark_conf')
            env['SPARK_DRIVER_MEMORY'] = driver_mem
            env['SPARK_EXECUTOR_MEMORY'] = executor_mem
            env['SPARK_HOME'] = spark_home

        events_dir = 'file://{}'.format(dc.path('spark_events'))
        if unitdata.kv().get('hdfs.available', False):
            prefix = dc.path('log_prefix')
            events_dir = dc.path('spark_events')
            events_dir = 'hdfs:///{}'.format(events_dir.replace(prefix, ''))

        # update spark-defaults
        spark_conf = self.dist_config.path('spark_conf') / 'spark-defaults.conf'
        utils.re_edit_in_place(spark_conf, {
            r'.*spark.master .*': 'spark.master {}'.format(self.get_master()),
            r'.*spark.eventLog.enabled .*': 'spark.eventLog.enabled true',
            r'.*spark.history.fs.logDirectory .*': 'spark.history.fs.logDirectory {}'.format(
                events_dir),
            r'.*spark.eventLog.dir .*': 'spark.eventLog.dir {}'.format(events_dir),
        }, append_non_matches=True)

        # update spark-env
        spark_env = self.dist_config.path('spark_conf') / 'spark-env.sh'
        utils.re_edit_in_place(spark_env, {
            r'.*SPARK_DRIVER_MEMORY.*': 'SPARK_DRIVER_MEMORY={}'.format(driver_mem),
            r'.*SPARK_EXECUTOR_MEMORY.*': 'SPARK_EXECUTOR_MEMORY={}'.format(executor_mem),
            r'.*SPARK_LOG_DIR.*': 'SPARK_LOG_DIR={}'.format(self.dist_config.path('spark_logs')),
            r'.*SPARK_WORKER_DIR.*': 'SPARK_WORKER_DIR={}'.format(self.dist_config.path('spark_work')),
        })

        # If zookeeper is available we should be in HA mode so we should not set the MASTER_IP
        if not unitdata.kv().get('zookeepers.available', False):
            master_ip = self.get_master_ip()
            utils.re_edit_in_place(spark_env, {
                r'.*SPARK_MASTER_IP.*': 'SPARK_MASTER_IP={}'.format(master_ip),
            })

        # manage SparkBench
        install_sb = hookenv.config()['spark_bench_enabled']
        sb_dir = '/home/ubuntu/spark-bench'
        if install_sb:
            if not unitdata.kv().get('spark_bench.installed', False):
                if utils.cpu_arch() == 'ppc64le':
                    sb_url = hookenv.config()['spark_bench_ppc64le']
                else:
                    # TODO: may need more arch cases (go with x86 sb for now)
                    sb_url = hookenv.config()['spark_bench_x86_64']

                Path(sb_dir).rmtree_p()
                au = ArchiveUrlFetchHandler()
                au.install(sb_url, '/home/ubuntu')

                # #####
                # Handle glob if we use a .tgz that doesn't expand to sb_dir
                # sb_archive_dir = glob('/home/ubuntu/spark-bench-*')[0]
                # SparkBench expects to live in ~/spark-bench, so put it there
                # Path(sb_archive_dir).rename(sb_dir)
                # #####

                # comment out mem tunings (let them come from /etc/environment)
                sb_env = Path(sb_dir) / 'conf/env.sh'
                utils.re_edit_in_place(sb_env, {
                    r'^SPARK_DRIVER_MEMORY.*': '# SPARK_DRIVER_MEMORY (use value from environment)',
                    r'^SPARK_EXECUTOR_MEMORY.*': '# SPARK_EXECUTOR_MEMORY (use value from environment)',
                })

                unitdata.kv().set('spark_bench.installed', True)
                unitdata.kv().flush(True)
        else:
            Path(sb_dir).rmtree_p()
            unitdata.kv().set('spark_bench.installed', False)
            unitdata.kv().flush(True)

        self.setup_init_scripts()
Example #26
    def configure(self):
        '''
        Configure spark environment for all users
        '''
        dc = self.dist_config
        spark_home = self.dist_config.path('spark')
        spark_bin = spark_home / 'bin'

        # handle tuning options that may be set as percentages
        driver_mem = '1g'
        req_driver_mem = hookenv.config()['driver_memory']
        executor_mem = '1g'
        req_executor_mem = hookenv.config()['executor_memory']
        if req_driver_mem.endswith('%'):
            if self.is_spark_local():
                mem_mb = host.get_total_ram() / 1024 / 1024
                req_percentage = float(req_driver_mem.strip('%')) / 100
                driver_mem = str(int(mem_mb * req_percentage)) + 'm'
            else:
                hookenv.log(
                    "driver_memory percentage in non-local mode. Using 1g default.",
                    level=None)
        else:
            driver_mem = req_driver_mem

        if req_executor_mem.endswith('%'):
            if self.is_spark_local():
                mem_mb = host.get_total_ram() / 1024 / 1024
                req_percentage = float(req_executor_mem.strip('%')) / 100
                executor_mem = str(int(mem_mb * req_percentage)) + 'm'
            else:
                hookenv.log(
                    "executor_memory percentage in non-local mode. Using 1g default.",
                    level=None)
        else:
            executor_mem = req_executor_mem

        # update environment variables
        with utils.environment_edit_in_place('/etc/environment') as env:
            if spark_bin not in env['PATH']:
                env['PATH'] = ':'.join([env['PATH'], spark_bin])
            env['MASTER'] = self.get_master()
            env['PYSPARK_DRIVER_PYTHON'] = "ipython"
            env['SPARK_CONF_DIR'] = self.dist_config.path('spark_conf')
            env['SPARK_DRIVER_MEMORY'] = driver_mem
            env['SPARK_EXECUTOR_MEMORY'] = executor_mem
            env['SPARK_HOME'] = spark_home

        events_dir = 'file://{}'.format(dc.path('spark_events'))
        if unitdata.kv().get('hdfs.available', False):
            prefix = dc.path('log_prefix')
            events_dir = dc.path('spark_events')
            events_dir = 'hdfs:///{}'.format(events_dir.replace(prefix, ''))

        # update spark-defaults
        spark_conf = self.dist_config.path(
            'spark_conf') / 'spark-defaults.conf'
        utils.re_edit_in_place(spark_conf, {
            r'.*spark.master .*':
            'spark.master {}'.format(self.get_master()),
            r'.*spark.eventLog.enabled .*':
            'spark.eventLog.enabled true',
            r'.*spark.history.fs.logDirectory .*':
            'spark.history.fs.logDirectory {}'.format(events_dir),
            r'.*spark.eventLog.dir .*':
            'spark.eventLog.dir {}'.format(events_dir),
        },
                               append_non_matches=True)

        # update spark-env
        spark_env = self.dist_config.path('spark_conf') / 'spark-env.sh'
        utils.re_edit_in_place(
            spark_env, {
                r'.*SPARK_DRIVER_MEMORY.*':
                'SPARK_DRIVER_MEMORY={}'.format(driver_mem),
                r'.*SPARK_EXECUTOR_MEMORY.*':
                'SPARK_EXECUTOR_MEMORY={}'.format(executor_mem),
                r'.*SPARK_LOG_DIR.*':
                'SPARK_LOG_DIR={}'.format(self.dist_config.path('spark_logs')),
                r'.*SPARK_WORKER_DIR.*':
                'SPARK_WORKER_DIR={}'.format(
                    self.dist_config.path('spark_work')),
            })

        # If zookeeper is available we should be in HA mode so we should not set the MASTER_IP
        if not unitdata.kv().get('zookeepers.available', False):
            master_ip = self.get_master_ip()
            utils.re_edit_in_place(spark_env, {
                r'.*SPARK_MASTER_IP.*':
                'SPARK_MASTER_IP={}'.format(master_ip),
            })

        # manage SparkBench
        install_sb = hookenv.config()['spark_bench_enabled']
        sb_dir = '/home/ubuntu/spark-bench'
        if install_sb:
            if not unitdata.kv().get('spark_bench.installed', False):
                if utils.cpu_arch() == 'ppc64le':
                    sb_url = hookenv.config()['spark_bench_ppc64le']
                else:
                    # TODO: may need more arch cases (go with x86 sb for now)
                    sb_url = hookenv.config()['spark_bench_x86_64']

                Path(sb_dir).rmtree_p()
                au = ArchiveUrlFetchHandler()
                au.install(sb_url, '/home/ubuntu')

                # #####
                # Handle glob if we use a .tgz that doesn't expand to sb_dir
                # sb_archive_dir = glob('/home/ubuntu/spark-bench-*')[0]
                # SparkBench expects to live in ~/spark-bench, so put it there
                # Path(sb_archive_dir).rename(sb_dir)
                # #####

                # comment out mem tunings (let them come from /etc/environment)
                sb_env = Path(sb_dir) / 'conf/env.sh'
                utils.re_edit_in_place(
                    sb_env, {
                        r'^SPARK_DRIVER_MEMORY.*':
                        '# SPARK_DRIVER_MEMORY (use value from environment)',
                        r'^SPARK_EXECUTOR_MEMORY.*':
                        '# SPARK_EXECUTOR_MEMORY (use value from environment)',
                    })

                unitdata.kv().set('spark_bench.installed', True)
                unitdata.kv().flush(True)
        else:
            Path(sb_dir).rmtree_p()
            unitdata.kv().set('spark_bench.installed', False)
            unitdata.kv().flush(True)

        self.setup_init_scripts()
Example #27
    def configure_sparkbench(self):
        """
        Install/configure/remove Spark-Bench based on user config.

        If config[spark_bench_enabled], fetch, install, and configure
        Spark-Bench on initial invocation. Subsequent invocations will skip the
        fetch/install, but will reconfigure Spark-Bench since we may need to
        adjust the data dir (eg: benchmark data is stored in hdfs when spark
        is in yarn mode; locally in all other execution modes).
        """
        install_sb = hookenv.config()['spark_bench_enabled']
        sb_dir = '/home/ubuntu/SparkBench'
        if install_sb:
            # Fetch/install on our first go-round, then set unit data so we
            # don't reinstall every time this function is called.
            if not unitdata.kv().get('spark_bench.installed', False):
                sb_url = hookenv.config()['spark_bench_url']

                Path(sb_dir).rmtree_p()
                au = ArchiveUrlFetchHandler()
                au.install(sb_url, '/home/ubuntu')

                # NB: This block is unused when using one of our sb tgzs. It
                # may come in handy if people want a tgz that does not expand
                # to our expected sb_dir.
                # #####
                # Handle glob if we use a .tgz that doesn't expand to sb_dir
                # sb_archive_dir = glob('/home/ubuntu/SparkBench*')[0]
                # SparkBench expects to live in ~/SparkBench, so put it there
                # Path(sb_archive_dir).rename(sb_dir)
                # #####

                # Ensure users in the spark group can write to any subdirectory
                # of sb_dir (spark needs to write benchmark output there when
                # running in local modes).
                host.chownr(Path(sb_dir), 'ubuntu', 'spark', chowntopdir=True)
                for r, d, f in os.walk(sb_dir):
                    os.chmod(r, 0o2775)

                unitdata.kv().set('spark_bench.installed', True)
                unitdata.kv().flush(True)

            # Configure the SB env every time this function is called.
            sb_conf = '{}/conf'.format(sb_dir)
            sb_env = Path(sb_conf) / 'env.sh'
            if not sb_env.exists():
                (Path(sb_conf) / 'env.sh.template').copy(sb_env)

            # NB: A few notes on configuring SparkBench:
            # 1. Input data has been pregenerated and packed into the tgz. All
            # spark cluster members will have this data locally, which enables
            # us to execute benchmarks in the absence of HDFS. When spark is in
            # yarn mode, we'll need to generate and store this data in HDFS
            # so nodemanagers can access it (NMs obviously won't have SB
            # installed locally). Set DATA_HDFS to a local dir or common HDFS
            # location depending on our spark execution mode.
            #
            # 2. SB tries to SSH to spark workers to purge vmem caches. This
            # isn't possible in containers, nor is it possible in our env
            # because we don't distribute ssh keys among cluster members.
            # Set MC_LIST to an empty string to prevent this behavior.
            #
            # 3. Throughout SB, HADOOP_HOME/bin is used as the prefix for the
            # hdfs command. Bigtop's hdfs lives at /usr/bin/hdfs, so set the
            # SB HADOOP_HOME accordingly (it's not used for anything else).
            #
            # 4. Use our MASTER envar to set the SparkBench SPARK_MASTER url.
            # It is updated every time we (re)configure spark.
            mode = hookenv.config()['spark_execution_mode']
            if mode.startswith('yarn'):
                sb_data_dir = "hdfs:///user/ubuntu/SparkBench"
            else:
                sb_data_dir = "file://{}".format(sb_dir)

            utils.re_edit_in_place(sb_env, {
                r'^DATA_HDFS *=.*': 'DATA_HDFS="{}"'.format(sb_data_dir),
                r'^DATASET_DIR *=.*': 'DATASET_DIR="{}/dataset"'.format(sb_dir),
                r'^MC_LIST *=.*': 'MC_LIST=""',
                r'.*HADOOP_HOME *=.*': 'HADOOP_HOME="/usr"',
                r'.*SPARK_HOME *=.*': 'SPARK_HOME="/usr/lib/spark"',
                r'^SPARK_MASTER *=.*': 'SPARK_MASTER="$MASTER"',
            })
        else:
            # config[spark_bench_enabled] is false; remove it
            Path(sb_dir).rmtree_p()
            unitdata.kv().set('spark_bench.installed', False)
            unitdata.kv().flush(True)
Example #28
def config_changed():
    global charm_vm_config

    if not config('vsc-vm-ip-address'):
        e = 'vsc-vm-ip-address is not specified'
        status_set('blocked', e)
        raise ConfigurationError(e)
    if not config('vsc-vm-default-gw'):
        e = 'vsc-vm-default-gw is not specified'
        status_set('blocked', e)
        raise ConfigurationError(e)
    if not config('vsc-vm-dns-server'):
        e = 'vsc-vm-dns-server is not specified'
        status_set('blocked', e)
        raise ConfigurationError(e)

    if not config('vsc-vm-subnet-mask-length'):
        e = 'vsc-vm-subnet-mask-length is not specified'
        status_set('blocked', e)
        raise ConfigurationError(e)

    if not config('vsc-repository-url'):
        e = 'vsc-repository-url is not specified'
        status_set('blocked', e)
        raise ConfigurationError(e)

    vsc_vm_config_changed = False
    vsc_svc_config_changed = False
    if (config_value_changed('vsc-image-name')
            or config_value_changed('vsc-template-name')
            or config_value_changed('vsc-repository-url')
            or config_value_changed('vsc-vm-disk-size')
            or config_value_changed('vsc-vm-ip-address')
            or config_value_changed('vsc-vm-default-gw')
            or config_value_changed('vsc-vm-dns-server')
            or config_value_changed('vsc-vm-subnet-mask-length')
            or config_value_changed('bridge-name')
            or config_value_changed('vsc-vm-memory')):
        vsc_vm_config_changed = True

    if config_value_changed('admin-user') or\
            config_value_changed('xmpp-cluster-domain-name') or\
            config_value_changed('admin-password'):
        vsc_svc_config_changed = True

    if virshutils.is_vm_running(charm_vm_config) and not\
            vsc_vm_config_changed and not vsc_svc_config_changed:
        return

    if virshutils.is_vm_running(charm_vm_config) and vsc_vm_config_changed:
        stop()

    if virshutils.is_vm_running(charm_vm_config) and vsc_svc_config_changed:
        set_vsd_domain()
        return

    charm_templates_dir = os.path.join(os.environ.get('CHARM_DIR'),
                                       'templates')

    if not os.path.exists(charm_templates_dir):
        mkdir(charm_templates_dir)

    if not config('vsc-vm-disk-size'):
        charm_vm_config['VSP_VM_DISK_SIZE'] = '20G'
    else:
        charm_vm_config['VSP_VM_DISK_SIZE'] = config('vsc-vm-disk-size')

    charm_vm_config['VSP_VM_XML'] = config('vsc-template-name')
    charm_vm_config['VSP_VM_ORIG_IMAGE_NAME'] = config('vsc-image-name')

    source = config('vsc-repository-url')
    if (config_value_changed('vsc-repository-url')
            or (not has_db_value('vsc-repository-fetch-path'))):

        if source is None:
            e = 'vsc-repository-url has invalid value'
            status_set('blocked', e)
            raise ConfigurationError(e)

        handler = ArchiveUrlFetchHandler()
        path = handler.install(source)
        set_db_value('vsc-repository-fetch-path', path)

    path = get_db_value('vsc-repository-fetch-path')
    if path is None:
        e = 'vsc-repository fetch failed: {}'.format(source)
        status_set('blocked', e)
        raise ConfigurationError(e)

    log("path : {}".format(path))

    for root, dirnames, filenames in os.walk(path):
        if config('vsc-image-name') in filenames:
            path_of_vsc_image = os.path.join(root, config('vsc-image-name'))
            path = root
            log("file path vsc-image : {} ".format(path_of_vsc_image))
            log("file path root-folder : {} ".format(root))
            set_db_value('vsc-image-path', path_of_vsc_image)

    if not os.path.exists(get_db_value('vsc-image-path')):
        e = 'vsc-image not found in repository at: {}'.format(source)
        status_set('blocked', e)
        raise ConfigurationError(e)

    for root, dirnames, filenames in os.walk(path):
        if config('vsc-template-name') in filenames:
            path_of_vsc_template = os.path.join(root,
                                                config('vsc-template-name'))
            log("file path vsc-template: {} ".format(path_of_vsc_template))
            log("file path root-folder : {} ".format(root))
            set_db_value('vsc-template-path', path_of_vsc_template)

    if not os.path.exists(get_db_value('vsc-template-path')):
        e = 'vsc-template not found at: {}'.format(source)
        status_set('blocked', e)
        raise ConfigurationError(e)

    for root, dirnames, filenames in os.walk(path):
        if 'bof.cfg' in filenames:
            path_of_bof = os.path.join(root, 'bof.cfg')
            log("file path bof : {} ".format(path_of_bof))
            log("file path root-folder : {} ".format(root))
            set_db_value('vsc-bof-path', path_of_bof)

    from Cheetah.Template import Template
    cheetah_xml_file = Template(file=str(get_db_value('vsc-template-path')),
                                searchList=[{
                                    'vsp_image_name':
                                    str(config('vsc-image-name')),
                                    'bridge_name':
                                    str(config('bridge-name')),
                                    'memory':
                                    str(config('vsc-vm-memory'))
                                }])

    if cheetah_xml_file is None:
        e = 'Could not define cheetah_xml_file with'\
            ' configurable parameters'
        status_set('blocked', e)
        raise ConfigurationError(e)

    file = open(os.path.join(charm_templates_dir, config('vsc-template-name')),
                'w+')
    file.truncate()
    file.write(str(cheetah_xml_file))
    file.close()

    if os.path.exists(get_db_value('vsc-bof-path')):
        shutil.move(get_db_value('vsc-bof-path'),
                    os.path.join(charm_templates_dir, 'bof.cfg'))
    else:
        log('vsc-bof configuration is not found at: {}'.format(
            get_db_value('path_of_bof')))
    charm_vm_config['VSP_VM_DIR'] = charm_templates_dir
    charm_vm_config['VSP_VM_IMAGE_DIR'] = path

    if os.path.exists(os.path.join(charm_vm_config['VSP_VM_DIR'], 'bof.cfg')):
        with open(os.path.join(charm_vm_config['VSP_VM_DIR'], 'bof.cfg'),
                  "r") as bof:
            contents = bof.read()
            if (config('vsc-vm-ip-address') is not None
                    and config('vsc-vm-subnet-mask-length') is not None):
                contents = contents + "\n" + 'address ' +\
                    config('vsc-vm-ip-address') + '/' + \
                    config('vsc-vm-subnet-mask-length') + ' active'
            else:
                contents = contents + 'ip-address-dhcp' + '\n'
                contents = contents + 'address 169.254.10.1/24 active' + '\n'

            if config('vsc-vm-dns-server') is not None:
                contents = contents + '\n' + 'primary-dns ' + \
                    config('vsc-vm-dns-server') + '\n'
            else:
                contents = contents + "\n" + 'primary-dns ' + \
                    get_dns_ip() + '\n'
            if len(contents) > 0:
                virshutils.write_guestfs(
                    os.path.join(charm_vm_config['VSP_VM_IMAGE_DIR'],
                                 charm_vm_config['VSP_VM_ORIG_IMAGE_NAME']),
                    '/bof.cfg', contents)

    is_vm_created = virshutils.createvm(charm_vm_config)
    if is_vm_created == 0:
        e = 'could not create {} vm'.format(charm_vm_config['VSP_VM_NAME'])
        status_set('blocked', e)
        raise ConfigurationError(e)

    start()
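The repeated missing-key checks at the top of Example #28 can be collapsed into one loop over the required options; a sketch using the same config(), status_set() and ConfigurationError names from the example:

REQUIRED_KEYS = (
    'vsc-vm-ip-address',
    'vsc-vm-default-gw',
    'vsc-vm-dns-server',
    'vsc-vm-subnet-mask-length',
    'vsc-repository-url',
)

def check_required_config():
    for key in REQUIRED_KEYS:
        if not config(key):
            e = '{} is not specified'.format(key)
            status_set('blocked', e)
            raise ConfigurationError(e)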
Example #29
from charms.reactive import when, when_not, set_state
from charmhelpers.fetch.archiveurl import ArchiveUrlFetchHandler
from subprocess import check_call, CalledProcessError, call, check_output
from charmhelpers.core.hookenv import status_set, log, status_get, config
from charmhelpers.core import hookenv
from charmhelpers.core.host import adduser, chownr, mkdir

au = ArchiveUrlFetchHandler()


@when_not('solr.installed')
def install_solr():
    version = config('solr_version')
    adduser('solr')
    au.download(
        "http://archive.apache.org/dist/lucene/solr/{0}/solr-{0}.tgz".format(
            version), "/tmp/solr.tgz")
    mkdir('/opt/solr')
    check_output([
        'tar', 'xvfz', "/tmp/solr.tgz", '-C', "/opt/solr",
        '--strip-components=1'
    ])
    chownr('/opt/solr', 'solr', 'solr', chowntopdir=True)
    set_state('solr.installed')


#@when('zookeeper.joined')
#@when_not('zookeeper.ready')
# need to check for cloud setup
def wait_for_zookeeper(zookeeper):
    """
Example #30
    def configure(self):
        '''
        Configure spark environment for all users
        '''
        spark_home = self.dist_config.path('spark')
        spark_bin = spark_home / 'bin'

        # handle tuning options that may be set as percentages
        driver_mem = '1g'
        req_driver_mem = hookenv.config()['driver_memory']
        executor_mem = '1g'
        req_executor_mem = hookenv.config()['executor_memory']
        if req_driver_mem.endswith('%'):
            if self.is_spark_local():
                mem_mb = host.get_total_ram() / 1024 / 1024
                req_percentage = float(req_driver_mem.strip('%')) / 100
                driver_mem = str(int(mem_mb * req_percentage)) + 'm'
            else:
                hookenv.log(
                    "driver_memory percentage in non-local mode. Using 1g default.",
                    level=None)
        else:
            driver_mem = req_driver_mem

        if req_executor_mem.endswith('%'):
            if self.is_spark_local():
                mem_mb = host.get_total_ram() / 1024 / 1024
                req_percentage = float(req_executor_mem.strip('%')) / 100
                executor_mem = str(int(mem_mb * req_percentage)) + 'm'
            else:
                hookenv.log(
                    "executor_memory percentage in non-local mode. Using 1g default.",
                    level=None)
        else:
            executor_mem = req_executor_mem

        # update environment variables
        with utils.environment_edit_in_place('/etc/environment') as env:
            if spark_bin not in env['PATH']:
                env['PATH'] = ':'.join([env['PATH'], spark_bin])
            env['MASTER'] = self.get_master()
            env['PYSPARK_DRIVER_PYTHON'] = "ipython"
            env['SPARK_CONF_DIR'] = self.dist_config.path('spark_conf')
            env['SPARK_DRIVER_MEMORY'] = driver_mem
            env['SPARK_EXECUTOR_MEMORY'] = executor_mem
            env['SPARK_HOME'] = spark_home
            env['SPARK_JAR'] = "hdfs:///user/ubuntu/share/lib/spark-assembly.jar"

        # update spark config
        spark_conf = self.dist_config.path(
            'spark_conf') / 'spark-defaults.conf'
        utils.re_edit_in_place(
            spark_conf, {
                r'.*spark.master *.*':
                'spark.master {}'.format(self.get_master()),
                r'.*spark.eventLog.enabled *.*':
                'spark.eventLog.enabled true',
                r'.*spark.eventLog.dir *.*':
                'spark.eventLog.dir hdfs:///user/ubuntu/directory',
            })
        spark_env = self.dist_config.path('spark_conf') / 'spark-env.sh'
        local_ip = utils.resolve_private_address(hookenv.unit_private_ip())
        utils.re_edit_in_place(
            spark_env, {
                r'.*SPARK_DRIVER_MEMORY.*':
                'SPARK_DRIVER_MEMORY={}'.format(driver_mem),
                r'.*SPARK_EXECUTOR_MEMORY.*':
                'SPARK_EXECUTOR_MEMORY={}'.format(executor_mem),
                r'.*SPARK_LOG_DIR.*':
                'SPARK_LOG_DIR={}'.format(self.dist_config.path('spark_logs')),
                r'.*SPARK_MASTER_IP.*':
                'SPARK_MASTER_IP={}'.format(local_ip),
                r'.*SPARK_WORKER_DIR.*':
                'SPARK_WORKER_DIR={}'.format(
                    self.dist_config.path('spark_work')),
            })

        # manage SparkBench
        install_sb = hookenv.config()['spark_bench_enabled']
        sb_dir = '/home/ubuntu/spark-bench'
        if install_sb:
            if utils.cpu_arch() == 'ppc64le':
                sb_url = hookenv.config()['spark_bench_ppc64le']
            else:
                # TODO: may need more arch cases (go with x86 sb for now)
                sb_url = hookenv.config()['spark_bench_x86_64']

            Path(sb_dir).rmtree_p()
            fetcher = ArchiveUrlFetchHandler()
            fetcher.install(sb_url, '/home/ubuntu')

            # #####
            # Handle glob if we use a .tgz that doesn't expand to sb_dir
            # sb_archive_dir = glob('/home/ubuntu/spark-bench-*')[0]
            # SparkBench expects to live in ~/spark-bench, so put it there
            # Path(sb_archive_dir).rename(sb_dir)
            # #####

            # comment out mem tunings (let them come from /etc/environment)
            sb_env = Path(sb_dir) / 'conf/env.sh'
            utils.re_edit_in_place(
                sb_env, {
                    r'^SPARK_DRIVER_MEMORY.*':
                    '# SPARK_DRIVER_MEMORY (use value from environment)',
                    r'^SPARK_EXECUTOR_MEMORY.*':
                    '# SPARK_EXECUTOR_MEMORY (use value from environment)',
                })
        else:
            Path(sb_dir).rmtree_p()
Example #31
def create_virt_env():
    """
    Checks if latest version is installed or else imports the new virtual env
    And installs the Datamover package.
    """
    usr = config('tvault-datamover-ext-usr')
    grp = config('tvault-datamover-ext-group')
    path = config('tvault-datamover-virtenv')
    venv_path = config('tvault-datamover-virtenv-path')
    tv_ip = config('triliovault-ip')
    dm_ver = None
    # create virtenv dir(/home/tvault) if it does not exist
    mkdir(path, owner=usr, group=grp, perms=501, force=True)

    latest_dm_ver = get_new_version('tvault-contego')
    if dm_ver == latest_dm_ver:
        log("Latest TrilioVault DataMover package is already installed,"
            " exiting")
        return True

    # Create virtual environment for DataMover
    handler = ArchiveUrlFetchHandler()
    try:
        # remove old venv if it exists
        os.system('rm -rf {}'.format(venv_path))
        venv_src = 'http://{}:8081/packages/queens_ubuntu'\
                   '/tvault-contego-virtenv.tar.gz'.format(tv_ip)
        venv_dest = path
        handler.install(venv_src, venv_dest)
        log("Virtual Environment installed successfully")
    except Exception as e:
        log("Failed to install Virtual Environment")
        return False

    # Get dependent libraries paths
    try:
        cmd = ['/usr/bin/python', 'files/trilio/get_pkgs.py']
        sym_link_paths = check_output(cmd).decode('utf-8').strip().split('\n')
    except Exception as e:
        log("Failed to get the dependent packages--{}".format(e))
        return False

    # Install TrilioVault Datamover package
    if not install_plugin(tv_ip, latest_dm_ver, '/usr'):
        return False

    # Create symlinks of the dependent libraries
    venv_pkg_path = '{}/lib/python2.7/site-packages/'.format(venv_path)
    os.system('rm -rf {}/cryptography'.format(venv_pkg_path))
    os.system('rm -rf {}/cffi'.format(venv_pkg_path))

    symlink(sym_link_paths[0], '{}/cryptography'.format(venv_pkg_path))
    symlink(sym_link_paths[2], '{}/cffi'.format(venv_pkg_path))

    os.system('cp {} {}/libvirtmod.so'.format(sym_link_paths[1],
                                              venv_pkg_path))
    os.system('cp {} {}/_cffi_backend.so'.format(sym_link_paths[3],
                                                 venv_pkg_path))

    # change virtenv dir(/home/tvault) users to nova
    chownr(path, usr, grp)

    # Copy Trilio sudoers and filters files
    os.system('cp files/trilio/trilio_sudoers /etc/sudoers.d/')
    os.system('cp files/trilio/trilio.filters /etc/nova/rootwrap.d/')

    return True
Example #32
    def fetch_bigtop_release(self):
        # download Bigtop release; unpack the recipes
        bigtop_url = self.options.get('bigtop_release_url')
        Path(self.bigtop_dir).rmtree_p()
        au = ArchiveUrlFetchHandler()
        au.install(bigtop_url, self.bigtop_dir)