Example #1
 def install(self, force=False):
     """Add dirs from dist.yaml and install Pig and java."""
     self.dist_config.add_dirs()
     jujuresources.install(self.resources['pig'],
                           destination=self.dist_config.path('pig'),
                           skip_top_level=True)
     install_java()
Example #2
 def install(self):
     self.dist_config.add_users()
     self.dist_config.add_dirs()
     jujuresources.install(self.resources['zookeeper'],
                           destination=self.dist_config.path('zookeeper'),
                           skip_top_level=True)
     self.setup_zookeeper_config()
Example #3
 def install(self):
     self.dist_config.add_users()
     self.dist_config.add_dirs()
     jujuresources.install(self.resources['zookeeper'],
                           destination=self.dist_config.path('zookeeper'),
                           skip_top_level=True)
     self.setup_zookeeper_config()
Example #4
 def install(self, force=False):
     """Add dirs from dist.yaml and install Pig and java."""
     self.dist_config.add_dirs()
     jujuresources.install(self.resources['pig'],
                           destination=self.dist_config.path('pig'),
                           skip_top_level=True)
     install_java()
Example #5
 def install(self):
     self.dist_config.add_users()
     self.dist_config.add_dirs()
     jujuresources.install(self.resources['kafka'],
                           destination=self.dist_config.path('kafka'),
                           skip_top_level=True)
     self.setup_kafka_config()
Example #6
 def install(self):
     self.dist_config.add_users()
     self.dist_config.add_dirs()
     jujuresources.install(self.resources['kafka'],
                           destination=self.dist_config.path('kafka'),
                           skip_top_level=True)
     self.setup_kafka_config()
Example #7
    def install(self):
        self.dist_config.add_users()
        self.dist_config.add_dirs()
        jujuresources.install(self.resources['livy'],
                              destination=self.dist_config.path('livy'),
                              skip_top_level=False)

        livy_bin = self.dist_config.path('livy') / 'bin'
        with utils.environment_edit_in_place('/etc/environment') as env:
            if livy_bin not in env['PATH']:
                env['PATH'] = ':'.join([env['PATH'], livy_bin])
            # Following classpath comes from `hadoop classpath` and should be fixed
            hadoop_cp = '/etc/hadoop/conf:/usr/lib/hadoop/share/hadoop/common/lib/*:/usr/lib/hadoop/share/hadoop/common/*\
:/usr/lib/hadoop/share/hadoop/hdfs:/usr/lib/hadoop/share/hadoop/hdfs/lib/*\
:/usr/lib/hadoop/share/hadoop/hdfs/*:/usr/lib/hadoop/share/hadoop/yarn/lib/*\
:/usr/lib/hadoop/share/hadoop/yarn/*:/usr/lib/hadoop/share/hadoop/mapreduce/lib/*\
:/usr/lib/hadoop/share/hadoop/mapreduce/*:/usr/lib/hadoop/contrib/capacity-scheduler/*.jar'

            env['CLASSPATH'] = hadoop_cp

        cmd = "chown -R {}:hadoop {}".format(self.user,
                                             self.dist_config.path('livy'))
        call(cmd.split())
        cmd = "chown -R {}:hadoop {}".format(
            self.user, self.dist_config.path('livy_conf'))
        call(cmd.split())
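The comment in the block above flags the hard-coded CLASSPATH as the output of `hadoop classpath`. A minimal sketch of querying it at runtime instead (my assumption, not part of the original charm):

from subprocess import check_output

def hadoop_classpath():
    # `hadoop classpath` prints the colon-separated conf/jar path in use,
    # so reading it at runtime avoids the brittle hard-coded string above.
    return check_output(['hadoop', 'classpath']).decode('utf-8').strip()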
Example #8
    def install(self, force=False):
        jujuresources.install(self.resources['oozie'],
                              destination=self.dist_config.path('oozie'),
                              skip_top_level=True)
        self.dist_config.add_users()
        self.dist_config.add_dirs()
        self.dist_config.add_packages()

        # ExtJS v2.2 should go under self.dist_config.path('ext22')
        jujuresources.fetch('ext22')
        src = jujuresources.resource_path('ext22')
        dest = self.dist_config.path('ext22')
        shutil.copy(src, dest)

        # self.dist_config.path('ext22') should also contain all files under
        # self.dist_config.path('oozie') / 'libtools'
        src = self.dist_config.path('oozie') / 'libtools'
        src_files = os.listdir(src)
        for file_name in src_files:
            full_file_name = os.path.join(src, file_name)
            if os.path.isfile(full_file_name):
                shutil.copy(full_file_name, dest)

        self.setup_oozie_config()
        self.configure_oozie_hdfs()
        self.set_oozie_env()
        self.build_oozie_sharelib()
        self.build_oozie_war_file()
        self.build_oozie_db()
Example #9
 def install(self, force=False):
     if not force and self.is_installed():
         return
     self.dist_config.add_users()
     self.dist_config.add_dirs()
     jujuresources.install(self.resources['flink'],
                           destination=self.dist_config.path('flink'),
                           skip_top_level=True)
     # self.setup_flink_config()
     unitdata.kv().set('flink.installed', True)
Example #10
    def install(self):
        jujuresources.install(self.resources['hue'],
                              destination=self.dist_config.path('hue'),
                              skip_top_level=True)

        self.dist_config.add_users()
        self.dist_config.add_dirs()
        self.dist_config.add_packages()
        chownr(self.dist_config.path('hue'), 'hue', 'hadoop')
        unitdata.kv().set('hue.installed', True)
Example #11
    def install(self, force=False):
        if not force and self.is_installed():
            return
        self.pre_install()
        jujuresources.install(self.resources['hue'],
                              destination=self.dist_config.path('hue'),
                              skip_top_level=True)
        self.dist_config.add_users()
        self.dist_config.add_dirs()

        unitdata.kv().set('hue.installed', True)
        unitdata.kv().flush(True)
Example #12
    def install(self, force=False):
        if not force and self.is_installed():
            return
        jujuresources.install(self.resources['hue'],
                              destination=self.dist_config.path('hue'),
                              skip_top_level=True)

        self.dist_config.add_users()
        self.dist_config.add_dirs()
        self.dist_config.add_packages()
        chownr(self.dist_config.path('hue'), 'hue', 'hadoop')
        unitdata.kv().set('hue.installed', True)
Example #13
    def install(self, force=False):
        if not force and self.is_installed():
            return
        jujuresources.install(self.resources['hue'],
                              destination=self.dist_config.path('hue'),
                              skip_top_level=True)

        self.dist_config.add_users()
        self.dist_config.add_dirs()
        self.dist_config.add_packages()
        chownr(self.dist_config.path('hue'), 'hue', 'hadoop')
        unitdata.kv().set('hue.installed', True)
Example #14
    def install(self):
        '''
        Create the users and directories. This method is to be called only once.
        '''
        jujuresources.install(self.resources['flume'],
                              destination=self.dist_config.path('flume'),
                              skip_top_level=True)
        self.dist_config.add_users()
        self.dist_config.add_dirs()
        self.dist_config.add_packages()
        self.setup_flume_config()
Example #15
    def install(self):
        '''
        Create the users and directories. This method is to be called only once.
        '''
        jujuresources.install(self.resources['flume'],
                              destination=self.dist_config.path('flume'),
                              skip_top_level=True)
        self.dist_config.add_users()
        self.dist_config.add_dirs()
        self.dist_config.add_packages()
        self.setup_flume_config()
Example #16
    def install(self, force=False):
        if not force and self.is_installed():
            return
        self.dist_config.add_dirs()
        self.dist_config.add_packages()
        jujuresources.install(self.resources['zeppelin'],
                              destination=self.dist_config.path('zeppelin'),
                              skip_top_level=True)
        self.setup_zeppelin_config()
        self.setup_zeppelin_tutorial()

        unitdata.kv().set('zeppelin.installed', True)
        unitdata.kv().flush(True)
Example #17
    def install_hadoop(self):
        jujuresources.install(self.resources['hadoop'],
                              destination=self.dist_config.path('hadoop'),
                              skip_top_level=True)

        # Install our lzo compression codec if it's defined in resources.yaml
        if 'lzo' in self.resources:
            jujuresources.install(self.resources['lzo'],
                                  destination=self.dist_config.path('hadoop'),
                                  skip_top_level=False)
        else:
            msg = ("The hadoop-lzo-%s resource was not found."
                   "LZO compression will not be available." % self.cpu_arch)
            hookenv.log(msg)
Example #18
def bootstrap():
    hookenv.status_set('maintenance', 'Installing base resources')
    apt_install(['python-pip', 'git'])  # git used for testing unreleased version of libs
    check_call(['pip', 'install', '-U', 'pip'])  # 1.5.1 (trusty) pip fails on --download with git repos
    mirror_url = hookenv.config('resources_mirror')
    if not jujuresources.fetch(mirror_url=mirror_url):
        missing = jujuresources.invalid()
        hookenv.status_set('blocked', 'Unable to fetch required resource%s: %s' % (
            's' if len(missing) > 1 else '',
            ', '.join(missing),
        ))
        return
    jujuresources.install(['pathlib', 'jujubigdata'])
    set_state('bootstrapped')
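For context, a hedged sketch of how the 'bootstrapped' state set above typically gates later work in a charms.reactive charm (the handler name is hypothetical):

from charms.reactive import when
from charmhelpers.core import hookenv

@when('bootstrapped')
def install_application():
    # Runs only after bootstrap() has fetched and installed the base resources.
    hookenv.status_set('maintenance', 'Installing application')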
Example #19
    def install_hadoop(self):
        jujuresources.install(self.resources['hadoop'],
                              destination=self.dist_config.path('hadoop'),
                              skip_top_level=True)

        # Install our lzo compression codec if it's defined in resources.yaml
        if 'lzo' in self.resources:
            jujuresources.install(self.resources['lzo'],
                                  destination=self.dist_config.path('hadoop'),
                                  skip_top_level=False)
        else:
            msg = ("The hadoop-lzo-%s resource was not found."
                   "LZO compression will not be available." % self.cpu_arch)
            hookenv.log(msg)
Example #20
    def install(self, force=False):
        if not force and self.is_installed():
            return

        jujuresources.install(self.resources['zookeeper'],
                              destination=self.dist_config.path('zookeeper'),
                              skip_top_level=True)

        self.dist_config.add_users()
        utils.disable_firewall()
        self.dist_config.add_dirs()
        self.dist_config.add_packages()
        self.setup_zookeeper_config()
        self.configure_zookeeper()
        unitdata.kv().set('zookeeper.installed', True)
Example #21
 def install(self, force=False):
     '''
     Create the users and directories. This method is to be called only once.
     
     :param bool force: Force the installation execution even if this is not the first installation attempt.
     '''
     if not force and self.is_installed():
         return
     jujuresources.install(self.resources['flume'],
                           destination=self.dist_config.path('flume'),
                           skip_top_level=True)
     self.dist_config.add_users()
     self.dist_config.add_dirs()
     self.setup_flume_config()
     unitdata.kv().set('flume_hdfs.installed', True)
Example #22
    def extract_spark_binary(self, resource_key, version):
        spark_path = "{}-{}".format(self.dist_config.path('spark'), version)
        resource = self.resources[resource_key]
        if not utils.verify_resources(resource)():
            raise ResourceError("Failed to fetch Spark {} binary".format(version))
        jujuresources.install(resource,
                              destination=spark_path,
                              skip_top_level=True)

        default_conf = Path("{}/conf".format(spark_path))
        spark_conf_orig = Path("{}/conf.orig".format(spark_path))
        spark_conf_orig.rmtree_p()
        default_conf.copytree(spark_conf_orig)

        return spark_path
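A brief usage sketch for the helper above (resource keys and versions are hypothetical); note that it snapshots the packaged conf directory to conf.orig so later reconfiguration can start from pristine defaults:

# Called from another method of the same class, e.g. to install two
# Spark versions side by side (keys must exist in resources.yaml):
spark_16 = self.extract_spark_binary('spark-1.6.1', '1.6.1')
spark_20 = self.extract_spark_binary('spark-2.0.0', '2.0.0')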
Example #23
    def install(self, force=False):
        if not force and self.is_installed():
            return

        jujuresources.install(self.resources['zookeeper'],
                              destination=self.dist_config.path('zookeeper'),
                              skip_top_level=True)

        self.dist_config.add_users()
        utils.disable_firewall()
        self.dist_config.add_dirs()
        self.dist_config.add_packages()
        self.setup_zookeeper_config()
        self.configure_zookeeper()
        unitdata.kv().set('zookeeper.installed', True)
Example #24
 def install(self, force=False):
     if not force and self.is_installed():
         return
     jujuresources.install(self.resources['flume'],
                           destination=self.dist_config.path('flume'),
                           skip_top_level=True)
     # FlumeSource needs the zookeeper jars
     jujuresources.install(self.resources['zookeeper'],
                           destination=self.dist_config.path('zookeeper'),
                           skip_top_level=True)
     self.dist_config.add_users()
     self.dist_config.add_dirs()
     self.dist_config.add_packages()
     self.setup_flume_config()
     self.configure_flume()
     unitdata.kv().set('flume_kafka.installed', True)
Example #25
    def extract_spark_binary(self, resource_key, version):
        spark_path = "{}-{}".format(self.dist_config.path('spark'), version)
        resource = self.resources[resource_key]
        if not utils.verify_resources(resource)():
            raise ResourceError(
                "Failed to fetch Spark {} binary".format(version))
        jujuresources.install(resource,
                              destination=spark_path,
                              skip_top_level=True)

        default_conf = Path("{}/conf".format(spark_path))
        spark_conf_orig = Path("{}/conf.orig".format(spark_path))
        spark_conf_orig.rmtree_p()
        default_conf.copytree(spark_conf_orig)

        return spark_path
Example #26
    def install(self, force=False):
        '''
        Create the users and directories. This method is to be called only once.

        :param bool force: Force the execution of the installation even if this is not the first installation attempt.
        '''
        if not force and self.is_installed():
            return
        jujuresources.install(self.resources['gobblin'],
                              destination=self.dist_config.path('gobblin'),
                              skip_top_level=True)
        self.dist_config.add_users()
        self.dist_config.add_dirs()

        unitdata.kv().set('gobblin.installed', True)
        unitdata.kv().flush(True)
Example #27
 def install(self, force=False):
     if not force and self.is_installed():
         return
     jujuresources.install(self.resources['flume'],
                           destination=self.dist_config.path('flume'),
                           skip_top_level=True)
     # FlumeSource needs the zookeeper jars
     jujuresources.install(self.resources['zookeeper'],
                           destination=self.dist_config.path('zookeeper'),
                           skip_top_level=True)
     self.dist_config.add_users()
     self.dist_config.add_dirs()
     self.dist_config.add_packages()
     self.setup_flume_config()
     self.configure_flume()
     unitdata.kv().set('flume_kafka.installed', True)
Example #28
    def install(self, force=False):
        """
        Create the directories. This method is to be called only once.

        :param bool force: Force the execution of the installation even if this is not the first installation attempt.
        """
        if not force and self.is_installed():
            return

        jujuresources.install(self.resources["livy"], destination=self.dist_config.path("livy"), skip_top_level=True)
        self.dist_config.add_dirs()
        self.dist_config.add_users()
        self.dist_config.add_packages()

        unitdata.kv().set("livy.prepared", True)
        unitdata.kv().flush(True)
Example #29
    def install(self, force=False):
        if not force and self.is_installed():
            return
        jujuresources.install(self.resources['spark'],
                              destination=self.dist_config.path('spark'),
                              skip_top_level=True)

        self.dist_config.add_users()
        self.dist_config.add_dirs()
        self.dist_config.add_packages()

        # allow ubuntu user to ssh to itself so spark can ssh to its worker
        # in local/standalone modes
        utils.install_ssh_key('ubuntu', utils.get_ssh_key('ubuntu'))

        unitdata.kv().set('spark.installed', True)
        unitdata.kv().flush(True)
Example #30
    def install(self, force=False):
        if not force and self.is_installed():
            return
        jujuresources.install(self.resources['spark'],
                              destination=self.dist_config.path('spark'),
                              skip_top_level=True)

        self.dist_config.add_users()
        self.dist_config.add_dirs()
        self.dist_config.add_packages()

        # allow ubuntu user to ssh to itself so spark can ssh to its worker
        # in local/standalone modes
        utils.install_ssh_key('ubuntu', utils.get_ssh_key('ubuntu'))

        unitdata.kv().set('spark.installed', True)
        unitdata.kv().flush(True)
Example #31
    def install(self, force=False):
        if not force and self.is_installed():
            return
        self.dist_config.add_dirs()
        self.dist_config.add_packages()
        jujuresources.install(self.resources['spark'],
                              destination=self.dist_config.path('spark'),
                              skip_top_level=True)
        self.setup_spark_config()
        self.install_demo()

        # create hdfs storage space
        utils.run_as('hdfs', 'hdfs', 'dfs', '-mkdir', '-p', '/user/ubuntu/directory')
        utils.run_as('hdfs', 'hdfs', 'dfs', '-chown', '-R', 'ubuntu:hadoop', '/user/ubuntu/directory')

        unitdata.kv().set('spark.installed', True)
        unitdata.kv().flush(True)
Example #32
def bootstrap_resources():
    """
    Attempt to load and install resources defined in resources.yaml
    """
    if unitdata.kv().get('charm.bootstrapped', False):
        return True
    hookenv.status_set('maintenance', 'Installing base resources')
    mirror_url = jujuresources.config_get('resources_mirror')
    if not jujuresources.fetch(mirror_url=mirror_url):
        missing = jujuresources.invalid()
        hookenv.status_set('blocked', 'Unable to fetch required resource%s: %s' % (
            's' if len(missing) > 1 else '',
            ', '.join(missing),
        ))
        return False
    jujuresources.install(['pathlib', 'jujubigdata'])
    unitdata.kv().set('charm.bootstrapped', True)
    return True
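Because bootstrap_resources() returns a boolean, hooks in these charms typically short-circuit until it succeeds. A minimal sketch (the hook name is hypothetical):

def install_hook():
    if not bootstrap_resources():
        return  # status is already 'blocked'; the next hook invocation retries
    # ... proceed with installation steps that need the fetched resources ...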
Example #33
def bootstrap_resources():
    """
    Install required resources defined in resources.yaml
    """
    if unitdata.kv().get('charm.bootstrapped', False):
        return True
    hookenv.status_set('maintenance', 'Installing base resources')
    mirror_url = jujuresources.config_get('resources_mirror')
    if not jujuresources.fetch(mirror_url=mirror_url):
        missing = jujuresources.invalid()
        hookenv.status_set('blocked', 'Unable to fetch required resource%s: %s' % (
            's' if len(missing) > 1 else '',
            ', '.join(missing),
        ))
        return False
    jujuresources.install(['pathlib', 'jujubigdata'])
    unitdata.kv().set('charm.bootstrapped', True)
    return True
Example #34
    def install_hadoop(self):
        hadoop_version = self.dist_config.hadoop_version
        try:
            jujuresources.install('hadoop-%s-%s' %
                                  (hadoop_version,
                                   self.cpu_arch),
                                  destination=self.dist_config.path('hadoop'),
                                  skip_top_level=True)
        except KeyError:
            hookenv.log("Falling back to non-version specific download of hadoop...")
            jujuresources.install('hadoop-%s' %
                                  (self.cpu_arch),
                                  destination=self.dist_config.path('hadoop'),
                                  skip_top_level=True)

        # Install our lzo compression codec if it's defined in resources.yaml
        try:
            jujuresources.install('hadoop-lzo-%s' % self.cpu_arch,
                                  destination=self.dist_config.path('hadoop'),
                                  skip_top_level=False)
            unitdata.kv().set('hadoop.lzo.installed', True)
        except KeyError:
            msg = ("The hadoop-lzo-%s resource was not found."
                   "LZO compression will not be available." % self.cpu_arch)
            hookenv.log(msg)
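Example #34 leans on jujuresources raising KeyError for resource names that are not defined in resources.yaml, and the same try/except idiom generalizes to any optional resource. A sketch under that assumption (names hypothetical):

try:
    # 'extras-x86_64' is a hypothetical optional entry in resources.yaml.
    jujuresources.install('extras-x86_64',
                          destination='/opt/extras',
                          skip_top_level=True)
except KeyError:
    hookenv.log('Optional extras resource not defined; skipping.')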
Example #35
    def install(self, force=False):
        '''
        Create the directories. This method is to be called only once.

        :param bool force: Force the execution of the installation even if this
        is not the first installation attempt.
        '''
        destination = self.dist_config.path('zeppelin')

        if not self.verify_resources():
            return False

        if destination.exists() and not force:
            return True

        try:
            filename = hookenv.resource_get('zeppelin')
            if not filename:
                return False
            if Path(filename).size == 0:
                # work around charm store resource upload issue
                # by falling-back to pulling from S3
                raise NotImplementedError()
            destination.rmtree_p()  # if reinstalling
            extracted = Path(fetch.install_remote('file://' + filename))
            extracted.dirs()[0].copytree(destination)  # only copy nested dir
        except NotImplementedError:
            if not jujuresources.resource_defined(self.resources['zeppelin']):
                return False
            if not utils.verify_resources(*self.resources.values())():
                return False
            jujuresources.install(self.resources['zeppelin'],
                                  destination=destination,
                                  skip_top_level=True)

        self.dist_config.add_dirs()
        self.dist_config.add_packages()
        return True
Example #36
    def install(self):
        self.dist_config.add_users()
        self.dist_config.add_dirs()
        jujuresources.install(self.resources['livy'],
                              destination=self.dist_config.path('livy'),
                              skip_top_level=False)

        livy_bin = self.dist_config.path('livy') / 'bin'
        with utils.environment_edit_in_place('/etc/environment') as env:
            if livy_bin not in env['PATH']:
                env['PATH'] = ':'.join([env['PATH'], livy_bin])
            # Following classpath comes from `hadoop classpath` and should be fixed
            hadoop_cp = '/etc/hadoop/conf:/usr/lib/hadoop/share/hadoop/common/lib/*:/usr/lib/hadoop/share/hadoop/common/*\
:/usr/lib/hadoop/share/hadoop/hdfs:/usr/lib/hadoop/share/hadoop/hdfs/lib/*\
:/usr/lib/hadoop/share/hadoop/hdfs/*:/usr/lib/hadoop/share/hadoop/yarn/lib/*\
:/usr/lib/hadoop/share/hadoop/yarn/*:/usr/lib/hadoop/share/hadoop/mapreduce/lib/*\
:/usr/lib/hadoop/share/hadoop/mapreduce/*:/usr/lib/hadoop/contrib/capacity-scheduler/*.jar'
            env['CLASSPATH'] = hadoop_cp

        cmd = "chown -R {}:hadoop {}".format(self.user, self.dist_config.path('livy'))
        call(cmd.split())
        cmd = "chown -R {}:hadoop {}".format(self.user, self.dist_config.path('livy_conf'))
        call(cmd.split())
Example #37
    def install(self):
        '''
        install Spark and add dependencies in dist-config
        '''
        self.dist_config.add_dirs()
        self.dist_config.add_packages()
        jujuresources.install(self.resources['spark'],
                              destination=self.dist_config.path('spark'),
                              skip_top_level=True)

        # allow ubuntu user to ssh to itself so spark can ssh to its worker
        # in local/standalone modes
        utils.install_ssh_key('ubuntu', utils.get_ssh_key('ubuntu'))

        # put the spark jar in hdfs
        spark_assembly_jar = glob(
            '{}/lib/spark-assembly-*.jar'.format(self.dist_config.path('spark'))
        )[0]
        utils.run_as('hdfs', 'hdfs', 'dfs', '-mkdir', '-p',
                     '/user/ubuntu/share/lib')
        try:
            utils.run_as('hdfs', 'hdfs', 'dfs', '-put', spark_assembly_jar,
                         '/user/ubuntu/share/lib/spark-assembly.jar')
        except CalledProcessError:
            hookenv.log("File exists")

        # create hdfs storage space for history server
        utils.run_as('hdfs', 'hdfs', 'dfs', '-mkdir', '-p',
                     '/user/ubuntu/directory')
        utils.run_as('hdfs', 'hdfs', 'dfs', '-chown', '-R', 'ubuntu:hadoop',
                     '/user/ubuntu/directory')

        # create hdfs storage space for spark-bench
        utils.run_as('hdfs', 'hdfs', 'dfs', '-mkdir', '-p',
                     '/user/ubuntu/spark-bench')
        utils.run_as('hdfs', 'hdfs', 'dfs', '-chown', '-R', 'ubuntu:hadoop',
                     '/user/ubuntu/spark-bench')
Example #38
    def install(self, force=False):
        if not force and self.is_installed():
            return
        jujuresources.install(self.resources['spark'],
                              destination=self.dist_config.path('spark'),
                              skip_top_level=True)

        # allow ubuntu user to ssh to itself so spark can ssh to its worker
        # in local/standalone modes
        utils.install_ssh_key('ubuntu', utils.get_ssh_key('ubuntu'))

        # put the spark jar in hdfs
        spark_assembly_jar = glob('{}/lib/spark-assembly-*.jar'.format(
                                  self.dist_config.path('spark')))[0]
        utils.run_as('hdfs', 'hdfs', 'dfs', '-mkdir', '-p',
                     '/user/ubuntu/share/lib')
        try:
            utils.run_as('hdfs', 'hdfs', 'dfs', '-put', spark_assembly_jar,
                         '/user/ubuntu/share/lib/spark-assembly.jar')
        except CalledProcessError:
            print ("File exists")

        # create hdfs storage space for history server
        utils.run_as('hdfs', 'hdfs', 'dfs', '-mkdir', '-p',
                     '/user/ubuntu/directory')
        utils.run_as('hdfs', 'hdfs', 'dfs', '-chown', '-R', 'ubuntu:hadoop',
                     '/user/ubuntu/directory')

        # create hdfs storage space for spark-bench
        utils.run_as('hdfs', 'hdfs', 'dfs', '-mkdir', '-p',
                     '/user/ubuntu/spark-bench')
        utils.run_as('hdfs', 'hdfs', 'dfs', '-chown', '-R', 'ubuntu:hadoop',
                     '/user/ubuntu/spark-bench')

        unitdata.kv().set('spark.installed', True)
        unitdata.kv().flush(True)
Example #39
    def install(self):
        '''
        install Spark and add dependencies in dist-config
        '''
        self.dist_config.add_dirs()
        self.dist_config.add_packages()
        jujuresources.install(self.resources['spark'],
                              destination=self.dist_config.path('spark'),
                              skip_top_level=True)

        # allow ubuntu user to ssh to itself so spark can ssh to its worker
        # in local/standalone modes
        utils.install_ssh_key('ubuntu', utils.get_ssh_key('ubuntu'))

        # put the spark jar in hdfs
        spark_assembly_jar = glob('{}/lib/spark-assembly-*.jar'.format(
            self.dist_config.path('spark')))[0]
        utils.run_as('hdfs', 'hdfs', 'dfs', '-mkdir', '-p',
                     '/user/ubuntu/share/lib')
        try:
            utils.run_as('hdfs', 'hdfs', 'dfs', '-put', spark_assembly_jar,
                         '/user/ubuntu/share/lib/spark-assembly.jar')
        except CalledProcessError:
            hookenv.log("File exists")

        # create hdfs storage space for history server
        utils.run_as('hdfs', 'hdfs', 'dfs', '-mkdir', '-p',
                     '/user/ubuntu/directory')
        utils.run_as('hdfs', 'hdfs', 'dfs', '-chown', '-R', 'ubuntu:hadoop',
                     '/user/ubuntu/directory')

        # create hdfs storage space for spark-bench
        utils.run_as('hdfs', 'hdfs', 'dfs', '-mkdir', '-p',
                     '/user/ubuntu/spark-bench')
        utils.run_as('hdfs', 'hdfs', 'dfs', '-chown', '-R', 'ubuntu:hadoop',
                     '/user/ubuntu/spark-bench')
Example #40
def install_sparkler():
    resource_key = "sparkler-0.1"
    resource = resources[resource_key]
    mkdir('/opt/sparkler/')
    jujuresources.install(resource, destination="/opt/sparkler/sparkler.jar")
    set_state('sparkler.installed')
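Taken together, these examples show the two destination styles of jujuresources.install: unpacking an archive into a directory, where skip_top_level=True appears to strip the archive's top-level folder, and copying a single-file resource to an exact path, as in Example #40. A minimal contrasting sketch with hypothetical resource names:

import jujuresources

# Archive resource: extract into a directory, dropping the top-level folder.
jujuresources.install('myapp', destination='/opt/myapp', skip_top_level=True)

# Single-file resource: install straight to a file path, as in Example #40.
jujuresources.install('myapp-jar', destination='/opt/myapp/app.jar')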