Example #1
 def __init__(self, dist_config):
     self.user = '******'
     self.dist_config = dist_config
     self.resources = {
         'livy': 'livy-%s' % utils.cpu_arch(),
     }
     self.verify_resources = utils.verify_resources(*self.resources.values())
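A note on the pattern these examples share: utils.verify_resources(*names) checks nothing at construction time. It returns a zero-argument callable that verifies (and fetches) the named resources when invoked, which is why several of the later examples call the stored attribute with a trailing (). A minimal usage sketch, assuming the snippets come from Juju charms built on the jujubigdata and charmhelpers libraries, and that Livy is a hypothetical class wrapping the __init__ above:

    # Hypothetical call site; Livy and the status message are illustrative.
    from charmhelpers.core import hookenv
    from jujubigdata import utils

    livy = Livy(utils.DistConfig())
    if not livy.verify_resources():  # the stored attribute is callable
        hookenv.status_set('blocked', 'unable to fetch required resources')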
Example #2
 def __init__(self, dist_config):
     self.dist_config = dist_config
     self.resources = {
         'flume': 'flume-%s' % host.cpu_arch(),
         'zookeeper': 'zookeeper-%s' % host.cpu_arch(),
     }
     self.verify_resources = utils.verify_resources(*self.resources.values())
Example #3
 def __init__(self, dist_config):
     self.dist_config = dist_config
     self.resources = {
         'kafka': 'kafka-%s' % host.cpu_arch(),
     }
     self.verify_resources = utils.verify_resources(
         *self.resources.values())
Example #4
 def __init__(self, dist_config):
     self.dist_config = dist_config
     self.resources = {
         'spark': 'spark-%s' % utils.cpu_arch(),
     }
     self.verify_resources = utils.verify_resources(
         *self.resources.values())
Example #5
 def __init__(self, dist_config=None):
     self.dist_config = dist_config or utils.DistConfig()
     self.resources = {
         'kafka': 'kafka-%s' % utils.cpu_arch(),
     }
     self.verify_resources = utils.verify_resources(
         *self.resources.values())
Example #6
 def __init__(self, dist_config):
     self.dist_config = dist_config
     self.cpu_arch = utils.cpu_arch()
     self.resources = {
         'hive': 'hive-%s' % self.cpu_arch,
     }
     self.verify_resources = utils.verify_resources(*self.resources.values())
Example #7
 def __init__(self, dist_config=None, user='******'):
     self.user = user
     self.dist_config = dist_config or utils.DistConfig()
     self.resources = {
         'flume': 'flume-%s' % utils.cpu_arch(),
     }
     self.verify_resources = utils.verify_resources(*self.resources.values())
Example #8
 def __init__(self, dist_config):
     self.dist_config = dist_config
     self.resources = {
         'java-installer': 'java-installer',
         'pig': 'pig-noarch',
     }
     self.verify_resources = utils.verify_resources(*self.resources.values())
Example #9
 def __init__(self, dist_config):
     self.dist_config = utils.DistConfig(filename='dist.yaml',
                                         required_keys=['vendor', 'packages',
                                                        'dirs', 'ports'])
     self.resources = {
         'spark': 'spark-%s' % host.cpu_arch(),
     }
     self.verify_resources = utils.verify_resources(*self.resources.values())
Example #10
 def __init__(self, dist_config):
     self.dist_config = dist_config
     self.resources = {
         'java-installer': 'java-installer',
         'pig': 'pig-noarch',
     }
     self.verify_resources = utils.verify_resources(
         *self.resources.values())
Example #11
 def __init__(self, dist_config=None, user='******'):
     self.user = user
     self.dist_config = dist_config or utils.DistConfig()
     self.resources = {
         'flume': 'flume-%s' % utils.cpu_arch(),
     }
     self.verify_resources = utils.verify_resources(
         *self.resources.values())
Example #12
 def __init__(self, dist_config):
     self.dist_config = dist_config
     self.resources = {
         'flume': 'flume-%s' % host.cpu_arch(),
         'zookeeper': 'zookeeper-%s' % host.cpu_arch(),
     }
     self.verify_resources = utils.verify_resources(
         *self.resources.values())
Example #13
 def __init__(self, dist_config):
     self.user = '******'
     self.dist_config = dist_config
     self.resources = {
         'livy': 'livy-%s' % utils.cpu_arch(),
     }
     self.verify_resources = utils.verify_resources(
         *self.resources.values())
Example #14
 def __init__(self, dist_config):
     self.dist_config = dist_config
     self.cpu_arch = utils.cpu_arch()
     self.resources = {
         'hue': 'hue-{}'.format(self.cpu_arch),
     }
     self.verify_resources = utils.verify_resources(*self.resources.values())
     self.hue_config = ''.join((self.dist_config.path('hue'), '/desktop/conf/hue.ini'))
Example #15
 def __init__(self, dist_config):
     self.dist_config = dist_config
     self.cpu_arch = utils.cpu_arch()
     self.resources = {
         'hue': 'hue-%s' % self.cpu_arch,
     }
     self.verify_resources = utils.verify_resources(
         *self.resources.values())
Example #16
    def __init__(self, hadoop_version, dist_config):
        self.dist_config = dist_config
        self.hadoop_version = hadoop_version
        self.cpu_arch = utils.cpu_arch()

        self.resources = {
            'gobblin': 'gobblin-hadoop_%s-%s' % (hadoop_version, self.cpu_arch),
        }
        self.verify_resources = utils.verify_resources(*self.resources.values())
Example #17
    def __init__(self, hadoop_version, dist_config):
        self.dist_config = dist_config
        self.cpu_arch = utils.cpu_arch()

        self.resources = {
            'gobblin':
            'gobblin-hadoop_%s-%s' % (hadoop_version, self.cpu_arch),
        }
        self.verify_resources = utils.verify_resources(
            *self.resources.values())
Example #18
 def __init__(self, dist_config=None):
     self.dist_config = dist_config or utils.DistConfig()
     self.resources = {
         'flume': 'flume',
     }
     zk_res = 'zookeeper-%s' % utils.cpu_arch()
     if jujuresources.resource_defined(zk_res):
         self.resources['zookeeper'] = zk_res
     self.verify_resources = utils.verify_resources(
         *self.resources.values())
Example #19
 def verify_resources(self):
     try:
         filename = hookenv.resource_get('zeppelin')
         if not filename:
             return False
         if Path(filename).size == 0:
             # Work around a charm store resource upload issue by falling
             # back to pulling from S3: NotImplementedError is raised
             # deliberately so the except branch below acts as the fallback.
             raise NotImplementedError()
         return True
     except NotImplementedError:
         if not jujuresources.resource_defined(self.resources['zeppelin']):
             return False
         return utils.verify_resources(*self.resources.values())()
Example #20
    def extract_spark_binary(self, resource_key, version):
        spark_path = "{}-{}".format(self.dist_config.path('spark'), version)
        resource = self.resources[resource_key]
        if not utils.verify_resources(*[self.resources[resource_key]])():
            raise ResourceError("Failed to fetch Spark {} binary".format(version))
        jujuresources.install(resource,
                              destination=spark_path,
                              skip_top_level=True)

        default_conf = Path("{}/conf".format(spark_path))
        spark_conf_orig = Path("{}/conf.orig".format(spark_path))
        spark_conf_orig.rmtree_p()
        default_conf.copytree(spark_conf_orig)

        return spark_path
Example #21
    def extract_spark_binary(self, resource_key, version):
        spark_path = "{}-{}".format(self.dist_config.path('spark'), version)
        resource = self.resources[resource_key]
        if not utils.verify_resources(*[self.resources[resource_key]])():
            raise ResourceError(
                "Failed to fetch Spark {} binary".format(version))
        jujuresources.install(resource,
                              destination=spark_path,
                              skip_top_level=True)

        default_conf = Path("{}/conf".format(spark_path))
        spark_conf_orig = Path("{}/conf.orig".format(spark_path))
        spark_conf_orig.rmtree_p()
        default_conf.copytree(spark_conf_orig)

        return spark_path
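Both versions of extract_spark_binary above copy the shipped conf directory to conf.orig before any configuration is changed, preserving a pristine snapshot that later code can compare against. A hypothetical call site (the Spark class, resource key, and version string are illustrative):

    # Spark is assumed to be the class that owns extract_spark_binary.
    from charmhelpers.core import hookenv

    spark = Spark(dist_config)
    spark_path = spark.extract_spark_binary('spark', '2.1.0')
    hookenv.log('Spark unpacked to {}'.format(spark_path))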
Example #22
    def __init__(self, dist_config):
        self.dist_config = dist_config
        self.charm_config = hookenv.config()
        self.cpu_arch = utils.cpu_arch()
        self.client_spec = {
            'hadoop': self.dist_config.hadoop_version,
        }

        # dist_config will have simple validation done on primary keys in the
        # dist.yaml, but we need to ensure deeper values are present.
        required_dirs = [
            'hadoop', 'hadoop_conf', 'hdfs_log_dir', 'mapred_log_dir',
            'yarn_log_dir'
        ]
        missing_dirs = set(required_dirs) - set(self.dist_config.dirs.keys())
        if missing_dirs:
            raise ValueError(
                'dirs option in {} is missing required entr{}: {}'.format(
                    self.dist_config.yaml_file,
                    'ies' if len(missing_dirs) > 1 else 'y',
                    ', '.join(missing_dirs)))

        # Build a list of hadoop resources needed from resources.yaml
        self.resources = {
            'java-installer': 'java-installer',
            'hadoop': 'hadoop-%s' % (self.cpu_arch),
        }
        hadoop_version = self.dist_config.hadoop_version
        versioned_res = 'hadoop-%s-%s' % (hadoop_version, self.cpu_arch)
        if jujuresources.resource_defined(versioned_res):
            self.resources['hadoop'] = versioned_res

        # LZO compression for hadoop is distributed separately. Add it to the
        # list of reqs if defined in resources.yaml
        lzo_res = 'hadoop-lzo-%s' % self.cpu_arch
        if jujuresources.resource_defined(lzo_res):
            self.resources['lzo'] = lzo_res

        # Verify and fetch the required hadoop resources
        self.verify_resources = utils.verify_resources(
            *self.resources.values())
        self.verify_conditional_resources = self.verify_resources  # for backwards compat
Example #23
    def __init__(self, dist_config):
        self.dist_config = dist_config
        self.charm_config = hookenv.config()
        self.cpu_arch = host.cpu_arch()
        self.client_spec = {
            'hadoop': self.dist_config.hadoop_version,
        }

        # dist_config will have simple validation done on primary keys in the
        # dist.yaml, but we need to ensure deeper values are present.
        required_dirs = ['hadoop', 'hadoop_conf', 'hdfs_log_dir',
                         'yarn_log_dir']
        missing_dirs = set(required_dirs) - set(self.dist_config.dirs.keys())
        if missing_dirs:
            raise ValueError('dirs option in {} is missing required entr{}: {}'.format(
                self.dist_config.yaml_file,
                'ies' if len(missing_dirs) > 1 else 'y',
                ', '.join(missing_dirs)))

        # Build a list of hadoop resources needed from resources.yaml
        hadoop_resources = []
        hadoop_version = self.dist_config.hadoop_version
        try:
            jujuresources.resource_path('hadoop-%s-%s' % (hadoop_version, self.cpu_arch))
            hadoop_resources.append('hadoop-%s-%s' % (hadoop_version, self.cpu_arch))
        except KeyError:
            hadoop_resources.append('hadoop-%s' % (self.cpu_arch))

        # LZO compression for hadoop is distributed separately. Add it to the
        # list of reqs if defined in resources.yaml
        try:
            jujuresources.resource_path('hadoop-lzo-%s' % self.cpu_arch)
            hadoop_resources.append('hadoop-lzo-%s' % (self.cpu_arch))
        except KeyError:
            pass

        # Verify and fetch the required hadoop resources
        self.verify_conditional_resources = utils.verify_resources(*hadoop_resources)
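Examples #22 and #23 implement the same conditional lookup two ways: #22 asks jujuresources.resource_defined() for a boolean, while #23 calls jujuresources.resource_path() and treats the resulting KeyError as "not defined". The pattern distills to a small helper; a sketch assuming hadoop_version and cpu_arch are bound as in the examples (optional_resource is a name introduced here, not from the source):

    import jujuresources

    def optional_resource(name, fallback):
        # Prefer the exact (e.g. versioned) resource when resources.yaml
        # defines it; otherwise use the generic build.
        return name if jujuresources.resource_defined(name) else fallback

    resources = {
        'hadoop': optional_resource(
            'hadoop-%s-%s' % (hadoop_version, cpu_arch),  # versioned build
            'hadoop-%s' % cpu_arch),                      # generic fallback
    }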
Example #24
    def install(self, force=False):
        '''
        Fetch and install Zeppelin, creating its directories. This method
        is intended to be called only once.

        :param bool force: Force the installation even if this is not
            the first installation attempt.
        '''
        destination = self.dist_config.path('zeppelin')

        if not self.verify_resources():
            return False

        if destination.exists() and not force:
            return True

        try:
            filename = hookenv.resource_get('zeppelin')
            if not filename:
                return False
            if Path(filename).size == 0:
                # Work around a charm store resource upload issue by falling
                # back to pulling from S3: NotImplementedError is raised
                # deliberately so the except branch below acts as the fallback.
                raise NotImplementedError()
            destination.rmtree_p()  # if reinstalling
            extracted = Path(fetch.install_remote('file://' + filename))
            extracted.dirs()[0].copytree(destination)  # only copy nested dir
        except NotImplementedError:
            if not jujuresources.resource_defined(self.resources['zeppelin']):
                return False
            if not utils.verify_resources(*self.resources.values())():
                return False
            jujuresources.install(self.resources['zeppelin'],
                                  destination=destination,
                                  skip_top_level=True)

        self.dist_config.add_dirs()
        self.dist_config.add_packages()
        return True
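Example #24 hides both acquisition paths behind a single install() call: a resource attached to the charm when one is available, and the jujuresources/S3 definition otherwise. A hypothetical hook-side sketch (the Zeppelin class and status messages are illustrative):

    from charmhelpers.core import hookenv

    zeppelin = Zeppelin(dist_config)
    if zeppelin.install():
        hookenv.status_set('maintenance', 'zeppelin installed')
    else:
        hookenv.status_set('blocked', 'waiting for the zeppelin resource')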
Example #25
    def __init__(self, dist_config):
        self.dist_config = dist_config
        self.charm_config = hookenv.config()
        self.cpu_arch = utils.cpu_arch()
        self.client_spec = {
            'hadoop': self.dist_config.hadoop_version,
        }

        # dist_config will have simple validation done on primary keys in the
        # dist.yaml, but we need to ensure deeper values are present.
        required_dirs = ['hadoop', 'hadoop_conf', 'hdfs_log_dir',
                         'mapred_log_dir', 'yarn_log_dir']
        missing_dirs = set(required_dirs) - set(self.dist_config.dirs.keys())
        if missing_dirs:
            raise ValueError('dirs option in {} is missing required entr{}: {}'.format(
                self.dist_config.yaml_file,
                'ies' if len(missing_dirs) > 1 else 'y',
                ', '.join(missing_dirs)))

        # Build a list of hadoop resources needed from resources.yaml
        self.resources = {
            'java-installer': 'java-installer',
            'hadoop': 'hadoop-%s' % (self.cpu_arch),
        }
        hadoop_version = self.dist_config.hadoop_version
        versioned_res = 'hadoop-%s-%s' % (hadoop_version, self.cpu_arch)
        if jujuresources.resource_defined(versioned_res):
            self.resources['hadoop'] = versioned_res

        # LZO compression for hadoop is distributed separately. Add it to the
        # list of reqs if defined in resources.yaml
        lzo_res = 'hadoop-lzo-%s' % self.cpu_arch
        if jujuresources.resource_defined(lzo_res):
            self.resources['lzo'] = lzo_res

        # Verify and fetch the required hadoop resources
        self.verify_resources = utils.verify_resources(*self.resources.values())
        self.verify_conditional_resources = self.verify_resources  # for backwards compat
Example #26
 def __init__(self, dist_config=None):
     self.dist_config = dist_config or utils.DistConfig()
     self.resources = {
         'datafellas-notebook': 'datafellas-notebook-%s' % utils.cpu_arch(),
     }
     self.verify_resources = utils.verify_resources(*self.resources.values())
Example #27
    def __init__(self, hadoop_version, dist_config):
        self.dist_config = dist_config
        self.cpu_arch = utils.cpu_arch()

        self.resources = {"gobblin": "gobblin-hadoop_%s-%s" % (hadoop_version, self.cpu_arch)}
        self.verify_resources = utils.verify_resources(*self.resources.values())
Example #28
 def __init__(self, dist_config=None):
     self.dist_config = dist_config or utils.DistConfig()
     self.resources = {
         'kafka': 'kafka-%s' % utils.cpu_arch(),
     }
     self.verify_resources = utils.verify_resources(*self.resources.values())
Example #29
 def __init__(self, dist_config):
     self.dist_config = dist_config
     self.resources = {
         'kafka': 'kafka-%s' % host.cpu_arch(),
     }
     self.verify_resources = utils.verify_resources(*self.resources.values())
Example #30
    def __init__(self, dist_config):
        self.dist_config = dist_config
        self.cpu_arch = utils.cpu_arch()

        self.resources = {"livy": "livy-%s" % self.cpu_arch}
        self.verify_resources = utils.verify_resources(*self.resources.values())
Example #31
 def __init__(self, dist_config):
     self.dist_config = dist_config
     self.resources = {
         'oozie': 'oozie-%s' % utils.cpu_arch(),
     }
     self.verify_resources = utils.verify_resources(*self.resources.values())
Example #32
 def __init__(self, dist_config):
     self.dist_config = dist_config
     self.resources = {
         'spark': 'spark-noarch',
     }
     self.verify_resources = utils.verify_resources(*self.resources.values())
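A closing note on the naming convention visible across these examples: resources built per CPU architecture carry a cpu_arch() suffix (e.g. 'kafka-x86_64' on a 64-bit Intel machine, an assumed value), while architecture-independent artifacts use a literal 'noarch' suffix, as in this example and the 'pig-noarch' entries of Examples #8 and #10.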