    def render_config(self, restart_trigger):
        """Render the domain specific LDAP configuration for the application
        """
        checksum = ch_host.file_hash(self.configuration_file)
        core.templating.render(source=KEYSTONE_CONF_TEMPLATE,
                               template_loader=os_templating.get_loader(
                                   'templates/', self.release),
                               target=self.configuration_file,
                               context=self.adapters_instance)

        tmpl_changed = (checksum != ch_host.file_hash(self.configuration_file))

        cert = hookenv.config('tls-ca-ldap')

        cert_changed = False
        if cert:
            ca_file = self.options.backend_ca_file
            old_cert_csum = ch_host.file_hash(ca_file)
            ch_host.write_file(ca_file,
                               cert,
                               owner='root',
                               group='root',
                               perms=0o644)
            cert_csum = ch_host.file_hash(ca_file)
            cert_changed = (old_cert_csum != cert_csum)

        if tmpl_changed or cert_changed:
            restart_trigger()
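A hedged sketch of how a caller might supply restart_trigger; the backend instance name and the 'apache2' service below are assumptions for illustration only, not taken from the original charm.

import charmhelpers.core.host as ch_host

# ldap_backend is assumed to be an instance of the class defining
# render_config(); 'apache2' stands in for whatever serves keystone here.
ldap_backend.render_config(
    restart_trigger=lambda: ch_host.service_restart('apache2'))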
Example #2
    def __call__(self, manager, service_name, event_name):
        pre_checksum = ''
        if self.on_change_action and os.path.isfile(self.target):
            pre_checksum = host.file_hash(self.target)
        service = manager.get_service(service_name)
        context = {'ctx': {}}
        for ctx in service.get('required_data', []):
            context.update(ctx)
            context['ctx'].update(ctx)

        result = templating.render(self.source,
                                   self.target,
                                   context,
                                   self.owner,
                                   self.group,
                                   self.perms,
                                   template_loader=self.template_loader)
        if self.on_change_action:
            if pre_checksum == host.file_hash(self.target):
                hookenv.log('No change detected: {}'.format(self.target),
                            hookenv.DEBUG)
            else:
                self.on_change_action()

        return result
Example #3
def update_certificates(module, cert, key, ca):
    certs_path = "/etc/contrail/ssl/{}".format(module)
    files = {
        "/certs/server.pem": (cert, 0o644),
        "/private/server-privkey.pem": (key, 0o640),
        "/certs/ca-cert.pem": (ca, 0o644)
    }
    # create common directories for the symlinks below;
    # this is needed for contrail-status
    _try_os(os.makedirs, "/etc/contrail/ssl/certs")
    _try_os(os.makedirs, "/etc/contrail/ssl/private")
    changed = False
    for fkey in files:
        cfile = certs_path + fkey
        data = files[fkey][0]
        old_hash = file_hash(cfile)
        save_file(cfile, data, perms=files[fkey][1])
        changed |= (old_hash != file_hash(cfile))
        # create symlink to common place
        _try_os(os.remove, "/etc/contrail/ssl" + fkey)
        _try_os(os.symlink, cfile, "/etc/contrail/ssl" + fkey)
    # apply strange permissions to certs to allow containers to read them
    # group 1011 is a hardcoded group id for internal contrail purposes
    if os.path.exists(certs_path + "/certs"):
        os.chmod(certs_path + "/certs", 0o755)
    if os.path.exists(certs_path + "/private"):
        os.chmod(certs_path + "/private", 0o750)
        os.chown(certs_path + "/private", 0, 1011)
    if key:
        os.chown(certs_path + "/private/server-privkey.pem", 0, 1011)

    return changed
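_try_os() is used above but not shown on this page; a minimal sketch of what it is assumed to do, namely run an os call and ignore OSError (directory already exists, file or symlink missing):

def _try_os(func, *args, **kwargs):
    # Assumed helper: call an os.* function and swallow OSError so that
    # repeated makedirs/remove/symlink calls are safe to retry.
    try:
        func(*args, **kwargs)
    except OSError:
        pass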
Example #4
def render_nimsoft_robot_config():
    """Create the nimbus.conf config file.

    Renders the appropriate template for the Nimbus Robot
    """
    # The v5 template is compatible with all versions < 6
    cfg_original_hash = file_hash(NIMBUS_ROBOT_CONFIG)
    context = {
        'hub': config("hub"),
        'domain': config("domain"),
        'hubip': config("hubip"),
        'hub_robot_name': config("hub-robot-name"),
        'secondary_domain': config("secondary-domain"),
        'secondary_hubip': config("secondary-hubip"),
        'secondary_hub': config("secondary-hub"),
        'secondary_hub_robot_name': config("secondary-hub-robot-name"),
        'private_address': unit_private_ip(),
        'hostname': os.uname()[1]
    }
    render('robot.cfg', NIMBUS_ROBOT_CONFIG, context=context)
    cfg_new_hash = file_hash(NIMBUS_ROBOT_CONFIG)

    rsync(charm_dir() + '/files/request_linux_prod.cfg',
          '/opt/nimsoft/request.cfg')

    # Install the nimbus service
    rsync(charm_dir() + '/files/nimbus.service',
          '/lib/systemd/system/nimbus.service')

    if cfg_original_hash != cfg_new_hash:
        service('restart', 'nimbus')
        status.active('nimbus ready.')
Example #5
def update_certificates(module, cert, key, ca):
    certs_path = "/etc/contrail/ssl/{}".format(module)
    # order is important: containers wait for key file as signal to start
    files = [
        ("/certs/ca-cert.pem", ca, 0o644),
        ("/certs/server.pem", cert, 0o644),
        ("/private/server-privkey.pem", key, 0o640),
    ]
    # create common directories for the symlinks below;
    # this is needed for contrail-status
    _try_os(os.makedirs, "/etc/contrail/ssl/certs")
    _try_os(os.makedirs, "/etc/contrail/ssl/private")
    # create dirs before the files appear so they get the correct permissions
    _try_os(os.makedirs, certs_path + "/certs", mode=0o755)
    _try_os(os.makedirs, certs_path + "/private", mode=0o750)
    changed = False
    for fname, data, perms in files:
        cfile = certs_path + fname
        old_hash = file_hash(cfile)
        save_file(cfile, data, perms=perms)
        changed |= (old_hash != file_hash(cfile))
        # re-create symlink to common place for contrail-status
        _try_os(os.remove, "/etc/contrail/ssl" + fname)
        _try_os(os.symlink, cfile, "/etc/contrail/ssl" + fname)
    return changed
Example #6
def render_filebeat_template():
    """Create the filebeat.yaml config file.

    Renders the appropriate template for the major version of filebeat that
    is installed.
    """
    # kube_logs requires access to k8s-related filesystem data. If configured,
    # don't try to start filebeat until that data is present.
    if config().get('kube_logs') and not os.path.exists(KUBE_CONFIG):
        status.maint('Waiting for: {}'.format(KUBE_CONFIG))
        return

    version = charms.apt.get_package_version('filebeat')[0]
    cfg_original_hash = file_hash(FILEBEAT_CONFIG)
    connections = render_without_context('filebeat-{}.yml'.format(version),
                                         FILEBEAT_CONFIG)
    cfg_new_hash = file_hash(FILEBEAT_CONFIG)

    # Ensure ssl files match config each time we render a new template
    manage_filebeat_logstash_ssl()
    remove_state('beat.render')

    if connections:
        if cfg_original_hash != cfg_new_hash:
            service('restart', 'filebeat')
        status.active('Filebeat ready.')
    else:
        # Stop the service when not connected to any log handlers.
        # NB: beat base layer will handle setting a waiting status
        service('stop', 'filebeat')
Example #7
def render_config_restart_on_changed(clustered, hosts, bootstrap=False):
    """Render mysql config and restart mysql service if file changes as a
    result.

    If bootstrap is True we do a bootstrap-pxc in order to bootstrap the
    percona cluster. This should only be performed once at cluster creation
    time.

    If percona is already bootstrapped we can get away with just ensuring that
    it is started so long as the new node to be added is guaranteed to have
    been restarted so as to apply the new config.
    """
    config_file = resolve_cnf_file()
    pre_hash = file_hash(config_file)
    render_config(clustered, hosts)
    create_binlogs_directory()
    update_db_rels = False
    if file_hash(config_file) != pre_hash or bootstrap:
        if bootstrap:
            bootstrap_pxc()
            # NOTE(dosaboy): this will not actually do anything if no cluster
            # relation id exists yet.
            notify_bootstrapped()
            update_db_rels = True
        else:
            delay = 1
            attempts = 0
            max_retries = 5
            # NOTE(dosaboy): avoid unnecessary restarts. Once mysql is started
            # it needn't be restarted when new units join the cluster since the
            # new units will join and apply their own config.
            if not seeded():
                action = service_restart
            else:
                action = service_start

            while not action('mysql'):
                if attempts == max_retries:
                    raise Exception("Failed to start mysql (max retries "
                                    "reached)")

                log("Failed to start mysql - retrying in %ss" % (delay),
                    WARNING)
                time.sleep(delay)
                delay += 2
                attempts += 1

        # If we get here we assume prior actions have succeeded, so always
        # mark this unit as seeded so that subsequent calls don't result
        # in a restart.
        mark_seeded()

        if update_db_rels:
            update_shared_db_rels()
    else:
        log("Config file '{}' unchanged".format(config_file), level=DEBUG)
Example #8
def render_config_restart_on_changed(hosts, bootstrap=False):
    """Render mysql config and restart mysql service if file changes as a
    result.

    If bootstrap is True we do a bootstrap-pxc in order to bootstrap the
    percona cluster. This should only be performed once at cluster creation
    time.

    If percona is already bootstrapped we can get away with just ensuring that
    it is started so long as the new node to be added is guaranteed to have
    been restarted so as to apply the new config.
    """
    config_file = resolve_cnf_file()
    pre_hash = file_hash(config_file)
    render_config(hosts)
    create_binlogs_directory()
    update_db_rels = False
    if file_hash(config_file) != pre_hash or bootstrap:
        if bootstrap:
            bootstrap_pxc()
            # NOTE(dosaboy): this will not actually do anything if no cluster
            # relation id exists yet.
            notify_bootstrapped()
            update_db_rels = True
        else:
            # NOTE(jamespage):
            # if mysql@bootstrap is running, then the native
            # bootstrap systemd service was used to start this
            # instance, and it was the initial seed unit;
            # stop the bootstrap service before restarting the normal mysqld
            if service_running('mysql@bootstrap'):
                service_stop('mysql@bootstrap')

            attempts = 0
            max_retries = 5

            cluster_wait()
            while not service_restart('mysql'):
                if attempts == max_retries:
                    raise Exception("Failed to start mysql (max retries "
                                    "reached)")

                log("Failed to start mysql - retrying per distributed wait",
                    WARNING)
                attempts += 1
                cluster_wait()

        # If we get here we assume prior actions have succeeded, so always
        # mark this unit as seeded so that subsequent calls don't result
        # in a restart.
        mark_seeded()

        if update_db_rels:
            update_client_db_relations()
    else:
        log("Config file '{}' unchanged".format(config_file), level=DEBUG)
Example #9
def render_config_restart_on_changed(clustered, hosts, bootstrap=False):
    """Render mysql config and restart mysql service if file changes as a
    result.

    If bootstrap is True we do a bootstrap-pxc in order to bootstrap the
    percona cluster. This should only be performed once at cluster creation
    time.

    If percona is already bootstrapped we can get away with just ensuring that
    it is started so long as the new node to be added is guaranteed to have
    been restarted so as to apply the new config.
    """
    pre_hash = file_hash(resolve_cnf_file())
    render_config(clustered, hosts)
    create_binlogs_directory()
    update_db_rels = False
    if file_hash(resolve_cnf_file()) != pre_hash or bootstrap:
        if bootstrap:
            service('stop', 'mysql')
            service('bootstrap-pxc', 'mysql')
            # NOTE(dosaboy): this will not actually do anything if no cluster
            # relation id exists yet.
            notify_bootstrapped()
            update_db_rels = True
        else:
            delay = 1
            attempts = 0
            max_retries = 5
            # NOTE(dosaboy): avoid unnecessary restarts. Once mysql is started
            # it needn't be restarted when new units join the cluster since the
            # new units will join and apply their own config.
            if not seeded():
                action = service_restart
            else:
                action = service_start

            while not action('mysql'):
                if attempts == max_retries:
                    raise Exception("Failed to start mysql (max retries "
                                    "reached)")

                log("Failed to start mysql - retrying in %ss" % (delay),
                    WARNING)
                time.sleep(delay)
                delay += 2
                attempts += 1

        # If we get here we assume prior actions have succeeded, so always
        # mark this unit as seeded so that subsequent calls don't result
        # in a restart.
        mark_seeded()

        if update_db_rels:
            update_shared_db_rels()
    else:
        log("Config file '{}' unchanged".format(resolve_cnf_file()),
            level=DEBUG)
Example #10
def render_config_restart_on_changed(hosts, bootstrap=False):
    """Render mysql config and restart mysql service if file changes as a
    result.

    If bootstrap is True we do a bootstrap-pxc in order to bootstrap the
    percona cluster. This should only be performed once at cluster creation
    time.

    If percona is already bootstrapped we can get away with just ensuring that
    it is started so long as the new node to be added is guaranteed to have
    been restarted so as to apply the new config.
    """
    config_file = resolve_cnf_file()
    pre_hash = file_hash(config_file)
    render_config(hosts)
    create_binlogs_directory()
    update_db_rels = False
    if file_hash(config_file) != pre_hash or bootstrap:
        if bootstrap:
            bootstrap_pxc()
            # NOTE(dosaboy): this will not actually do anything if no cluster
            # relation id exists yet.
            notify_bootstrapped()
            update_db_rels = True
        else:
            # NOTE(jamespage):
            # if mysql@bootstrap is running, then the native
            # bootstrap systemd service was used to start this
            # instance, and it was the initial seed unit;
            # stop the bootstrap service before restarting the normal mysqld
            if service_running('mysql@bootstrap'):
                service_stop('mysql@bootstrap')

            attempts = 0
            max_retries = 5

            cluster_wait()
            while not service_restart('mysql'):
                if attempts == max_retries:
                    raise Exception("Failed to start mysql (max retries "
                                    "reached)")

                log("Failed to start mysql - retrying per distributed wait",
                    WARNING)
                attempts += 1
                cluster_wait()

        # If we get here we assume prior actions have succeeded, so always
        # mark this unit as seeded so that subsequent calls don't result
        # in a restart.
        mark_seeded()

        if update_db_rels:
            update_client_db_relations()
    else:
        log("Config file '{}' unchanged".format(config_file), level=DEBUG)
Example #11
 def wrapped_f(*args):
     checksums = {}
     for path in restart_map:
         checksums[path] = file_hash(path)
     f(*args)
     restarts = []
     for path in restart_map:
         if checksums[path] != file_hash(path):
             restarts += restart_map[path]
     services_list = list(OrderedDict.fromkeys(restarts))
     for s_name in services_list:
         func(s_name)
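wrapped_f above is only the inner function; a hedged sketch of the enclosing decorator it is assumed to come from (the charm-helpers restart_on_change pattern), where restart_map maps config file paths to the services that depend on them and func is the restart callable:

from collections import OrderedDict

from charmhelpers.core.host import file_hash, service_restart


def restart_on_change(restart_map, func=service_restart):
    """Sketch only: restart services whose config files changed while the
    wrapped function ran."""
    def wrap(f):
        def wrapped_f(*args):
            checksums = {path: file_hash(path) for path in restart_map}
            f(*args)
            restarts = []
            for path in restart_map:
                if checksums[path] != file_hash(path):
                    restarts += restart_map[path]
            for s_name in OrderedDict.fromkeys(restarts):
                func(s_name)
        return wrapped_f
    return wrap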
Example #12
def apply_keystone_ca(module, ctx):
    ks_ca_path = "/etc/contrail/ssl/{}/keystone-ca-cert.pem".format(module)
    ks_ca_hash = file_hash(ks_ca_path)
    ks_ca = ctx.get("keystone_ssl_ca")
    save_file(ks_ca_path, ks_ca, 0o444)
    ks_ca_hash_new = file_hash(ks_ca_path)
    if ks_ca:
        ctx["keystone_ssl_ca_path"] = "/etc/contrail/ssl/keystone-ca-cert.pem"
    ca_changed = (ks_ca_hash != ks_ca_hash_new)
    if ca_changed:
        log("Keystone CA cert has been changed: {h1} != {h2}"
            .format(h1=ks_ca_hash, h2=ks_ca_hash_new))
    return ca_changed
Example #13
def update_certificates(cert, key, ca):
    # NOTE: store files in the default paths because there is no way to pass
    # a custom path to some of the components (sandesh)
    files = {"/etc/contrailctl/ssl/server.pem": cert,
             "/etc/contrailctl/ssl/server-privkey.pem": key,
             "/etc/contrailctl/ssl/ca-cert.pem": ca}
    changed = False
    for cfile in files:
        data = files[cfile]
        old_hash = file_hash(cfile)
        save_file(cfile, data)
        changed |= (old_hash != file_hash(cfile))

    return changed
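save_file() is used in this and several other snippets but not defined on this page; a minimal sketch under the assumption that it writes the data with the given permissions and removes the file when the data is empty (e.g. a withdrawn certificate):

import os


def save_file(path, data, perms=0o400):
    # Assumed behaviour: write the data and apply the permissions, or
    # remove the file when no data is supplied.
    if data:
        with open(path, 'w') as f:
            f.write(data)
        os.chmod(path, perms)
    elif os.path.exists(path):
        os.remove(path)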
Example #14
    def configure_examples(self):
        """
        Install sparkpi.sh and sample data to /home/ubuntu.

        The sparkpi.sh script demonstrates spark-submit with the SparkPi class
        included with Spark. This small script is packed into the spark charm
        source in the ./scripts subdirectory.

        The sample data is used for benchmarks (only PageRank for now). This
        may grow quite large in the future, so we utilize Juju Resources for
        getting this data onto the unit. Sample data originated as follows:

        - PageRank: https://snap.stanford.edu/data/web-Google.html
        """
        # Handle sparkpi.sh
        script_source = 'scripts/sparkpi.sh'
        script_path = Path(script_source)
        if script_path.exists():
            script_target = '/home/ubuntu/sparkpi.sh'
            new_hash = host.file_hash(script_source)
            old_hash = unitdata.kv().get('sparkpi.hash')
            if new_hash != old_hash:
                hookenv.log('Installing SparkPi script')
                script_path.copy(script_target)
                Path(script_target).chmod(0o755)
                Path(script_target).chown('ubuntu', 'hadoop')
                unitdata.kv().set('sparkpi.hash', new_hash)
                hookenv.log('SparkPi script was installed successfully')

        # Handle sample data
        sample_source = hookenv.resource_get('sample-data')
        sample_path = sample_source and Path(sample_source)
        if sample_path and sample_path.exists() and sample_path.stat().st_size:
            sample_target = '/home/ubuntu'
            new_hash = host.file_hash(sample_source)
            old_hash = unitdata.kv().get('sample-data.hash')
            if new_hash != old_hash:
                hookenv.log('Extracting Spark sample data')
                # Extract the sample data; since sample data does not impact
                # functionality, log any extraction error but don't fail.
                try:
                    archive.extract(sample_path, destpath=sample_target)
                except Exception:
                    hookenv.log(
                        'Unable to extract Spark sample data: {}'.format(
                            sample_path))
                else:
                    unitdata.kv().set('sample-data.hash', new_hash)
                    hookenv.log('Spark sample data was extracted successfully')
Example #15
        def wrapped_f(*args, **kwargs):
            checksums = {}
            for path in COROSYNC_CONF_FILES:
                checksums[path] = file_hash(path)
            return_data = f(*args, **kwargs)
            # NOTE: this assumes that this call is always done around
            # configure_corosync, which returns true if configuration
            # files were actually generated
            if return_data:
                for path in COROSYNC_CONF_FILES:
                    if checksums[path] != file_hash(path):
                        validated_restart_corosync()
                        break

            return return_data
Example #16
        def wrapped_f(*args, **kwargs):
            checksums = {}
            for path in COROSYNC_CONF_FILES:
                checksums[path] = file_hash(path)
            return_data = f(*args, **kwargs)
            # NOTE: this assumes that this call is always done around
            # configure_corosync, which returns true if configuration
            # files were actually generated
            if return_data:
                for path in COROSYNC_CONF_FILES:
                    if checksums[path] != file_hash(path):
                        validated_restart_corosync()
                        break

            return return_data
Example #17
    def configure_examples(self):
        """
        Install sparkpi.sh and sample data to /home/ubuntu.

        The sparkpi.sh script demonstrates spark-submit with the SparkPi class
        included with Spark. This small script is packed into the spark charm
        source in the ./scripts subdirectory.

        The sample data is used for benchmarks (only PageRank for now). This
        may grow quite large in the future, so we utilize Juju Resources for
        getting this data onto the unit. Sample data originated as follows:

        - PageRank: https://snap.stanford.edu/data/web-Google.html
        """
        # Handle sparkpi.sh
        script_source = 'scripts/sparkpi.sh'
        script_path = Path(script_source)
        if script_path.exists():
            script_target = '/home/ubuntu/sparkpi.sh'
            new_hash = host.file_hash(script_source)
            old_hash = unitdata.kv().get('sparkpi.hash')
            if new_hash != old_hash:
                hookenv.log('Installing SparkPi script')
                script_path.copy(script_target)
                Path(script_target).chmod(0o755)
                Path(script_target).chown('ubuntu', 'hadoop')
                unitdata.kv().set('sparkpi.hash', new_hash)
                hookenv.log('SparkPi script was installed successfully')

        # Handle sample data
        sample_source = hookenv.resource_get('sample-data')
        sample_path = sample_source and Path(sample_source)
        if sample_path and sample_path.exists() and sample_path.stat().st_size:
            sample_target = '/home/ubuntu'
            new_hash = host.file_hash(sample_source)
            old_hash = unitdata.kv().get('sample-data.hash')
            if new_hash != old_hash:
                hookenv.log('Extracting Spark sample data')
                # Extract the sample data; since sample data does not impact
                # functionality, log any extraction error but don't fail.
                try:
                    archive.extract(sample_path, destpath=sample_target)
                except Exception:
                    hookenv.log('Unable to extract Spark sample data: {}'
                                .format(sample_path))
                else:
                    unitdata.kv().set('sample-data.hash', new_hash)
                    hookenv.log('Spark sample data was extracted successfully')
Example #18
def download_archive():
    check_call(['apt-get', 'install', '-qy', 'unzip'])
    config = hookenv.config()
    call(['rm', '/tmp/ghost.zip'])
    cmd = ('wget', '-q', '-O', '/tmp/ghost.zip',
           'https://ghost.org/zip/ghost-{}.zip'.format(config['release']))
    hookenv.log("Downloading Ghost from: {}".format(' '.join(cmd)))
    check_call(cmd)

    if host.file_hash('/tmp/ghost.zip', 'sha256') != config['checksum']:
        hookenv.status_set(
            'blocked',
            'downloaded ghost checksums do not match, '
            'possible corrupt file!')
        sys.exit(0)

    # delete the app dir contents (but not the dir itself)
    dist_dir = node_dist_dir()
    for entry in listdir(dist_dir):
        # join with dist_dir so paths resolve regardless of the cwd
        entry_path = path.join(dist_dir, entry)
        if path.isfile(entry_path):
            unlink(entry_path)
        elif path.isdir(entry_path):
            rmtree(entry_path)

    cmd = ('unzip', '-uo', '/tmp/ghost.zip', '-d', dist_dir)
    hookenv.log("Extracting Ghost: {}".format(' '.join(cmd)))
    check_call(cmd)
Example #19
 def test_file_hash_missing(self, exists):
     filename = '/etc/missing.conf'
     exists.side_effect = [False]
     with patch_open() as (mock_open, mock_file):
         mock_file.read.return_value = self._hash_files[filename]
         result = host.file_hash(filename)
         self.assertEqual(result, None)
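The two file_hash() tests on this page (missing file here, explicit sha1 in Example #27) imply roughly the following behaviour; this is a sketch consistent with those tests, not the verbatim charm-helpers implementation:

import hashlib
import os


def file_hash(path, hash_type='md5'):
    # Sketch: return the hex digest of the file contents, or None when the
    # file does not exist (as test_file_hash_missing expects).
    if not os.path.exists(path):
        return None
    h = getattr(hashlib, hash_type)()
    with open(path, 'rb') as source:
        h.update(source.read())
    return h.hexdigest()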
Example #20
 def update_pools(self):
     # designate-manage communicates with designate via message bus so no
     # need to set OS_ vars
     # NOTE(AJK) this runs with every hook (once most relations are up) and
     # so if it fails it will be picked up by the next relation change or
     # update-status.  i.e. it will heal eventually.
     if hookenv.is_leader():
         try:
             cmd = "designate-manage pool update"
             # Note(tinwood) that this command may fail if the pools.yaml
             # doesn't actually contain any pools.  This happens when the
             # relation is broken, which errors out the charm.  This stops
             # this happening and logs the error.
             subprocess.check_call(cmd.split(), timeout=60)
             # Update leader db to trigger restarts
             hookenv.leader_set(
                 {'pool-yaml-hash': host.file_hash(POOLS_YAML)})
         except subprocess.CalledProcessError as e:
             hookenv.log("designate-manage pool update failed: {}".format(
                 str(e)))
         except subprocess.TimeoutExpired as e:
             # the timeout is if the rabbitmq server has gone away; it just
             # retries continuously; this lets the hook complete.
             hookenv.log(
                 "designate-manage pool command timed out: {}".format(
                     str(e)))
Example #21
 def update_pools(self):
     # designate-manage communicates with designate via message bus so no
     # need to set OS_ vars
     # NOTE(AJK) this runs with every hook (once most relations are up) and
     # so if it fails it will be picked up by the next relation change or
     # update-status.  i.e. it will heal eventually.
     if hookenv.is_leader():
         try:
             cmd = "designate-manage pool update"
             # Note(tinwood) that this command may fail if the pools.yaml
             # doesn't actually contain any pools.  This happens when the
             # relation is broken, which errors out the charm.  This stops
             # this happening and logs the error.
             subprocess.check_call(cmd.split(), timeout=60)
             # Update leader db to trigger restarts
             hookenv.leader_set(
                 {'pool-yaml-hash': host.file_hash(POOLS_YAML)})
         except subprocess.CalledProcessError as e:
             hookenv.log("designate-manage pool update failed: {}"
                         .format(str(e)))
         except subprocess.TimeoutExpired as e:
             # the timeout is if the rabbitmq server has gone away; it just
             # retries continuously; this lets the hook complete.
             hookenv.log("designate-manage pool command timed out: {}".
                         format(str(e)))
Example #22
    def contents_match(self, path):
        """Determines if the file content is the same.

        This is determined by comparing the hashsum of the file contents
        with the saved hashsum. If there is no saved hashsum, the contents
        cannot be assumed to be the same, so treat them as different.
        Otherwise, return True if the hashsums are the same and False if
        they are not.

        :param path: the file to check.
        """
        checksum = file_hash(path)

        kv = unitdata.kv()
        stored_checksum = kv.get('hardening:%s' % path)
        if not stored_checksum:
            # If the checksum hasn't been generated, return False to ensure
            # the file is written and the checksum stored.
            log('Checksum for %s has not been calculated.' % path, level=DEBUG)
            return False
        elif stored_checksum != checksum:
            log('Checksum mismatch for %s.' % path, level=DEBUG)
            return False

        return True
Example #23
    def templates_match(self, path):
        """Determines if the template files are the same.

        Template file equality is determined by the hashsum of the
        template files themselves. If there is no saved hashsum, the content
        cannot be assumed to be the same, so treat it as changed.
        Otherwise, return whether or not the hashsums are the same.

        :param path: the path to check
        :returns: boolean
        """
        template_path = get_template_path(self.template_dir, path)
        key = 'hardening:template:%s' % template_path
        template_checksum = file_hash(template_path)
        kv = unitdata.kv()
        stored_tmplt_checksum = kv.get(key)
        if not stored_tmplt_checksum:
            kv.set(key, template_checksum)
            kv.flush()
            log('Saved template checksum for %s.' % template_path,
                level=DEBUG)
            # Since we don't have a template checksum, then assume it doesn't
            # match and return that the template is different.
            return False
        elif stored_tmplt_checksum != template_checksum:
            kv.set(key, template_checksum)
            kv.flush()
            log('Updated template checksum for %s.' % template_path,
                level=DEBUG)
            return False

        # Here the template hasn't changed based upon the calculated
        # checksum of the template and what was previously stored.
        return True
Example #24
def copy_profile_into_place():
    """
    Copy the apparmor profiles included with the charm
    into the /etc/apparmor.d directory.

    Files are only copied if they have changed at source
    to avoid overwriting any aa-complain mode flags set

    :returns: flag indicating if any profiles were newly
              installed or changed
    :rtype: boolean
    """
    db = kv()
    changes = False
    apparmor_dir = os.path.join(os.sep, 'etc', 'apparmor.d')
    for x in glob.glob('files/apparmor/*'):
        db_key = 'hash:{}'.format(x)
        new_hash = file_hash(x)
        previous_hash = db.get(db_key)
        if new_hash != previous_hash:
            log('Installing apparmor profile for {}'.format(
                os.path.basename(x)))
            shutil.copy(x, apparmor_dir)
            db.set(db_key, new_hash)
            db.flush()
            changes = True
    return changes
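A hedged usage sketch for the helper above; reloading the apparmor service when profiles changed is an assumed follow-up action, not part of the original charm code:

from charmhelpers.core.host import service_reload

if copy_profile_into_place():
    # Assumed follow-up: re-read the updated profiles.
    service_reload('apparmor')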
Example #25
def copy_profile_into_place():
    """
    Copy the apparmor profiles included with the charm
    into the /etc/apparmor.d directory.

    Files are only copied if they have changed at source
    to avoid overwriting any aa-complain mode flags set

    :returns: flag indicating if any profiles were newly
              installed or changed
    :rtype: boolean
    """
    db = kv()
    changes = False
    apparmor_dir = os.path.join(os.sep, 'etc', 'apparmor.d')
    for x in glob.glob('files/apparmor/*'):
        db_key = 'hash:{}'.format(x)
        new_hash = file_hash(x)
        previous_hash = db.get(db_key)
        if new_hash != previous_hash:
            log('Installing apparmor profile for {}'
                .format(os.path.basename(x)))
            shutil.copy(x, apparmor_dir)
            db.set(db_key, new_hash)
            db.flush()
            changes = True
    return changes
Example #26
 def __call__(self, manager, service_name, event_name):
     pre_checksum = ''
     if self.on_change_action and os.path.isfile(self.target):
         pre_checksum = host.file_hash(self.target)
     service = manager.get_service(service_name)
     context = {}
     for ctx in service.get('required_data', []):
         context.update(ctx)
     templating.render(self.source, self.target, context,
                       self.owner, self.group, self.perms)
     if self.on_change_action:
         if pre_checksum == host.file_hash(self.target):
             hookenv.log(
                 'No change detected: {}'.format(self.target),
                 hookenv.DEBUG)
         else:
             self.on_change_action()
Example #27
 def test_file_hash_sha1(self, exists, sha1):
     filename = '/etc/exists.conf'
     exists.side_effect = [True]
     m = sha1()
     m.hexdigest.return_value = self._hash_files[filename]
     with patch_open() as (mock_open, mock_file):
         mock_file.read.return_value = self._hash_files[filename]
         result = host.file_hash(filename, hash_type='sha1')
         self.assertEqual(result, self._hash_files[filename])
Example #28
    def save_checksum(self, path):
        """Calculates and saves the checksum for the path specified.

        :param path: the path of the file to save the checksum.
        """
        checksum = file_hash(path)
        kv = unitdata.kv()
        kv.set('hardening:%s' % path, checksum)
        kv.flush()
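A hedged sketch of how contents_match() (Example #22) and save_checksum() above are assumed to be paired around a file write; the method name and render_func callable are illustrative only:

    def write_if_changed(self, path, render_func):
        # Illustrative pairing: only rewrite the file and refresh its
        # stored checksum when the current contents no longer match.
        if not self.contents_match(path):
            render_func(path)
            self.save_checksum(path)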
Example #29
 def any_changed():
     changed = False
     for filename in filenames:
         old_hash = unitdata.kv().get('reactive.when_file_changed.%s' % filename)
         new_hash = host.file_hash(filename, hash_type=kwargs.get('hash_type', 'md5'))
         if old_hash != new_hash:
             unitdata.kv().set('reactive.when_file_changed.%s' % filename, new_hash)
             changed = True  # mark as changed, but keep updating hashes
     return changed
Example #30
def render_nimsoft_robot_config():
    """
    Create the required config files.

    Renders the appropriate template for the Nimbus Robot
    """
    # The v5 template is compatible with all versions < 6
    cfg_original_hash = file_hash(ROBOT_CONFIG)
    context = {
        'hub': config("hub"),
        'domain': config("domain"),
        'hubip': config("hubip"),
        'hub_robot_name': config("hub-robot-name"),
        'secondary_domain': config("secondary-domain"),
        'secondary_hubip': config("secondary-hubip"),
        'secondary_hub': config("secondary-hub"),
        'secondary_hub_robot_name': config("secondary-hub-robot-name"),
        'private_address': unit_private_ip(),
        'hostname': os.uname()[1],
        'aa_profile_mode': config("aa-profile-mode")
    }

    # Render robot.cfg
    render(ROBOT_CONFIG, ROBOT_CONFIG_PATH, context=context)
    cfg_new_hash = file_hash(ROBOT_CONFIG)

    # Render request.cfg
    render(DIST_REQ, DIST_REQ_PATH, context=context)

    # Install the nimbus service
    rsync(charm_dir() + '/files/nimbus.service',
          '/lib/systemd/system/nimbus.service')

    # Render AppArmor profile
    render(NIMBUS_AA_PROFILE, NIMBUS_AA_PROFILE_PATH, context=context)

    # Set AppArmor context
    NimbusAppArmorContext().setup_aa_profile()

    if cfg_original_hash != cfg_new_hash:
        service('restart', 'nimbus')

    hookenv.status_set('active', 'ready')
Example #31
def tls_changed(cert, key, ca):
    files = {"/etc/contrail/ssl/certs/server.pem": cert,
             "/etc/contrail/ssl/private/server-privkey.pem": key,
             "/etc/contrail/ssl/certs/ca-cert.pem": ca}
    changed = False
    for cfile in files:
        data = files[cfile]
        old_hash = file_hash(cfile)
        _save_file(cfile, data)
        changed |= (old_hash != file_hash(cfile))

    if not changed:
        log("Certificates were not changed.")
        return

    log("Certificates were changed. Rewriting configs and restarting services.")
    config["ssl_enabled"] = (cert is not None and len(cert) > 0)
    config.save()
    write_configs()
    service_restart("contrail-vrouter-agent")
    service_restart("contrail-vrouter-nodemgr")
Example #32
def render_and_check(ctx, template, conf_file, do_check):
    """Returns True if configuration has been changed."""

    log("Render and store new configuration: " + conf_file)
    if do_check:
        try:
            with open(conf_file) as f:
                old_lines = set(f.readlines())
        except Exception:
            old_lines = set()

    ks_ca_path = "/etc/contrailctl/keystone-ca-cert.pem"
    ks_ca_hash = file_hash(ks_ca_path) if do_check else None
    ks_ca = ctx.get("keystone_ssl_ca")
    save_file(ks_ca_path, ks_ca, 0o444)
    ks_ca_hash_new = file_hash(ks_ca_path)
    if ks_ca:
        ctx["keystone_ssl_ca_path"] = ks_ca_path
    ca_changed = (ks_ca_hash != ks_ca_hash_new) if do_check else False
    if ca_changed:
        log("Keystone CA cert has been changed: {h1} != {h2}"
            .format(h1=ks_ca_hash, h2=ks_ca_hash_new))

    render(template, conf_file, ctx)
    if not do_check:
        return True
    with open(conf_file) as f:
        new_lines = set(f.readlines())
    new_set = new_lines.difference(old_lines)
    old_set = old_lines.difference(new_lines)
    if new_set or old_set:
        log("New lines:\n{new}".format(new="".join(new_set)))
        log("Old lines:\n{old}".format(old="".join(old_set)))
        log("Configuration file has been changed.")
    else:
        log("Configuration file has not been changed.")
    return ca_changed or new_set or old_set
Example #33
def render_filebeat_template():
    """Create the filebeat.yaml config file.

    Renders the appropriate template for the major version of filebeat that
    is installed.
    """
    # kube_logs requires access to a kubeconfig. If configured, log whether or
    # not we have enough to start collecting k8s metadata.
    if config().get('kube_logs'):
        if os.path.exists(KUBE_CONFIG):
            msg = 'Collecting k8s metadata.'
        else:
            msg = ('kube_logs=True, but {} does not exist. '
                   'No k8s metadata will be collected.'.format(KUBE_CONFIG))
        log(msg)

    # The v5 template is compatible with all versions < 6
    major = charms.apt.get_package_version('filebeat')[0]
    version = major if major.isdigit() and int(major) > 5 else "5"
    cfg_original_hash = file_hash(FILEBEAT_CONFIG)
    connections = render_without_context('filebeat-{}.yml'.format(version),
                                         FILEBEAT_CONFIG)
    cfg_new_hash = file_hash(FILEBEAT_CONFIG)

    # Ensure ssl files match config each time we render a new template
    manage_filebeat_logstash_ssl()
    remove_state('beat.render')

    if connections:
        if cfg_original_hash != cfg_new_hash:
            service('restart', 'filebeat')
        status.active('Filebeat ready.')
    else:
        # Stop the service when not connected to any log handlers.
        # NB: beat base layer will handle setting a waiting status
        service('stop', 'filebeat')
Example #34
def any_file_changed(filenames, hash_type='md5'):
    """
    Check if any of the given files have changed since the last time this
    was called.

    :param list filenames: Names of files to check.
    :param str hash_type: Algorithm to use to check the files.
    """
    changed = False
    for filename in filenames:
        old_hash = unitdata.kv().get('reactive.files_changed.%s' % filename)
        new_hash = host.file_hash(filename, hash_type=hash_type)
        if old_hash != new_hash:
            unitdata.kv().set('reactive.files_changed.%s' % filename, new_hash)
            changed = True  # mark as changed, but keep updating hashes
    return changed
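A hedged usage sketch for any_file_changed(); the config path and service name are placeholders:

from charmhelpers.core import host

if any_file_changed(['/etc/myapp/myapp.conf'], hash_type='sha256'):
    host.service_restart('myapp')  # placeholder service name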
Example #35
def download_archive():
    check_call(['apt-get', 'install', '-qy', 'unzip'])
    config = hookenv.config()
    ghost_source = hookenv.resource_get('ghost-stable')
    ghost_source_checksum = host.file_hash(ghost_source, 'sha256')
    if config.get('checksum', 0) == ghost_source_checksum:
        hookenv.log("Checksums match no need to extract source archive.")
        return

    kv.set('checksum', ghost_source_checksum)

    # delete the app dir contents (but not the dir itself)
    dist_dir = node_dist_dir()
    for entry in listdir(dist_dir):
        # join with dist_dir so paths resolve regardless of the cwd
        entry_path = path.join(dist_dir, entry)
        if path.isfile(entry_path):
            unlink(entry_path)
        elif path.isdir(entry_path):
            rmtree(entry_path)

    cmd = ('unzip', '-uo', ghost_source, '-d', dist_dir)
    hookenv.log("Extracting Ghost: {}".format(' '.join(cmd)))
    check_call(cmd)
Example #36
    def fetch_bigtop_release(self):
        """
        Unpack or clone the Bigtop repo.

        This will fetch the upstream source needed to deploy Bigtop
        applications. To support restricted networks where git cloning may
        not be possible, this method will first try to unpack the attached
        bigtop-repo resource. If this does not exist, it will fall back to
        cloning the upstream repo with an appropriate branch.

        The source will be available in the bigtop_base directory.
        """
        hookenv.status_set('maintenance', 'fetching bigtop source')
        Path(self.bigtop_base).rmtree_p()

        filename = hookenv.resource_get('bigtop-repo')
        filepath = filename and Path(filename)
        if filepath and filepath.exists() and filepath.stat().st_size:
            new_hash = file_hash(filename)
            old_hash = unitdata.kv().get('bigtop-repo.hash')
            if new_hash != old_hash:
                hookenv.status_set('maintenance', 'unzipping bigtop-repo')
                with chdir(filepath.dirname()):
                    try:
                        # NB: we cannot use the payload.archive helper because
                        # it relies on Zipfile.extractall, which doesn't
                        # preserve perms (https://bugs.python.org/issue15795).
                        # Subprocess an unzip the old-fashioned way.
                        utils.run_as('root', 'unzip', '-qo', filepath)
                    except subprocess.CalledProcessError as e:
                        hookenv.status_set('blocked',
                                           'failed to unzip bigtop-repo')
                        raise BigtopError(
                            u"Failed to unzip {}: {}".format(filepath, e))
                    else:
                        # We may not know the name of the archive's subdirs,
                        # but we always want to treat the dir with bigtop.bom
                        # as the source root dir. Copy this tree to bigtop_base.
                        for dirpath, dirs, files in os.walk(filepath.dirname()):
                            for name in files:
                                if name == 'bigtop.bom':
                                    Path(dirpath).copytree(
                                        self.bigtop_base, symlinks=True)
                                    break
                    unitdata.kv().set('bigtop-repo.hash', new_hash)
            else:
                hookenv.log('Resource bigtop-repo is unchanged')
        else:
            hookenv.status_set('maintenance', 'cloning bigtop repo')
            bigtop_repo = 'https://github.com/apache/bigtop.git'
            if self.bigtop_version == '1.1.0':
                bigtop_branch = 'branch-1.1'
            elif self.bigtop_version.startswith('1.2'):
                bigtop_branch = 'branch-1.2'
            elif self.bigtop_version.startswith('1.3'):
                bigtop_branch = 'branch-1.3'
            elif self.bigtop_version == 'master':
                bigtop_branch = 'master'
            else:
                raise BigtopError(
                    u"Unknown Bigtop version for repo branch: {}".format(self.bigtop_version))

            # NB: we cannot use the fetch.install_remote helper because that
            # relies on the deb-only python3-apt package. Subordinates cannot
            # install deb dependencies into their venv, so to ensure bigtop
            # subordinate charms succeed, subprocess the required git clone.
            try:
                utils.run_as('root', 'git', 'clone', bigtop_repo,
                             '--branch', bigtop_branch, '--single-branch',
                             self.bigtop_base)
            except subprocess.CalledProcessError as e:
                hookenv.status_set('blocked', 'failed to clone bigtop repo')
                raise BigtopError(
                    u"Failed to clone {}: {}".format(bigtop_repo, e))

        # Make sure the repo looks like we expect
        if Path(self.bigtop_base / 'bigtop.bom').exists():
            hookenv.status_set('waiting', 'bigtop source fetched')
        else:
            hookenv.status_set('blocked', 'invalid bigtop source')
            raise BigtopError(
                u"Unrecognized source repo in {}".format(self.bigtop_base))
Example #37
File: spark.py Project: apache/bigtop
def reinstall_spark(force=False):
    """
    Gather the state of our deployment and (re)install when leaders, hadoop,
    sparkpeers, or zookeepers change. In the future this should also
    fire when Cassandra or any other storage comes or goes. Config changed
    events will also call this method, but that is invoked with a separate
    handler below.

    Use a deployment-matrix dict to track changes and (re)install as needed.
    """
    spark_master_host = leadership.leader_get('master-fqdn')
    if not spark_master_host:
        hookenv.status_set('maintenance', 'juju leader not elected yet')
        return

    mode = hookenv.config()['spark_execution_mode']
    peers = None
    zks = None

    # If mode is standalone and ZK is ready, we are in HA. Do not consider
    # the master_host from juju leadership in our matrix. ZK handles this.
    if (mode == 'standalone' and is_state('zookeeper.ready')):
        spark_master_host = ''
        zk = RelationBase.from_state('zookeeper.ready')
        zks = zk.zookeepers()
        # peers are only used to set our MASTER_URL in standalone HA mode
        peers = get_spark_peers()

    # Construct a deployment matrix
    sample_data = hookenv.resource_get('sample-data')
    deployment_matrix = {
        'hdfs_ready': is_state('hadoop.hdfs.ready'),
        'peers': peers,
        'sample_data': host.file_hash(sample_data) if sample_data else None,
        'spark_master': spark_master_host,
        'yarn_ready': is_state('hadoop.yarn.ready'),
        'zookeepers': zks,
    }

    # No-op if we are not forcing a reinstall or our matrix is unchanged.
    if not (force or data_changed('deployment_matrix', deployment_matrix)):
        report_status()
        return

    # (Re)install based on our execution mode
    hookenv.status_set('maintenance', 'configuring spark in {} mode'.format(mode))
    hookenv.log("Configuring spark with deployment matrix: {}".format(deployment_matrix))

    if mode.startswith('yarn') and is_state('hadoop.yarn.ready'):
        install_spark_yarn()
    elif mode.startswith('local') or mode == 'standalone':
        install_spark_standalone(zks, peers)
    else:
        # Something's wrong (probably requested yarn without yarn.ready).
        remove_state('spark.started')
        report_status()
        return

    # restart services to pick up possible config changes
    spark = Spark()
    spark.stop()
    spark.start()

    set_state('spark.started')
    report_status()
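data_changed() above is assumed to behave much like any_file_changed() in Example #34, but keyed on an arbitrary value rather than a file; a hedged sketch of that assumed behaviour:

import hashlib
import json

from charmhelpers.core import unitdata


def data_changed(key, value, hash_type='md5'):
    # Sketch: hash a stable serialisation of the value, compare it with the
    # hash stored under the key, and record the new hash either way.
    old_hash = unitdata.kv().get('reactive.data_changed.%s' % key)
    serialised = json.dumps(value, sort_keys=True).encode('utf8')
    new_hash = getattr(hashlib, hash_type)(serialised).hexdigest()
    unitdata.kv().set('reactive.data_changed.%s' % key, new_hash)
    return old_hash != new_hash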
Example #38
    def __call__(self):
        if not config('neutron-plugin') == 'Calico':
            return {}

        for rid in relation_ids('etcd-proxy'):
            for unit in related_units(rid):
                rdata = relation_get(rid=rid, unit=unit)
                cluster_string = rdata.get('cluster')
                client_cert = rdata.get('client_cert')
                client_key = rdata.get('client_key')
                client_ca = rdata.get('client_ca')
                if cluster_string and client_cert and client_key and client_ca:
                    # We have all the information we need to run an etcd proxy,
                    # so we could generate and return a complete context.
                    #
                    # However, we don't need to restart the etcd proxy if it is
                    # already running, if there is overlap between the new
                    # 'cluster_string' and the peers that the proxy is already
                    # aware of, and if the TLS credentials are the same as the
                    # proxy already has.
                    #
                    # So, in this block of code we determine whether the etcd
                    # proxy needs to be restarted.  If it doesn't, we return a
                    # null context.  If it does, we generate and return a
                    # complete context with the information needed to do that.

                    # First determine the peers that the existing etcd proxy is
                    # aware of.
                    existing_peers = set([])
                    try:
                        peer_info = subprocess.check_output(
                            ['etcdctl', '--no-sync', 'member', 'list'])
                        for line in peer_info.split('\n'):
                            m = re.search('name=([^ ]+) peerURLs=([^ ]+)',
                                          line)
                            if m:
                                existing_peers.add('%s=%s' %
                                                   (m.group(1), m.group(2)))
                    except:
                        # Probably this means that the proxy was not already
                        # running.  We treat this the same as there being no
                        # existing peers.
                        log('"etcdctl --no-sync member list" call failed')

                    log('Existing etcd peers: %r' % existing_peers)

                    # Now get the peers indicated by the new cluster_string.
                    new_peers = set(cluster_string.split(','))
                    log('New etcd peers: %r' % new_peers)

                    if new_peers & existing_peers:
                        # New and existing peers overlap, so we probably don't
                        # need to restart the etcd proxy.  But check in case
                        # the TLS credentials have changed.
                        log('New and existing etcd peers overlap')

                        existing_cred_hash = (
                            (file_hash('/etc/neutron-api/etcd_cert') or '?') +
                            (file_hash('/etc/neutron-api/etcd_key') or '?') +
                            (file_hash('/etc/neutron-api/etcd_ca') or '?'))
                        log('Existing credentials: %s' % existing_cred_hash)

                        new_cred_hash = (data_hash(client_cert) +
                                         data_hash(client_key) +
                                         data_hash(client_ca))
                        log('New credentials: %s' % new_cred_hash)

                        if new_cred_hash == existing_cred_hash:
                            log('TLS credentials unchanged')
                            return {}

                    # We need to start or restart the etcd proxy, so generate a
                    # context with the new cluster string and TLS credentials.
                    return {
                        'cluster':
                        cluster_string,
                        'server_certificate':
                        self._save_data(client_cert,
                                        '/etc/neutron-api/etcd_cert'),
                        'server_key':
                        self._save_data(client_key,
                                        '/etc/neutron-api/etcd_key'),
                        'ca_certificate':
                        self._save_data(client_ca, '/etc/neutron-api/etcd_ca')
                    }

        return {}
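data_hash() is used above alongside file_hash() but not defined on this page; a minimal sketch under the assumption that it hashes in-memory data with the same default algorithm as file_hash(), so the two digests are comparable:

import hashlib


def data_hash(data, hash_type='md5'):
    # Assumed helper: hex digest of in-memory data (None stays None), so it
    # can be compared with file_hash() of a file written from that data.
    if data is None:
        return None
    if isinstance(data, str):
        data = data.encode('utf8')
    return getattr(hashlib, hash_type)(data).hexdigest()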
Example #39
    def __call__(self):
        if not config('neutron-plugin') == 'Calico':
            return {}

        for rid in relation_ids('etcd-proxy'):
            for unit in related_units(rid):
                rdata = relation_get(rid=rid, unit=unit)
                cluster_string = rdata.get('cluster')
                client_cert = rdata.get('client_cert')
                client_key = rdata.get('client_key')
                client_ca = rdata.get('client_ca')
                if cluster_string and client_cert and client_key and client_ca:
                    # We have all the information we need to run an etcd proxy,
                    # so we could generate and return a complete context.
                    #
                    # However, we don't need to restart the etcd proxy if it is
                    # already running, if there is overlap between the new
                    # 'cluster_string' and the peers that the proxy is already
                    # aware of, and if the TLS credentials are the same as the
                    # proxy already has.
                    #
                    # So, in this block of code we determine whether the etcd
                    # proxy needs to be restarted.  If it doesn't, we return a
                    # null context.  If it does, we generate and return a
                    # complete context with the information needed to do that.

                    # First determine the peers that the existing etcd proxy is
                    # aware of.
                    existing_peers = set([])
                    try:
                        peer_info = subprocess.check_output(['etcdctl',
                                                             '--no-sync',
                                                             'member',
                                                             'list'])
                        for line in peer_info.split('\n'):
                            m = re.search('name=([^ ]+) peerURLs=([^ ]+)',
                                          line)
                            if m:
                                existing_peers.add('%s=%s' % (m.group(1),
                                                              m.group(2)))
                    except:
                        # Probably this means that the proxy was not already
                        # running.  We treat this the same as there being no
                        # existing peers.
                        log('"etcdctl --no-sync member list" call failed')

                    log('Existing etcd peers: %r' % existing_peers)

                    # Now get the peers indicated by the new cluster_string.
                    new_peers = set(cluster_string.split(','))
                    log('New etcd peers: %r' % new_peers)

                    if new_peers & existing_peers:
                        # New and existing peers overlap, so we probably don't
                        # need to restart the etcd proxy.  But check in case
                        # the TLS credentials have changed.
                        log('New and existing etcd peers overlap')

                        existing_cred_hash = (
                            (file_hash('/etc/neutron-api/etcd_cert') or '?') +
                            (file_hash('/etc/neutron-api/etcd_key') or '?') +
                            (file_hash('/etc/neutron-api/etcd_ca') or '?')
                        )
                        log('Existing credentials: %s' % existing_cred_hash)

                        new_cred_hash = (
                            data_hash(client_cert) +
                            data_hash(client_key) +
                            data_hash(client_ca)
                        )
                        log('New credentials: %s' % new_cred_hash)

                        if new_cred_hash == existing_cred_hash:
                            log('TLS credentials unchanged')
                            return {}

                    # We need to start or restart the etcd proxy, so generate a
                    # context with the new cluster string and TLS credentials.
                    return {'cluster': cluster_string,
                            'server_certificate':
                            self._save_data(client_cert,
                                            '/etc/neutron-api/etcd_cert'),
                            'server_key':
                            self._save_data(client_key,
                                            '/etc/neutron-api/etcd_key'),
                            'ca_certificate':
                            self._save_data(client_ca,
                                            '/etc/neutron-api/etcd_ca')}

        return {}
Example #40
 def notify_kubeconfig_changed(self):
     kubeconfig_hash = file_hash(kubeclientconfig_path)
     for relation in self.relations:
         relation.to_publish_raw.update(
             {"kubeconfig-hash": kubeconfig_hash})
Example #41
def reinstall_spark(force=False):
    """
    Gather the state of our deployment and (re)install when leaders, hadoop,
    sparkpeers, or zookeepers change. In the future this should also
    fire when Cassandra or any other storage comes or goes. Config changed
    events will also call this method, but that is invoked with a separate
    handler below.

    Use a deployment-matrix dict to track changes and (re)install as needed.
    """
    spark_master_host = leadership.leader_get('master-fqdn')
    if not spark_master_host:
        hookenv.status_set('maintenance', 'juju leader not elected yet')
        return

    mode = hookenv.config()['spark_execution_mode']
    peers = None
    zks = None

    # If mode is standalone and ZK is ready, we are in HA. Do not consider
    # the master_host from juju leadership in our matrix. ZK handles this.
    if (mode == 'standalone' and is_state('zookeeper.ready')):
        spark_master_host = ''
        zk = RelationBase.from_state('zookeeper.ready')
        zks = zk.zookeepers()
        # peers are only used to set our MASTER_URL in standalone HA mode
        peers = get_spark_peers()

    # Construct a deployment matrix
    sample_data = hookenv.resource_get('sample-data')
    deployment_matrix = {
        'hdfs_ready': is_state('hadoop.hdfs.ready'),
        'peers': peers,
        'sample_data': host.file_hash(sample_data) if sample_data else None,
        'spark_master': spark_master_host,
        'yarn_ready': is_state('hadoop.yarn.ready'),
        'zookeepers': zks,
    }

    # No-op if we are not forcing a reinstall or our matrix is unchanged.
    if not (force or data_changed('deployment_matrix', deployment_matrix)):
        report_status()
        return

    # (Re)install based on our execution mode
    hookenv.status_set('maintenance',
                       'configuring spark in {} mode'.format(mode))
    hookenv.log("Configuring spark with deployment matrix: {}".format(
        deployment_matrix))

    if mode.startswith('yarn') and is_state('hadoop.yarn.ready'):
        install_spark_yarn()
    elif mode.startswith('local') or mode == 'standalone':
        install_spark_standalone(zks, peers)
    else:
        # Something's wrong (probably requested yarn without yarn.ready).
        remove_state('spark.started')
        report_status()
        return

    # restart services to pick up possible config changes
    spark = Spark()
    spark.stop()
    spark.start()

    set_state('spark.started')
    report_status()
Example #42
    def fetch_bigtop_release(self):
        """
        Unpack or clone the Bigtop repo.

        This will fetch the upstream source needed to deploy Bigtop
        applications. To support restricted networks where git cloning may
        not be possible, this method will first try to unpack the attached
        bigtop-repo resource. If this does not exist, it will fall back to
        cloning the upstream repo with an appropriate branch.

        The source will be available in the bigtop_base directory.
        """
        hookenv.status_set('maintenance', 'fetching bigtop source')
        Path(self.bigtop_base).rmtree_p()

        filename = hookenv.resource_get('bigtop-repo')
        filepath = filename and Path(filename)
        if filepath and filepath.exists() and filepath.stat().st_size:
            new_hash = file_hash(filename)
            old_hash = unitdata.kv().get('bigtop-repo.hash')
            if new_hash != old_hash:
                hookenv.status_set('maintenance', 'unzipping bigtop-repo')
                with chdir(filepath.dirname()):
                    try:
                        # NB: we cannot use the payload.archive helper because
                        # it relies on Zipfile.extractall, which doesn't
                        # preserve perms (https://bugs.python.org/issue15795).
                        # Subprocess an unzip the old-fashioned way.
                        utils.run_as('root', 'unzip', '-qo', filepath)
                    except subprocess.CalledProcessError as e:
                        hookenv.status_set('blocked',
                                           'failed to unzip bigtop-repo')
                        raise BigtopError(
                            u"Failed to unzip {}: {}".format(filepath, e))
                    else:
                        # We may not know the name of the archive's subdirs,
                        # but we always want to treat the dir with bigtop.bom
                        # as the source root dir. Copy this tree to bigtop_base.
                        for dirpath, dirs, files in os.walk(filepath.dirname()):
                            for name in files:
                                if name == 'bigtop.bom':
                                    Path(dirpath).copytree(
                                        self.bigtop_base, symlinks=True)
                                    break
                    unitdata.kv().set('bigtop-repo.hash', new_hash)
            else:
                hookenv.log('Resource bigtop-repo is unchanged')
        else:
            hookenv.status_set('maintenance', 'cloning bigtop repo')
            bigtop_repo = 'https://github.com/apache/bigtop.git'
            if self.bigtop_version == '1.1.0':
                bigtop_branch = 'branch-1.1'
            elif self.bigtop_version.startswith('1.2'):
                bigtop_branch = 'branch-1.2'
            elif self.bigtop_version == 'master':
                bigtop_branch = 'master'
            else:
                raise BigtopError(
                    u"Unknown Bigtop version for repo branch: {}".format(self.bigtop_version))

            # NB: we cannot use the fetch.install_remote helper because that
            # relies on the deb-only python3-apt package. Subordinates cannot
            # install deb dependencies into their venv, so to ensure bigtop
            # subordinate charms succeed, subprocess the required git clone.
            try:
                utils.run_as('root', 'git', 'clone', bigtop_repo,
                             '--branch', bigtop_branch, '--single-branch',
                             self.bigtop_base)
            except subprocess.CalledProcessError as e:
                hookenv.status_set('blocked', 'failed to clone bigtop repo')
                raise BigtopError(
                    u"Failed to clone {}: {}".format(bigtop_repo, e))

        # Make sure the repo looks like we expect
        if Path(self.bigtop_base / 'bigtop.bom').exists():
            hookenv.status_set('waiting', 'bigtop source fetched')
        else:
            hookenv.status_set('blocked', 'invalid bigtop source')
            raise BigtopError(
                u"Unrecognized source repo in {}".format(self.bigtop_base))