def pull_calicoctl_image():
    """Make the calico-node and calicoctl images available locally.

    For each image: pull from the configured registry (using the
    optional base64/JSON 'registry-credentials' config for auth) when
    no resource file is attached, otherwise load the attached gzipped
    image tarball. Sets the 'calico.image.pulled' state when done.
    """
    status.maintenance('Pulling calicoctl image')
    registry = hookenv.config('registry') or DEFAULT_REGISTRY
    # 'registry-credentials' is optional; b64decode(None) would raise a
    # TypeError, so only decode when the option is actually set.
    encoded_creds = hookenv.config('registry-credentials')
    creds = None
    if encoded_creds:
        decoded = b64decode(encoded_creds).decode('utf-8')
        if decoded:
            creds = json.loads(decoded)
    images = {
        os.path.join(registry, hookenv.config('calico-node-image')):
            resource_get('calico-node-image'),
        os.path.join(registry, hookenv.config('calicoctl-image')):
            resource_get('calicoctl-image')
    }
    for name, path in images.items():
        if not path or os.path.getsize(path) == 0:
            # No resource attached (or it is empty): pull from registry.
            status.maintenance('Pulling {} image'.format(name))
            if not creds or not creds.get('auths') or \
                    registry not in creds.get('auths'):
                CTL.pull(name)
            else:
                auth = creds['auths'][registry]['auth']
                username, password = \
                    b64decode(auth).decode('utf-8').split(':')
                CTL.pull(name, username=username, password=password)
        else:
            # Resource attached: decompress and load the image tarball.
            status.maintenance('Loading {} image'.format(name))
            unzipped = '/tmp/calico-node-image.tar'
            with gzip.open(path, 'rb') as f_in:
                with open(unzipped, 'wb') as f_out:
                    # Stream in chunks; images can be hundreds of MB and
                    # f_in.read() would hold the whole tarball in memory.
                    for chunk in iter(lambda: f_in.read(1 << 20), b''):
                        f_out.write(chunk)
            CTL.load(unzipped)
    set_state('calico.image.pulled')
def fetch(self):
    '''
    Get the resources from the controller.

    Returns: True if the resources are available, False if not.
    '''
    wanted = (
        ('scaledb-ude', 'udedeb',
         "Failed to fetch ScaleDB UDE resource", "ScaleDB UDE deb is {}"),
        ('scaledb-maria', 'mariadeb',
         "Failed to fetch ScaleDB Maria resource", "ScaleDB Maria deb is {}"),
    )
    for res_name, kv_key, missing_msg, found_msg in wanted:
        path = resource_get(res_name)
        if not path:
            log(missing_msg)
            return False
        unitdata.kv().set(kv_key, path)
        log(found_msg.format(path))
    # Usually you would open some ports so that outside cloud customers
    # would be able to reach your service.
    # https://jujucharms.com/docs/2.0/charms-exposing
    # self.open_ports()
    return True
def generate_ssh_key():
    """Install or generate an RSA keypair for the 'ubuntu' user.

    When 'use-resource-keys' is set, the keypair is taken from the
    'private-key'/'public-key' Juju resources; otherwise a new 4096-bit
    RSA key is generated. Sets the 'ssh-key.generated' state.

    Raises:
        ValueError: if resource keys are requested but not attached.
    """
    status_set('maintenance', 'setting rsa keys')
    config = hookenv.config()
    keypath = './rsa'
    private_key_path = keypath + '/id_rsa'
    public_key_path = keypath + '/id_rsa.pub'
    # Create the rsa folder. The old code compared e.errno with 'is 17'
    # (identity, not equality) and silently swallowed every OSError;
    # exist_ok tolerates an existing dir but surfaces real failures.
    os.makedirs(keypath, exist_ok=True)
    # Get or generate keys
    if config['use-resource-keys']:
        private_path = resource_get('private-key')
        public_path = resource_get('public-key')
        if private_path and public_path:
            shutil.copy(private_path, private_key_path)
            shutil.copy(public_path, public_key_path)
        else:
            log(
                "Add key resources, see juju attach or disable use-resource-keys",
                'ERROR')
            raise ValueError(
                'Key resources missing, see juju attach or disable use-resource-keys'
            )
    else:
        key = rsa.generate_private_key(backend=crypto_default_backend(),
                                       public_exponent=65537,
                                       key_size=4096)
        private_key = key.private_bytes(
            crypto_serialization.Encoding.PEM,
            crypto_serialization.PrivateFormat.PKCS8,
            crypto_serialization.NoEncryption())
        public_key = key.public_key().public_bytes(
            crypto_serialization.Encoding.OpenSSH,
            crypto_serialization.PublicFormat.OpenSSH)
        # 'f' instead of 'file', which shadows a builtin name.
        with open(private_key_path, 'wb') as f:
            f.write(private_key)
        with open(public_key_path, 'wb') as f:
            f.write(public_key)
        print("Generated RSA key id_rsa.pub: {}".format(repr(public_key)))
    # Correct permissions: the private key must not be world readable.
    os.chmod(private_key_path, 0o600)
    os.chmod(public_key_path, 0o600)
    # Add to ubuntu user
    shutil.copy(private_key_path, '/home/ubuntu/.ssh/id_rsa')
    shutil.chown('/home/ubuntu/.ssh/id_rsa', user='******', group='ubuntu')
    shutil.copy(public_key_path, '/home/ubuntu/.ssh/id_rsa.pub')
    shutil.chown('/home/ubuntu/.ssh/id_rsa.pub', user='******', group='ubuntu')
    set_state('ssh-key.generated')
def install_certificate_authority():
    """Install the cfssl/cfssljson binaries delivered as Juju resources."""
    # Resources are used to deliver the CA binary bits
    binaries = {
        'cfssl': resource_get('cfssl'),
        'cfssljson': resource_get('cfssljson'),
    }
    if not all(binaries.values()):
        status_set('blocked', 'Missing resources. See: README.')
        return
    for tool, source in binaries.items():
        install(source, '/usr/local/bin/{}'.format(tool))
    set_state('certificate-authority.installed')
def install_cfssl():
    """Install the CFSSL binaries delivered as Juju resources."""
    status_set('maintenance', 'Installing CFSSL.')
    # Resources are used to deliver the CA binary bits
    cfssl_bin = resource_get('cfssl')
    cfssl_json_bin = resource_get('cfssljson')
    if not (cfssl_bin and cfssl_json_bin):
        status_set('blocked', 'Missing resources. See: README.md.')
        return
    for source, target in ((cfssl_bin, '/usr/local/bin/cfssl'),
                           (cfssl_json_bin, '/usr/local/bin/cfssljson')):
        install(source, target)
    set_state('certificate-authority.installed')
def install_prometheus_exporter_resource():
    """Install the node-exporter binary from its Juju resource.

    Replaces any existing binary, marks the new one executable, and
    sets the availability state.
    """
    go_bin = resource_get('node-exporter')
    # resource_get returns a falsy value when the resource has not been
    # attached; copyfile(None, ...) would raise a TypeError.
    if not go_bin:
        return
    if os.path.exists(NODE_EXPORTER_BIN):
        os.remove(NODE_EXPORTER_BIN)
    copyfile(go_bin, NODE_EXPORTER_BIN)
    # os.chmod instead of shelling out to chmod +x; copyfile creates the
    # file with default (non-executable) permissions.
    os.chmod(NODE_EXPORTER_BIN, 0o755)
    set_state('prometheus.node.exporter.bin.available')
def fetch_resources_example():
    """Fetch the 'software' resource from Juju and verify it.

    Prints the resource path, checksums the file with sha1sum, and sets
    the 'resources-example.fetched' state.
    """
    # The original print had an unbalanced trailing quote (SyntaxError).
    print('Fetching resource with resource_get(name="software")')
    path = resource_get(name='software')
    print(path)
    subprocess.check_call(['sha1sum', path])
    set_state('resources-example.fetched')
def install():
    """Install Gogs from the 'bdist' resource.

    Unpacks the binary distribution into /opt, creates the gogs
    user/group and runtime directories, and renders both upstart and
    systemd service definitions.
    """
    context = get_install_context()
    gogs_bdist = hookenv.resource_get('bdist')
    check_call(["tar", "xzf", gogs_bdist], cwd="/opt")

    # Create gogs user & group
    add_group(context['group'])
    adduser(context['user'], system_user=True)

    # Private runtime directories for the gogs user. 'subdir' instead of
    # 'dir', which shadows the builtin.
    for subdir in ('.ssh', 'repositories', 'data', 'logs'):
        os.makedirs(
            os.path.join(context['home'], subdir), mode=0o700, exist_ok=True)
    os.makedirs(os.path.join(context['home'], 'custom', 'conf'),
                mode=0o755, exist_ok=True)
    chownr(context['home'], context['user'], context['group'], True, True)

    render(source='upstart', target="/etc/init/gogs.conf", perms=0o644,
           context=context)
    render(source='gogs.service',
           target="/lib/systemd/system/gogs.service", perms=0o644,
           context=context)
    hookenv.status_set('maintenance', 'installation complete')
def install(self, force=False):
    """
    Override install to handle nested zeppelin dir, and different
    resource name.

    Unpacks the 'insightedge' resource into the configured destination.

    Args:
        force: reinstall even when the destination already exists.

    Returns:
        True on success or when already installed; False when the
        resource could not be fetched.
    """
    filename = hookenv.resource_get('insightedge')
    destination = self.dist_config.path('insightedge')
    if not filename:
        return False  # failed to fetch
    if destination.exists() and not force:
        # Already installed; nothing to do unless a reinstall is forced.
        return True
    destination.rmtree_p()  # if reinstalling
    extracted = Path(fetch.install_remote('file://' + filename))
    # The archive unpacks to a single nested top-level directory; copy
    # that directory's contents rather than the wrapper itself.
    extracted.dirs()[0].copytree(destination)  # only copy nested dir
    host.chownr(destination, 'ubuntu', 'root')
    zd = self.dist_config.path('zeppelin') / 'bin' / 'zeppelin-daemon.sh'
    zd.chmod('a+x')
    self.dist_config.add_dirs()
    self.dist_config.add_packages()
    return True
def install_cni_plugins():
    """Unpack the cni-plugins resource into /opt/cni/bin.

    Failures (fetch error, missing, or incomplete resource) put the
    unit into a blocked status so the operator can attach the resource.
    """
    hookenv.status_set("maintenance", "Installing CNI plugins")
    # Get the resource via resource_get
    try:
        resource_name = "cni-{}".format(arch())
        archive = hookenv.resource_get(resource_name)
    except Exception:
        message = "Error fetching the cni resource."
        hookenv.log(message)
        # Surface the failure instead of silently returning with the
        # 'maintenance' status still set (matches the sibling
        # install_cni_plugins implementations).
        hookenv.status_set("blocked", message)
        return
    if not archive:
        hookenv.log("Missing cni resource.")
        hookenv.status_set("blocked", "Missing cni resource.")
        return
    # Handle null resource publication, we check if filesize < 1mb
    filesize = os.stat(archive).st_size
    if filesize < 1000000:
        hookenv.log("Incomplete cni resource.")
        hookenv.status_set("blocked", "Incomplete cni resource.")
        return
    unpack_path = "/opt/cni/bin"
    os.makedirs(unpack_path, exist_ok=True)
    cmd = ["tar", "xfvz", archive, "-C", unpack_path]
    hookenv.log(cmd)
    check_call(cmd)
    set_flag("kubernetes.cni-plugins.installed")
def install_nimsoft_robot():
    '''Install the nimsoft robot software that is used for LMA.'''
    try:
        # Try to get the resource from Juju.
        package = resource_get('nimsoft-robot-package')
    except Exception as err:
        message = \
            'An error occurred fetching the nimsoft-robot-package resource.'
        hookenv.log(message)
        hookenv.log(err)
        hookenv.status_set('blocked', message)
        return
    if not package:
        hookenv.status_set('blocked',
                           'The nimsoft_robot_resource resource is missing.')
        return
    # Handle null resource publication, we check if filesize < 1mb
    if os.stat(package).st_size < 1000000:
        hookenv.status_set('blocked',
                           'Incomplete nimsoft_robot_resource resource.')
        return
    hookenv.status_set('maintenance', 'Installing nimsoft_robot resource.')
    cmd = ['dpkg', '-i', package]
    hookenv.log(cmd)
    check_call(cmd)
    set_flag('nimsoft-robot.installed')
def keytab_path(self):
    """Path for the keytab file.

    When the 'keystone_keytab' resource is attached, copies it to
    KEYTAB_DESTINATION_PATH and caches the resource path (not the
    destination copy) on the instance.

    NOTE(review): when the resource is missing, self._keytab_path is
    returned unchanged — presumably initialised elsewhere; confirm it
    has a default so this cannot raise AttributeError.
    """
    keytab_file = hookenv.resource_get('keystone_keytab')
    if keytab_file:
        shutil.copy(keytab_file, KEYTAB_DESTINATION_PATH)
        self._keytab_path = keytab_file
    return self._keytab_path
def _install_mattermost():
    """Unpack Mattermost into /opt, preserving config and data from any
    previous installation."""
    # Move any existing install aside to a timestamped backup.
    previous = None
    if os.path.isdir('/opt/mattermost'):
        previous = "/opt/mattermost.back{}".format(
            str(datetime.datetime.now()))
        shutil.move('/opt/mattermost', previous)

    # Ensure the mattermost user and group exist.
    if not group_exists('mattermost'):
        add_group("mattermost")
    if not user_exists('mattermost'):
        adduser("mattermost", system_user=True)

    # Unpack the binary distribution resource.
    extract_tarfile(resource_get('bdist'), destpath="/opt")

    # Install the systemd unit and reload the daemon.
    render(source="mattermost.service.tmpl",
           target="/etc/systemd/system/mattermost.service",
           perms=0o644,
           owner="root",
           context={})
    check_call(['systemctl', 'daemon-reload'])

    # Restore configuration and data from the previous install.
    if previous:
        shutil.move('{}/config/config.json'.format(previous),
                    '/opt/mattermost/config/config.json')
        shutil.move('{}/data'.format(previous), '/opt/mattermost/')

    # Create any runtime dirs the tarball did not provide.
    for sub in ("data", "logs", "config"):
        os.makedirs("/opt/mattermost/{}".format(sub), mode=0o700,
                    exist_ok=True)
    chownr("/opt/mattermost", "mattermost", "mattermost", chowntopdir=True)
def install_resources(self):
    """Fetch each mapped Juju application resource and run its installer."""
    for name, installer in self.resource_install_map.items():
        path = hookenv.resource_get(name)
        if not path:
            # Resource not attached; skip its installer.
            continue
        installer(path)
def idp_metadata(self):
    """Return the IdP metadata XML as a string (cached), or None.

    Metadata comes from the 'idp-metadata' Juju resource when no
    idp_metadata_url is configured, otherwise it is downloaded from
    that URL. Any problem is recorded under the 'idp-metadata' key of
    self._validation_errors.
    """
    idp_metadata_content = None
    if self.idp_metadata_url is None:
        # Get metadata from resource
        idp_metadata_path = hookenv.resource_get('idp-metadata')
        # Only (re)read the resource file while nothing is cached yet.
        if (idp_metadata_path and os.path.exists(idp_metadata_path)
                and not self._idp_metadata):
            with open(idp_metadata_path, 'r', encoding='utf-8') as f:
                idp_metadata_content = f.read()
        else:
            self._validation_errors['idp-metadata'] =\
                self.IDP_METADATA_NOT_PROVIDED
    else:
        try:
            response = urllib.request.urlopen(self.idp_metadata_url)
        except urllib.error.URLError as e:
            # Record the fetch failure and return whatever was cached.
            self._validation_errors['idp-metadata'] = '{}: {}'.format(
                self.IDP_METADATA_URL_ERROR, e.reason)
            return self._idp_metadata
        encoded_content = response.read()
        idp_metadata_content = encoded_content.decode("utf-8")
    # Metadata has been provided either via a resource or downloaded from
    # the specified URL.
    if idp_metadata_content is not None:
        try:
            # Parse to verify the content is well-formed XML before
            # accepting and caching it.
            etree.fromstring(idp_metadata_content.encode())
            self._idp_metadata = idp_metadata_content
            self._validation_errors['idp-metadata'] = None
        except etree.XMLSyntaxError:
            self._idp_metadata = None
            self._validation_errors['idp-metadata'] = (
                self.IDP_METADATA_INVALID)
    return self._idp_metadata
def install_cni_plugins():
    '''Unpack the per-architecture cni-plugins resource into
    /opt/cni/bin.'''
    hookenv.status_set('maintenance', 'Installing CNI plugins')
    # Fetch the architecture-specific resource.
    try:
        archive = hookenv.resource_get('cni-{}'.format(arch()))
    except Exception:
        hookenv.log('Error fetching the cni resource.')
        return
    if not archive:
        hookenv.log('Missing cni resource.')
        return
    # A published-but-empty resource shows up as a tiny placeholder
    # file, so anything under 1 MB is treated as incomplete.
    if os.stat(archive).st_size < 1000000:
        hookenv.log('Incomplete cni resource.')
        return
    unpack_path = '/opt/cni/bin'
    os.makedirs(unpack_path, exist_ok=True)
    untar = ['tar', 'xfvz', archive, '-C', unpack_path]
    hookenv.log(untar)
    check_call(untar)
    set_flag('kubernetes.cni-plugins.installed')
def setup_config():
    """Configure Radarr, optionally restoring a backup from the
    'radarrconfig' resource.

    On restore, indexers are disabled until re-enabled via action; on a
    fresh install the service is started once so it writes its default
    config file, which is then adjusted. Sets 'radarr.configured'.
    """
    hookenv.status_set('maintenance', 'Configuring Radarr')
    backups = './backups'
    if radarr.charm_config['restore-config']:
        # exist_ok replaces the old try/except-pass, which silently
        # swallowed every OSError, not just EEXIST.
        os.makedirs(backups, exist_ok=True)
        backup_file = hookenv.resource_get('radarrconfig')
        if backup_file:
            with ZipFile(backup_file, 'r') as in_file:
                in_file.extractall(radarr.config_dir)
            hookenv.log(
                "Restoring config, indexers are disabled enable with action when configuration has been checked",
                'INFO')
            # Turn off indexers
            radarr.set_indexers(False)
        else:
            hookenv.log(
                "Add radarrconfig resource, see juju attach or disable restore-config",
                'WARN')
            hookenv.status_set('blocked', 'waiting for radarrconfig resource')
            return
    else:
        host.service_start(radarr.service_name)
        config_file = Path(radarr.config_file)
        # Wait for the freshly-started service to write its initial
        # configuration file.
        while not config_file.is_file():
            time.sleep(1)
    radarr.modify_config(port=radarr.charm_config['port'], urlbase='None')
    hookenv.open_port(radarr.charm_config['port'], 'TCP')
    host.service_start(radarr.service_name)
    hookenv.status_set('active', 'Radarr is ready')
    set_state('radarr.configured')
def setup_insightedge_on_spark(spark):
    """Fetch, unpack, and configure InsightEdge for Spark.

    Installs the 'insightedge' resource into /usr/lib/insightedge when
    not already present, then registers the InsightEdge jars with the
    given spark helper. Sets the 'insightedge.installed' state.
    """
    destination = Path('/usr/lib/insightedge')
    if not destination.exists():
        hookenv.status_set('maintenance', 'fetching insightedge')
        filename = hookenv.resource_get('insightedge')
        if not filename:
            hookenv.status_set("blocked",
                               "unable to fetch insightedge resource")
            hookenv.log("Failed to fetch InsightEdge resource")
            return
        hookenv.status_set('maintenance', 'installing insightedge')
        extracted = Path(fetch.install_remote('file://' + filename))
        destination.rmtree_p()  # in case doing a re-install
        # The archive unpacks to a single nested top-level directory.
        extracted.dirs()[0].copytree(destination)  # copy nested dir contents
    hookenv.status_set('maintenance', 'configuring insightedge')
    with host.chdir(destination):
        # Source common-insightedge.sh and call its get_libs helper; the
        # ',' argument is presumably the jar-list separator — TODO
        # confirm against common-insightedge.sh.
        insightedge_jars = subprocess.check_output([
            'bash', '-c', '. {}; get_libs ,'.format(
                destination / 'sbin' / 'common-insightedge.sh')
        ], env={
            'INSIGHTEDGE_HOME': destination
        }).decode('utf8')
    spark.register_classpaths(insightedge_jars.split(','))
    set_state('insightedge.installed')
def setup_config():
    """Configure CouchPotato, optionally restoring a backup from the
    'couchconfig' resource.

    On a fresh install the service is started once so it writes its
    settings file before host/port are adjusted. Sets
    'couchpotato.configured'.
    """
    hookenv.status_set('maintenance', 'configuring')
    backups = './backups'
    if cp.charm_config['restore-config']:
        # exist_ok replaces the old errno check, which used 'is 17'
        # (identity, not equality) and swallowed every other OSError.
        os.makedirs(backups, exist_ok=True)
        backup_file = hookenv.resource_get('couchconfig')
        if backup_file:
            with tarfile.open(backup_file, 'r:gz') as in_file:
                in_file.extractall(cp.config_dir)
            host.chownr(cp.home_dir, owner=cp.user, group=cp.user)
            cp.reload_config()
            cp.set_indexers(False)
        else:
            hookenv.log(
                "Add couchconfig resource, see juju attach or disable restore-config",
                'ERROR')
            hookenv.status_set('blocked', 'Waiting on couchconfig resource')
            return
    else:
        cp.start()
        # Wait for the app to write its initial settings file.
        while not Path(cp.settings_file).is_file():
            time.sleep(1)
        cp.stop()
        cp.reload_config()
    cp.set_host(socket.getfqdn(
    ))  # This could use the config parameter and not require checking
    cp.set_port()
    cp.save_config()
    cp.start()
    hookenv.status_set('active', 'Couchpotato is ready')
    set_state('couchpotato.configured')
def install(self):
    '''Fetch and unpack the TomEE tarball resource.

    Creates users/dirs from dist_config, extracts the 'tomee' resource
    into tomee_dir as the tomcat user, exports CATALINA_HOME in
    /etc/environment, and opens the service ports.

    Returns:
        True on success, False when the resource is unavailable.
    '''
    self.dist_config.add_users()
    self.dist_config.add_dirs()
    result = resource_get('tomee')
    if not result:
        log("Failed to fetch TomEE resource")
        return False
    unitdata.kv().set("tomeetarball", result)
    log("TomEE tarball path is {}".format(result))
    tomee_install_dir = self.dist_config.path('tomee_dir')
    with chdir(tomee_install_dir):
        # Extract as the 'tomcat' user so file ownership is correct.
        utils.run_as('tomcat', 'tar', '-zxvf', '{}'.format(result))
    # The tarball unpacks to a versioned 'apache-tomee-*' directory;
    # locate it to derive CATALINA_HOME.
    tomee_dirs = [f for f in os.listdir(tomee_install_dir)
                  if f.startswith('apache-tomee')]
    catalina_home = os.path.join(tomee_install_dir, tomee_dirs[0])
    with utils.environment_edit_in_place('/etc/environment') as env:
        env['CATALINA_HOME'] = catalina_home
    unitdata.kv().set("catalina_home", catalina_home)
    self.open_ports()
    return True
def get_resource(basename, expected_size=1000000):
    '''Return the named resource from Juju based on architecture, with a
    minimum expected size. Handle fetching errors, zero byte files and
    incomplete resources with the appropriate status message.

    Args:
        basename: resource name without the architecture suffix.
        expected_size: minimum byte size of a complete resource.

    Returns:
        The resource path, or a falsy value when unavailable.
    '''
    resource = None
    # Build the name before the try block: if arch() raised inside it,
    # the except handler's use of 'name' would itself be a NameError.
    name = '{0}-{1}'.format(basename, arch())
    try:
        resource = hookenv.resource_get(name)
    except Exception as e:
        # Catch and log all exceptions, connection, and not implemented, etc.
        hookenv.log(e)
        error_message = 'Error fetching the {0} resource.'.format(name)
        hookenv.status_set('blocked', error_message)
        return resource
    # When the resource is empty string we have nothing to do.
    if not resource:
        zero_message = 'Missing {0} resource.'.format(name)
        hookenv.status_set('blocked', zero_message)
    else:
        # Check for incomplete resources when the function returned correctly.
        filesize = os.stat(resource).st_size
        if filesize < expected_size:
            incomplete_message = 'Incomplete {0} resource.'.format(name)
            hookenv.status_set('blocked', incomplete_message)
    return resource
def _install_mattermost():
    """Install/upgrade Mattermost under /opt/mattermost.

    Any existing installation is first moved aside to a timestamped
    backup; its config.json and data directory are moved back into the
    fresh tree after unpacking the 'bdist' resource.
    """
    # Backup existing installation if it exists
    backup_path = None
    if os.path.isdir('/opt/mattermost'):
        backup_path = "/opt/mattermost.back{}".format(
            str(datetime.datetime.now()))
        shutil.move('/opt/mattermost', backup_path)

    # Create mattermost user & group if not exists
    if not group_exists('mattermost'):
        add_group("mattermost")
    if not user_exists('mattermost'):
        adduser("mattermost", system_user=True)

    # Get and unpack resource
    mattermost_bdist = resource_get('bdist')
    extract_tarfile(mattermost_bdist, destpath="/opt")

    # Render systemd template
    render(source="mattermost.service.tmpl",
           target="/etc/systemd/system/mattermost.service",
           perms=0o644,
           owner="root",
           context={})
    check_call(['systemctl', 'daemon-reload'])

    # Restore the previous config and data into the new tree.
    if backup_path:
        shutil.move('{}/config/config.json'.format(backup_path),
                    '/opt/mattermost/config/config.json')
        shutil.move('{}/data'.format(backup_path), '/opt/mattermost/')

    # Create dirs that don't exist yet
    for folder in ("data", "logs", "config"):
        os.makedirs("/opt/mattermost/{}".format(folder), mode=0o700,
                    exist_ok=True)
    chownr("/opt/mattermost", "mattermost", "mattermost", chowntopdir=True)
def write_config():
    '''Unpack secrets files and configure the charm.

    Extracts the 'secrets' resource into /home/ubuntu, writes the
    config file and cron job, and reports the configured push gateway.
    '''
    try:
        archive = hookenv.resource_get('secrets')
    except Exception:
        message = 'Error fetching the secrets resource.'
        blocked(message)
        return
    if not archive:
        blocked('Missing secrets resource.')
        return
    # Handle null resource publication, we check if filesize < 1mb
    filesize = os.stat(archive).st_size
    if filesize <= 1:
        blocked('Incomplete secrets resource.')
        return
    command = 'tar -xvzf {0} -C {1}'.format(archive, "/home/ubuntu")
    hookenv.log(command)
    try:
        check_call(split(command))
    except Exception:
        # The old code called blocked() unconditionally on the success
        # path (immediately overwritten by active() below); it belongs
        # on the extraction-failure path only.
        blocked('Unable to configure charm - please see log')
        return
    push_gateway = write_config_file()
    write_cron_job()
    active('Configured push gateway %s' % (push_gateway, ))
def install_dockerbeat():
    """Install the dockerbeat binary and its init configuration.

    The binary normally comes from the 'dockerbeat' resource; on hosts
    whose Juju version predates resources (1.25.5), it is downloaded
    from the configured upstream URI instead.
    """
    target = '/usr/local/bin/dockerbeat'
    try:
        source = resource_get('dockerbeat')
    except NotImplementedError:
        # Old controllers have no resources facade; fall back to a
        # validated download from the configured uri.
        source = download_from_upstream()
    if not source:
        status_set('blocked', 'Missing dockerbeat binary')
        return
    install(source, target)
    os.chmod(target, 0o755)
    # Pick the init configuration matching the release: upstart on
    # trusty, systemd everywhere else.
    if lsb_release()['DISTRIB_CODENAME'] == 'trusty':
        render('upstart', '/etc/init/dockerbeat.conf', {})
    else:
        render('systemd', '/etc/systemd/system/dockerbeat.service', {})
    set_state('dockerbeat.installed')
def install_calico_binaries():
    '''Unpack the Calico binaries and install them onto the path.'''
    try:
        archive = resource_get('calico')
    except Exception:
        message = 'Error fetching the calico resource.'
        log(message)
        status_set('blocked', message)
        return
    if not archive:
        message = 'Missing calico resource.'
        log(message)
        status_set('blocked', message)
        return
    # Anything under 1 MB is a placeholder, not a real archive.
    if os.stat(archive).st_size < 1000000:
        message = 'Incomplete calico resource'
        log(message)
        status_set('blocked', message)
        return
    status_set('maintenance', 'Unpacking calico resource.')
    charm_dir = os.getenv('CHARM_DIR')
    unpack_path = os.path.join(charm_dir, 'files', 'calico')
    os.makedirs(unpack_path, exist_ok=True)
    untar = ['tar', 'xfz', archive, '-C', unpack_path]
    log(untar)
    check_call(untar)
    # install(1) -D creates missing destination path components.
    targets = (
        ('calicoctl', CALICOCTL_PATH),
        ('calico', '/opt/cni/bin'),
        ('calico-ipam', '/opt/cni/bin'),
        ('portmap', '/opt/cni/bin'),
    )
    for binary, dest_dir in targets:
        check_call(['install', '-v', '-D',
                    os.path.join(unpack_path, binary),
                    os.path.join(dest_dir, binary)])
    set_state('calico.binaries.installed')
def install(): '''Unpack and put the Kubernetes master files on the path.''' # Get the resource via resource_get try: archive = hookenv.resource_get('kubernetes') except Exception: message = 'Error fetching the kubernetes resource.' hookenv.log(message) hookenv.status_set('blocked', message) return if not archive: hookenv.log('Missing kubernetes resource.') hookenv.status_set('blocked', 'Missing kubernetes resource.') return # Handle null resource publication, we check if filesize < 1mb filesize = os.stat(archive).st_size if filesize < 1000000: hookenv.status_set('blocked', 'Incomplete kubernetes resource.') return hookenv.status_set('maintenance', 'Unpacking kubernetes resource.') files_dir = os.path.join(hookenv.charm_dir(), 'files') os.makedirs(files_dir, exist_ok=True) command = 'tar -xvzf {0} -C {1}'.format(archive, files_dir) hookenv.log(command) check_call(split(command)) apps = [ { 'name': 'kube-apiserver', 'path': '/usr/local/bin' }, { 'name': 'kube-controller-manager', 'path': '/usr/local/bin' }, { 'name': 'kube-scheduler', 'path': '/usr/local/bin' }, { 'name': 'kubectl', 'path': '/usr/local/bin' }, ] for app in apps: unpacked = '{}/{}'.format(files_dir, app['name']) app_path = os.path.join(app['path'], app['name']) install = ['install', '-v', '-D', unpacked, app_path] hookenv.log(install) check_call(install) set_state('kubernetes-master.components.installed')
def calculate_resource_checksum(resource):
    '''Calculate a checksum for a resource.

    Args:
        resource: the Juju resource name to checksum.

    Returns:
        Hex MD5 digest of the resource file contents; the digest of
        empty input when the resource is not attached.
    '''
    md5 = hashlib.md5()
    path = hookenv.resource_get(resource)
    if path:
        with open(path, 'rb') as f:
            # Hash in chunks; resources can be large archives and a
            # single f.read() would load the whole file into memory.
            for chunk in iter(lambda: f.read(65536), b''):
                md5.update(chunk)
    return md5.hexdigest()
def install_calico_binaries(): ''' Unpack the Calico binaries. ''' # on intel, the resource is called 'calico'; other arches have a suffix architecture = arch() if architecture == 'amd64': resource_name = 'calico-cni' else: resource_name = 'calico-cni-{}'.format(architecture) try: archive = resource_get(resource_name) except Exception: message = 'Error fetching the calico resource.' log(message) status.blocked(message) return if not archive: message = 'Missing calico resource.' log(message) status.blocked(message) return filesize = os.stat(archive).st_size if filesize < 1000000: message = 'Incomplete calico resource' log(message) status.blocked(message) return status.maintenance('Unpacking calico resource.') charm_dir = os.getenv('CHARM_DIR') unpack_path = os.path.join(charm_dir, 'files', 'calico') os.makedirs(unpack_path, exist_ok=True) cmd = ['tar', 'xfz', archive, '-C', unpack_path] log(cmd) check_call(cmd) apps = [ { 'name': 'calico', 'path': '/opt/cni/bin' }, { 'name': 'calico-ipam', 'path': '/opt/cni/bin' }, ] for app in apps: unpacked = os.path.join(unpack_path, app['name']) app_path = os.path.join(app['path'], app['name']) install = ['install', '-v', '-D', unpacked, app_path] check_call(install) set_state('calico.binaries.installed')
def calculate_resource_checksum(resource):
    """Return the hex MD5 digest of the named resource's file.

    When the resource is not attached, the digest of empty input is
    returned.
    """
    digest = hashlib.md5()
    path = hookenv.resource_get(resource)
    if not path:
        return digest.hexdigest()
    with open(path, "rb") as fp:
        digest.update(fp.read())
    return digest.hexdigest()
def calculate_keytab_checksum(resource):
    '''Return the hex MD5 checksum of the keytab resource file.

    Note: the path always comes from the module-level RESOURCE name;
    the 'resource' parameter is accepted but not used.
    '''
    checksum = hashlib.md5()
    path = resource_get(RESOURCE)
    if path:
        with open(path, 'rb') as keytab:
            checksum.update(keytab.read())
    return checksum.hexdigest()
def deb_install():
    """Install Elasticsearch from the 'deb' resource.

    Sets 'elasticsearch.installed' on success; blocks the unit when the
    package install fails.
    """
    try:
        deb = resource_get('deb')
        d = DebPackage(deb)
        d.install()
        set_state('elasticsearch.installed')
    except CalledProcessError:
        # 'error' is not a status a charm may set via status-set (only
        # maintenance/blocked/waiting/active are); use 'blocked'.
        status_set('blocked',
                   'Elasticsearch could not be installed with package')
def install():
    '''Install the easy-rsa software that is used by this layer.

    Fetches the 'easyrsa' resource, unpacks it into the charm
    directory, links the versioned EasyRSA directory to a stable name,
    migrates any backed-up pki from a previous charm revision, or
    initialises a new pki. Sets the 'easyrsa.installed' state.
    '''
    easyrsa_resource = None
    try:
        # Try to get the resource from Juju.
        easyrsa_resource = resource_get('easyrsa')
    except Exception as e:
        message = 'An error occurred fetching the easyrsa resource.'
        hookenv.log(message)
        hookenv.log(e)
        hookenv.status_set('blocked', message)
        return

    if not easyrsa_resource:
        hookenv.status_set('blocked', 'The easyrsa resource is missing.')
        return

    # Get the filesize in bytes.
    filesize = os.stat(easyrsa_resource).st_size
    # When the filesize is less than 10 KB we do not have a real file.
    if filesize < 10240:
        hookenv.status_set('blocked', 'The easyrsa resource is not complete.')
        return

    # Expand the archive in the charm directory creating an EasyRSA directory.
    untar = 'tar -xvzf {0} -C {1}'.format(easyrsa_resource, charm_directory)
    check_call(split(untar))

    version = get_version(easyrsa_resource)
    # Save the version in the key/value store of the charm.
    unitdata.kv().set('easyrsa-version', version)

    # Replace any stale symlink from a previous version before linking.
    if os.path.islink(easyrsa_directory):
        check_call(split('rm -v {0}'.format(easyrsa_directory)))

    # Link the EasyRSA version directory to a common name.
    link = 'ln -v -s {0}/EasyRSA-{1} {2}'.format(charm_directory, version,
                                                 easyrsa_directory)
    check_call(split(link))

    # The charm pki directory contains backup of pki for upgrades.
    charm_pki_directory = os.path.join(charm_directory, 'pki')
    if os.path.isdir(charm_pki_directory):
        new_pki_directory = os.path.join(easyrsa_directory, 'pki')
        # Only copy the directory if the new_pki_directory does not exist.
        if not os.path.isdir(new_pki_directory):
            # Copy the pki to this new directory.
            shutil.copytree(charm_pki_directory, new_pki_directory,
                            symlinks=True)
        # We are done with the old charm pki directory, so delete contents.
        shutil.rmtree(charm_pki_directory)
    else:
        # Create new pki.
        with chdir(easyrsa_directory):
            check_call(split('./easyrsa --batch init-pki 2>&1'))

    set_state('easyrsa.installed')
def install():
    """Install the easy-rsa software that is used by this layer.

    Fetches the 'easyrsa' resource, unpacks it into the charm
    directory, links the versioned EasyRSA directory to a stable name,
    migrates any backed-up pki from a previous charm revision, or
    initialises a new pki. Sets the 'easyrsa.installed' flag.
    """
    easyrsa_resource = None
    try:
        # Try to get the resource from Juju.
        easyrsa_resource = resource_get("easyrsa")
    except Exception as e:
        message = "An error occurred fetching the easyrsa resource."
        hookenv.log(message)
        hookenv.log(e)
        status.blocked(message)
        return

    if not easyrsa_resource:
        status.blocked("The easyrsa resource is missing.")
        return

    # Get the filesize in bytes.
    filesize = os.stat(easyrsa_resource).st_size
    # When the filesize is less than 10 KB we do not have a real file.
    if filesize < 10240:
        status.blocked("The easyrsa resource is not complete.")
        return

    # Expand the archive in the charm directory creating an EasyRSA directory.
    untar = "tar -xvzf {0} -C {1}".format(easyrsa_resource, charm_directory)
    check_call(split(untar))

    version = get_version(easyrsa_resource)
    # Save the version in the key/value store of the charm.
    unitdata.kv().set("easyrsa-version", version)

    # Replace any stale symlink from a previous version before linking.
    if islink(easyrsa_directory):
        check_call(split("rm -v {0}".format(easyrsa_directory)))

    # Link the EasyRSA version directory to a common name.
    link = "ln -v -s {0}/EasyRSA-{1} {2}".format(charm_directory, version,
                                                 easyrsa_directory)
    check_call(split(link))

    # The charm pki directory contains backup of pki for upgrades.
    charm_pki_directory = os.path.join(charm_directory, "pki")
    if os.path.isdir(charm_pki_directory):
        new_pki_directory = os.path.join(easyrsa_directory, "pki")
        # Only copy the directory if the new_pki_directory does not exist.
        if not os.path.isdir(new_pki_directory):
            # Copy the pki to this new directory.
            shutil.copytree(charm_pki_directory, new_pki_directory,
                            symlinks=True)
        # We are done with the old charm pki directory, so delete contents.
        shutil.rmtree(charm_pki_directory)
    else:
        # Create new pki.
        with chdir(easyrsa_directory):
            check_call(split("./easyrsa --batch init-pki 2>&1"))

    set_flag("easyrsa.installed")
def install_resource():
    """Install the jujusvg binary and the webapp from Juju resources.

    Copies the 'python-jujusvg' binary into place, unpacks the
    'webapp' tarball, and sets the 'charm-svg.installed' state.
    """
    remove_state('charm-svg.ready')
    hookenv.status_set('maintenance', 'extracting resources')
    svg_bin = hookenv.resource_get('python-jujusvg')
    web_tar = hookenv.resource_get('webapp')

    hookenv.status_set('maintenance', 'installing python-jujusvg')
    shutil.copy(svg_bin, charmsvg.JUJUSVG_PATH)
    os.chmod(charmsvg.JUJUSVG_PATH, 0o755)

    hookenv.status_set('maintenance', 'installing webapp')
    # Context manager so the archive is closed even if extraction
    # fails; the old code leaked the open file handle.
    with tarfile.open(web_tar) as tar:
        tar.extractall(charmsvg.INSTALL_PATH)
    chownr(charmsvg.INSTALL_PATH, 'www-data', 'www-data')
    set_state('charm-svg.installed')
def get_model_from_resource():
    """Return the path of the 'model' resource, or None when it is
    missing, nonexistent, or empty."""
    filename = hookenv.resource_get('model')
    if filename:
        candidate = Path(filename)
        if candidate.exists() and candidate.stat().st_size > 0:
            return filename
    return None
def load_docker_image(name):
    """Load the named resource tarball into docker, replacing any image
    already present under that name; return the new image id or None."""
    tarball = resource_get(name)
    if not tarball:
        return None
    previous_id = get_docker_image_id(name)
    if previous_id:
        # Drop the previous image before loading the replacement.
        check_call([DOCKER_CLI, "rmi", previous_id])
    check_call([DOCKER_CLI, "load", "-i", tarball])
    return get_docker_image_id(name)
def _resource_get(snapname):
    '''Used to fetch the resource path of the given name.

    This wrapper obtains a resource path and adds an additional check
    to return False if the resource is zero length.
    '''
    path = hookenv.resource_get(snapname)
    if not path:
        return False
    # A zero-length file means the resource was published empty.
    return path if os.stat(path).st_size != 0 else False
def install_cni_plugins():
    ''' Unpack the cni-plugins resource.

    Fetches the per-architecture cni resource, unpacks it into the
    charm's files directory, installs the loopback plugin into
    /opt/cni/bin, and pre-creates /srv/registry for the registry
    action. Failures block the unit.
    '''
    charm_dir = os.getenv('CHARM_DIR')
    # Get the resource via resource_get
    try:
        resource_name = 'cni-{}'.format(arch())
        archive = hookenv.resource_get(resource_name)
    except Exception:
        message = 'Error fetching the cni resource.'
        hookenv.log(message)
        hookenv.status_set('blocked', message)
        return

    if not archive:
        hookenv.log('Missing cni resource.')
        hookenv.status_set('blocked', 'Missing cni resource.')
        return

    # Handle null resource publication, we check if filesize < 1mb
    filesize = os.stat(archive).st_size
    if filesize < 1000000:
        hookenv.status_set('blocked', 'Incomplete cni resource.')
        return

    hookenv.status_set('maintenance', 'Unpacking cni resource.')
    unpack_path = '{}/files/cni'.format(charm_dir)
    os.makedirs(unpack_path, exist_ok=True)
    cmd = ['tar', 'xfvz', archive, '-C', unpack_path]
    hookenv.log(cmd)
    check_call(cmd)

    # install(1) -D creates missing destination path components.
    apps = [
        {'name': 'loopback', 'path': '/opt/cni/bin'}
    ]
    for app in apps:
        unpacked = '{}/{}'.format(unpack_path, app['name'])
        app_path = os.path.join(app['path'], app['name'])
        install = ['install', '-v', '-D', unpacked, app_path]
        hookenv.log(install)
        check_call(install)

    # Used by the "registry" action. The action is run on a single worker, but
    # the registry pod can end up on any worker, so we need this directory on
    # all the workers.
    os.makedirs('/srv/registry', exist_ok=True)
    set_state('kubernetes-worker.cni-plugins.installed')
def check_for_upgrade_needed():
    """React to Juju's upgrade-charm event.

    Re-runs snap migration and RBAC setup, forces authentication to be
    reconfigured, and schedules an upgrade when any resource file has
    changed since the last run.
    """
    hookenv.status_set('maintenance', 'Checking resources')
    migrate_from_pre_snaps()
    add_rbac_roles()
    set_state('reconfigure.authentication.setup')
    remove_state('authentication.setup')
    paths = [
        hookenv.resource_get(name)
        for name in ('kubectl', 'kube-apiserver', 'kube-controller-manager',
                     'kube-scheduler', 'cdk-addons')
    ]
    if any_file_changed(paths):
        set_upgrade_needed()
def configure_examples(self): """ Install sparkpi.sh and sample data to /home/ubuntu. The sparkpi.sh script demonstrates spark-submit with the SparkPi class included with Spark. This small script is packed into the spark charm source in the ./scripts subdirectory. The sample data is used for benchmarks (only PageRank for now). This may grow quite large in the future, so we utilize Juju Resources for getting this data onto the unit. Sample data originated as follows: - PageRank: https://snap.stanford.edu/data/web-Google.html """ # Handle sparkpi.sh script_source = 'scripts/sparkpi.sh' script_path = Path(script_source) if script_path.exists(): script_target = '/home/ubuntu/sparkpi.sh' new_hash = host.file_hash(script_source) old_hash = unitdata.kv().get('sparkpi.hash') if new_hash != old_hash: hookenv.log('Installing SparkPi script') script_path.copy(script_target) Path(script_target).chmod(0o755) Path(script_target).chown('ubuntu', 'hadoop') unitdata.kv().set('sparkpi.hash', new_hash) hookenv.log('SparkPi script was installed successfully') # Handle sample data sample_source = hookenv.resource_get('sample-data') sample_path = sample_source and Path(sample_source) if sample_path and sample_path.exists() and sample_path.stat().st_size: sample_target = '/home/ubuntu' new_hash = host.file_hash(sample_source) old_hash = unitdata.kv().get('sample-data.hash') if new_hash != old_hash: hookenv.log('Extracting Spark sample data') # Extract the sample data; since sample data does not impact # functionality, log any extraction error but don't fail. try: archive.extract(sample_path, destpath=sample_target) except Exception: hookenv.log('Unable to extract Spark sample data: {}' .format(sample_path)) else: unitdata.kv().set('sample-data.hash', new_hash) hookenv.log('Spark sample data was extracted successfully')
def install_kubernetes_components():
    ''' Unpack the kubernetes worker binaries and install them into
    their runtime locations. '''
    charm_dir = os.getenv('CHARM_DIR')

    # Fetch the archive attached as the 'kubernetes' charm resource.
    try:
        archive = hookenv.resource_get('kubernetes')
    except Exception:
        message = 'Error fetching the kubernetes resource.'
        hookenv.log(message)
        hookenv.status_set('blocked', message)
        return

    if not archive:
        hookenv.log('Missing kubernetes resource.')
        hookenv.status_set('blocked', 'Missing kubernetes resource.')
        return

    # Handle null resource publication, we check if filesize < 1mb
    if os.stat(archive).st_size < 1000000:
        hookenv.status_set('blocked', 'Incomplete kubernetes resource.')
        return

    hookenv.status_set('maintenance', 'Unpacking kubernetes resource.')

    unpack_path = '{}/files/kubernetes'.format(charm_dir)
    os.makedirs(unpack_path, exist_ok=True)
    extract_cmd = ['tar', 'xfvz', archive, '-C', unpack_path]
    hookenv.log(extract_cmd)
    check_call(extract_cmd)

    # Binary name -> destination directory.
    targets = (('kubelet', '/usr/local/bin'),
               ('kube-proxy', '/usr/local/bin'),
               ('kubectl', '/usr/local/bin'),
               ('loopback', '/opt/cni/bin'))
    for binary, dest_dir in targets:
        unpacked = '{}/{}'.format(unpack_path, binary)
        destination = os.path.join(dest_dir, binary)
        install_cmd = ['install', '-v', '-D', unpacked, destination]
        hookenv.log(install_cmd)
        check_call(install_cmd)

    reset_versions()
    set_state('kubernetes-worker.components.installed')
def install():
    '''Unpack and put the Kubernetes master files on the path.'''
    # Get the resource via resource_get
    try:
        archive = hookenv.resource_get('kubernetes')
    except Exception:
        message = 'Error fetching the kubernetes resource.'
        hookenv.log(message)
        hookenv.status_set('blocked', message)
        return

    if not archive:
        hookenv.log('Missing kubernetes resource.')
        hookenv.status_set('blocked', 'Missing kubernetes resource.')
        return

    # Handle null resource publication, we check if filesize < 1mb
    filesize = os.stat(archive).st_size
    if filesize < 1000000:
        hookenv.status_set('blocked', 'Incomplete kubernetes resource.')
        return

    hookenv.status_set('maintenance', 'Unpacking kubernetes resource.')
    files_dir = os.path.join(hookenv.charm_dir(), 'files')
    os.makedirs(files_dir, exist_ok=True)
    # Build the command as an argument list (matching the worker charm's
    # install routine) instead of formatting a shell-style string and
    # split()-ing it: a resource path containing whitespace would
    # otherwise be broken into multiple arguments.
    command = ['tar', '-xvzf', archive, '-C', files_dir]
    hookenv.log(command)
    check_call(command)

    apps = [
        {'name': 'kube-apiserver', 'path': '/usr/local/bin'},
        {'name': 'kube-controller-manager', 'path': '/usr/local/bin'},
        {'name': 'kube-scheduler', 'path': '/usr/local/bin'},
        {'name': 'kubectl', 'path': '/usr/local/bin'},
    ]

    for app in apps:
        unpacked = '{}/{}'.format(files_dir, app['name'])
        app_path = os.path.join(app['path'], app['name'])
        # install -D creates destination dirs as needed and sets modes.
        install = ['install', '-v', '-D', unpacked, app_path]
        hookenv.log(install)
        check_call(install)

    reset_versions()
    set_state('kubernetes-master.components.installed')
def install_kubernetes_e2e():
    ''' Deliver the e2e and kubectl components from the binary resource stream
    packages declared in the charm '''
    charm_dir = os.getenv('CHARM_DIR')
    arch = determine_arch()

    # Get the resource via resource_get
    resource = 'e2e_{}'.format(arch)
    try:
        archive = hookenv.resource_get(resource)
    except Exception:
        message = 'Error fetching the {} resource.'.format(resource)
        hookenv.log(message)
        hookenv.status_set('blocked', message)
        return

    if not archive:
        hookenv.log('Missing {} resource.'.format(resource))
        hookenv.status_set('blocked', 'Missing {} resource.'.format(resource))
        return

    # Handle null resource publication, we check if filesize < 1mb
    filesize = os.stat(archive).st_size
    if filesize < 1000000:
        hookenv.status_set('blocked',
                           'Incomplete {} resource.'.format(resource))
        return

    hookenv.status_set('maintenance',
                       'Unpacking {} resource.'.format(resource))

    unpack_path = '{}/files/kubernetes'.format(charm_dir)
    os.makedirs(unpack_path, exist_ok=True)
    cmd = ['tar', 'xfvz', archive, '-C', unpack_path]
    hookenv.log(cmd)
    check_call(cmd)

    services = ['e2e.test', 'ginkgo', 'kubectl']
    for service in services:
        unpacked = '{}/{}'.format(unpack_path, service)
        app_path = '/usr/local/bin/{}'.format(service)
        install = ['install', '-v', unpacked, app_path]
        # Use check_call so a failed install raises instead of being
        # silently ignored (the previous call() discarded the exit code),
        # matching the other component-install routines in this codebase.
        check_call(install)

    set_state('kubernetes-e2e.installed')
def migrate_resource_checksums():
    ''' Migrate resource checksums from the old schema to the new one '''
    for resource in snap_resources:
        new_key = get_resource_checksum_db_key(resource)
        if db.get(new_key):
            # Already migrated for this resource.
            continue
        path = hookenv.resource_get(resource)
        if path:
            # old key from charms.reactive.helpers.any_file_changed
            legacy_key = 'reactive.files_changed.' + path
            db.set(new_key, db.get(legacy_key))
        else:
            # No resource is attached. Previously, this meant no checksum
            # would be calculated and stored. But now we calculate it as if
            # it is a 0-byte resource, so let's go ahead and do that.
            db.set(new_key, hashlib.md5().hexdigest())
def install_apache_nifi():
    '''Unpack the Apache NiFi resource, point its web UI at the configured
    port, register it as a service and start it.'''
    conf = hookenv.config()
    hookenv.log('Installing Apache NiFi')
    hookenv.status_set('maintenance', 'Installing Apache NiFi')
    filesdir = '{}/files'.format(hookenv.charm_dir())
    # Use a context manager so the archive handle is closed deterministically
    # (the previous code leaked the open TarFile).
    with tarfile.open(hookenv.resource_get('apache-nifi'), 'r') as tfile:
        tfile.extractall(filesdir)
    re_edit_in_place('{}/nifi-1.1.1/conf/nifi.properties'.format(filesdir), {
        r'.*nifi.web.http.port.*':
            'nifi.web.http.port={}'.format(conf['nifi-port']),
    })
    subprocess.check_call(['bash',
                           '{}/nifi-1.1.1/bin/nifi.sh'.format(filesdir),
                           'install'])
    if service_restart('nifi'):
        hookenv.open_port(conf['nifi-port'])
        hookenv.status_set('active', 'Running: standalone mode')
        set_state('apache-nifi.installed')
    else:
        hookenv.status_set('error', 'Failed to start')
def snap_resources_changed():
    '''
    Check if the snapped resources have changed. The first time this method is
    called will report "unknown".

    Returns: "yes" in case a snap resource file has changed,
             "no" in case a snap resources are the same as last call,
             "unknown" if it is the first time this method is called

    '''
    db = unitdata.kv()
    paths = [hookenv.resource_get(name)
             for name in ('kubectl', 'kube-apiserver',
                          'kube-controller-manager', 'kube-scheduler',
                          'cdk-addons')]
    if not db.get('snap.resources.fingerprint.initialised'):
        # First run: record the fingerprints so future calls can compare,
        # but report that we cannot know whether anything changed.
        db.set('snap.resources.fingerprint.initialised', True)
        any_file_changed(paths)
        return 'unknown'
    return 'yes' if any_file_changed(paths) else 'no'
def install_dockerbeat():
    '''Install the dockerbeat binary from the charm resource and render
    the init configuration matching this release's init system.'''
    beat_resource = resource_get('dockerbeat')
    target = '/usr/local/bin/dockerbeat'
    if not beat_resource:
        status_set('blocked', 'Please provide the dockerbeat binary')
        return
    install(beat_resource, target)
    os.chmod(target, 0o755)

    # render the apropriate init systems configuration:
    # trusty boots with upstart, later series use systemd.
    if lsb_release()['DISTRIB_CODENAME'] == 'trusty':
        render('upstart', '/etc/init/dockerbeat.conf', {})
    else:
        render('systemd', '/etc/systemd/system/dockerbeat.service', {})

    set_state('dockerbeat.installed')
def install():
    '''Create the etl user and unpack Pentaho Data Integration to /opt,
    making its entry-point scripts executable.'''
    status_set('maintenance', 'Installing PDI')
    adduser('etl')
    mkdir('/home/etl')
    chownr('/home/etl', 'etl', 'etl', chowntopdir=True)
    os.chmod('/home/etl', 0o755)
    pdiarchive = hookenv.resource_get('pdi-archive')
    # Context manager closes the archive handle (previously leaked).
    with tarfile.open(pdiarchive) as tar:
        tar.extractall("/opt/")
    chownr('/opt/data-integration', 'etl', 'etl', chowntopdir=True)
    # Add the execute bit to each PDI launcher script. As before, the base
    # mode bits are taken from spoon.sh's current mode.
    st = os.stat('/opt/data-integration/spoon.sh')
    for script in ('spoon.sh', 'carte.sh', 'encr.sh', 'kitchen.sh', 'pan.sh'):
        os.chmod('/opt/data-integration/' + script, st.st_mode | stat.S_IEXEC)
    status_set('maintenance', 'PDI Installed')
    set_state('pdi.installed')
def check_custom_theme():
    '''Install the theme from the "theme" resource into CUSTOM_THEME_DIR
    when the custom-theme config option is set, linking any bundled
    local_settings.py into LOCAL_DIR.'''
    if not config('custom-theme'):
        log('No custom theme configured, exiting')
        return
    try:
        os.mkdir(CUSTOM_THEME_DIR)
    except OSError as e:
        # 17 == EEXIST. Compare with ==, not `is`: identity on ints only
        # works due to CPython small-int caching. Re-raise anything else
        # instead of silently swallowing it (e.g. EACCES).
        if e.errno != 17:
            raise
    theme_file = resource_get('theme')
    log('Retreived resource: {}'.format(theme_file))
    if theme_file:
        with tarfile.open(theme_file, 'r:gz') as in_file:
            in_file.extractall(CUSTOM_THEME_DIR)
        custom_settings = '{}/local_settings.py'.format(CUSTOM_THEME_DIR)
        if os.path.isfile(custom_settings):
            try:
                os.symlink(custom_settings, LOCAL_DIR + 'custom_theme.py')
            except OSError as e:
                if e.errno != 17:  # EEXIST: link already present is fine
                    raise
        # Previous format string had no placeholder, so the file name was
        # silently dropped from the log message.
        log('Custom theme updated: {}'.format(theme_file))
def download_archive():
    '''Extract the ghost-stable resource into the node dist dir, skipping
    the work when the archive checksum matches the recorded one.'''
    check_call(['apt-get', 'install', '-qy', 'unzip'])
    config = hookenv.config()
    ghost_source = hookenv.resource_get('ghost-stable')
    ghost_source_checksum = host.file_hash(ghost_source, 'sha256')

    # NOTE(review): the checksum is read from charm config here but stored
    # in kv below — confirm these are meant to be the same store.
    if config.get('checksum', 0) == ghost_source_checksum:
        hookenv.log("Checksums match no need to extract source archive.")
        return

    kv.set('checksum', ghost_source_checksum)

    # delete the app dir contents (but not the dir itself).
    # listdir() yields bare names, so join with dist_dir: the previous
    # code tested/removed paths relative to the CWD instead.
    dist_dir = node_dist_dir()
    for entry in listdir(dist_dir):
        full = path.join(dist_dir, entry)
        if path.isfile(full):
            unlink(full)
        elif path.isdir(full):
            rmtree(full)

    cmd = ('unzip', '-uo', ghost_source, '-d', dist_dir)
    hookenv.log("Extracting Ghost: {}".format(' '.join(cmd)))
    check_call(cmd)
def get_snap_resource_paths():
    '''Return the local file path of each worker snap resource.'''
    return [hookenv.resource_get(name)
            for name in ('kubectl', 'kubelet', 'kube-proxy')]
def fetch_bigtop_release(self):
    """
    Unpack or clone the Bigtop repo.

    This will fetch the upstream source needed to deploy Bigtop
    applications. To support restricted networks where git cloning may
    not be possible, this method will first try to unpack the attached
    bigtop-repo resource. If this does not exist, it will fall back
    to cloning the upstream repo with an appropriate branch.

    The source will be availabe in the bigtop_base directory.
    """
    hookenv.status_set('maintenance', 'fetching bigtop source')
    # Start from a clean tree every time.
    Path(self.bigtop_base).rmtree_p()

    filename = hookenv.resource_get('bigtop-repo')
    filepath = filename and Path(filename)
    # Only use the resource if it is attached and non-empty (Juju can
    # publish a zero-byte placeholder when nothing was attached).
    if filepath and filepath.exists() and filepath.stat().st_size:
        new_hash = file_hash(filename)
        old_hash = unitdata.kv().get('bigtop-repo.hash')
        if new_hash != old_hash:
            hookenv.status_set('maintenance', 'unzipping bigtop-repo')
            with chdir(filepath.dirname()):
                try:
                    # NB: we cannot use the payload.archive helper because
                    # it relies on Zipfile.extractall, which doesn't
                    # preserve perms (https://bugs.python.org/issue15795).
                    # Subprocess an unzip the 'ol fashioned way.
                    utils.run_as('root', 'unzip', '-qo', filepath)
                except subprocess.CalledProcessError as e:
                    hookenv.status_set('blocked',
                                       'failed to unzip bigtop-repo')
                    raise BigtopError(
                        u"Failed to unzip {}: {}".format(filepath, e))
                else:
                    # We may not know the name of the archive's subdirs,
                    # but we always want to treat the dir with bigtop.bom
                    # as the source root dir. Copy this tree to bigtop_base.
                    for dirpath, dirs, files in os.walk(filepath.dirname()):
                        for name in files:
                            if name == 'bigtop.bom':
                                Path(dirpath).copytree(
                                    self.bigtop_base, symlinks=True)
                                break
            # Record the hash only after a successful unpack so a failed
            # attempt is retried next time.
            unitdata.kv().set('bigtop-repo.hash', new_hash)
        else:
            hookenv.log('Resource bigtop-repo is unchanged')
    else:
        # No usable resource: clone the branch that matches our version.
        hookenv.status_set('maintenance', 'cloning bigtop repo')
        bigtop_repo = 'https://github.com/apache/bigtop.git'
        if self.bigtop_version == '1.1.0':
            bigtop_branch = 'branch-1.1'
        elif self.bigtop_version.startswith('1.2'):
            bigtop_branch = 'branch-1.2'
        elif self.bigtop_version.startswith('1.3'):
            bigtop_branch = 'branch-1.3'
        elif self.bigtop_version == 'master':
            bigtop_branch = 'master'
        else:
            raise BigtopError(
                u"Unknown Bigtop version for repo branch: {}".format(
                    self.bigtop_version))

        # NB: we cannot use the fetch.install_remote helper because that
        # relies on the deb-only python3-apt package. Subordinates cannot
        # install deb dependencies into their venv, so to ensure bigtop
        # subordinate charms succeed, subprocess the required git clone.
        try:
            utils.run_as('root', 'git', 'clone', bigtop_repo,
                         '--branch', bigtop_branch, '--single-branch',
                         self.bigtop_base)
        except subprocess.CalledProcessError as e:
            hookenv.status_set('blocked', 'failed to clone bigtop repo')
            raise BigtopError(
                u"Failed to clone {}: {}".format(bigtop_repo, e))

    # Make sure the repo looks like we expect
    if Path(self.bigtop_base / 'bigtop.bom').exists():
        hookenv.status_set('waiting', 'bigtop source fetched')
    else:
        hookenv.status_set('blocked', 'invalid bigtop source')
        raise BigtopError(
            u"Unrecognized source repo in {}".format(self.bigtop_base))
def check_resources_for_upgrade_needed():
    '''Flag an upgrade when any worker snap resource file has changed.'''
    hookenv.status_set('maintenance', 'Checking resources')
    resource_paths = [hookenv.resource_get(name)
                      for name in ('kubectl', 'kubelet', 'kube-proxy')]
    if any_file_changed(resource_paths):
        set_upgrade_needed()
def reinstall_spark(force=False):
    """
    Gather the state of our deployment and (re)install when leaders, hadoop,
    sparkpeers, or zookeepers change. In the future this should also fire
    when Cassandra or any other storage comes or goes. Config changed events
    will also call this method, but that is invoked with a separate handler
    below.

    Use a deployment-matrix dict to track changes and (re)install as needed.

    :param bool force: reinstall even if the deployment matrix is unchanged.
    """
    spark_master_host = leadership.leader_get('master-fqdn')
    if not spark_master_host:
        hookenv.status_set('maintenance', 'juju leader not elected yet')
        return

    mode = hookenv.config()['spark_execution_mode']
    peers = None
    zks = None

    # If mode is standalone and ZK is ready, we are in HA. Do not consider
    # the master_host from juju leadership in our matrix. ZK handles this.
    if (mode == 'standalone' and is_state('zookeeper.ready')):
        spark_master_host = ''
        zk = RelationBase.from_state('zookeeper.ready')
        zks = zk.zookeepers()
        # peers are only used to set our MASTER_URL in standalone HA mode
        peers = get_spark_peers()

    # Construct a deployment matrix
    sample_data = hookenv.resource_get('sample-data')
    deployment_matrix = {
        'hdfs_ready': is_state('hadoop.hdfs.ready'),
        'peers': peers,
        # hash (not path) so replacing the resource file is detected
        'sample_data': host.file_hash(sample_data) if sample_data else None,
        'spark_master': spark_master_host,
        'yarn_ready': is_state('hadoop.yarn.ready'),
        'zookeepers': zks,
    }

    # No-op if we are not forcing a reinstall or our matrix is unchanged.
    if not (force or data_changed('deployment_matrix', deployment_matrix)):
        report_status()
        return

    # (Re)install based on our execution mode
    hookenv.status_set('maintenance',
                       'configuring spark in {} mode'.format(mode))
    hookenv.log("Configuring spark with deployment matrix: {}".format(
        deployment_matrix))
    if mode.startswith('yarn') and is_state('hadoop.yarn.ready'):
        install_spark_yarn()
    elif mode.startswith('local') or mode == 'standalone':
        install_spark_standalone(zks, peers)
    else:
        # Something's wrong (probably requested yarn without yarn.ready).
        remove_state('spark.started')
        report_status()
        return

    # restart services to pick up possible config changes
    spark = Spark()
    spark.stop()
    spark.start()
    set_state('spark.started')
    report_status()