def install_crowd():
    """Install Atlassian Crowd from the configured version's tarball.

    Creates the ``crowd`` user, downloads and unpacks the release into
    CROWD_INSTALL, creates the version dir and the crowd-home data dirs,
    chowns everything to ``crowd`` and raises the ``crowd.installed`` flag.
    """
    host.adduser('crowd')
    fetch.install_remote(
        source=CROWD_URL.format(hookenv.config('crowd-version')),  # version
        dest=CROWD_INSTALL,
        # checksum=None,
        # hash_type='sha1'
    )
    host.lchownr(
        CROWD_INSTALL,
        owner='crowd',
        group='crowd',
    )
    # 'directory' instead of 'dir' — don't shadow the builtin.
    for directory in [
        '{}/atlassian-crowd-{}'.format(CROWD_INSTALL,
                                       hookenv.config('crowd-version')),
        '/var/crowd-home',
        '/var/crowd-home/shared/',
    ]:
        # BUG FIX: the original bare ``except: pass`` swallowed every
        # exception (permission errors, typos, KeyboardInterrupt).  Only
        # tolerate filesystem errors such as "already exists".
        try:
            mkdir(directory)
        except OSError:
            pass
        host.chownr(
            directory,
            owner='crowd',
            group='crowd',
            chowntopdir=True,
        )
    set_flag('crowd.installed')
def test_installs_remote(self, _plugins, _log):
    """install_remote skips handlers that refuse a URL, moves past ones
    raising UnhandledSource, and returns the first successful result."""
    refusing = MagicMock(name="h1")
    refusing.can_handle.return_value = "Nope"
    failing = MagicMock(name="h2")
    failing.can_handle.return_value = True
    failing.install.side_effect = fetch.UnhandledSource()
    succeeding = MagicMock(name="h3")
    succeeding.can_handle.return_value = True
    succeeding.install.return_value = "foo"
    _plugins.return_value = [refusing, failing, succeeding]
    for url in self.valid_urls:
        outcome = fetch.install_remote(url)
        for handler in (refusing, failing, succeeding):
            handler.can_handle.assert_called_with(url)
        refusing.install.assert_not_called()
        failing.install.assert_called_with(url)
        succeeding.install.assert_called_with(url)
        self.assertEqual(outcome, "foo")
    # Extra keyword arguments are forwarded to the handler.
    fetch.install_remote('url', extra_arg=True)
    failing.install.assert_called_with('url', extra_arg=True)
def setup_git(self, branch, git_dir):
    """Clone tempest and symlink in rendered tempest.conf"""
    conf = hookenv.config()
    # Clone only on first run; install_remote handles the git fetch.
    if not os.path.exists(git_dir):
        fetch.install_remote(str(conf['tempest-source']),
                             dest=str(git_dir),
                             branch=str(branch),
                             depth='1')
    # Point the checkout at the charm-rendered tempest.conf.
    conf_symlink = git_dir + '/tempest/etc/tempest.conf'
    if not os.path.exists(conf_symlink):
        os.symlink(self.TEMPEST_CONF, conf_symlink)
def update_radarr(self):
    '''Unpacks downloaded Radarr build'''
    # recursive chown and make directory
    self.configure_installdir()
    release_url = self.get_latest_release()
    if not release_url:
        return False
    # do the download and unpack
    fetch.install_remote(release_url, dest='/opt/')
    # another recursive chown to fix permissions
    self.configure_installdir()
    return True
def install_cucumber():
    """Fetch and checksum-verify the rebacca payload, then mark installed.

    Blocks with a user-facing status message until both config options
    are provided.
    """
    for option in ('rebacca_url', 'rebacca_sum'):
        if not config(option):
            status_set('blocked', '%s not configured' % option)
            return
    # curl's the file passed as first argument, and verifies with
    # second argument
    install_remote(config('rebacca_url'), checksum=config('rebacca_sum'))
    set_state('cucumber.installed')
def install_review_queue():
    """Install the Review Queue application from the configured repo.

    Fetches the source into a temporary directory, moves it to APP_DIR,
    builds the virtualenv, installs systemd or upstart service files,
    copies credentials/config, fixes ownership, and raises the
    installed/restart states plus follow-up config updates.
    """
    status_set('maintenance', 'Installing Review Queue')
    with tempfile.TemporaryDirectory() as tmp_dir:
        install_dir = install_remote(config['repo'], dest=tmp_dir)
        contents = os.listdir(install_dir)
        if install_dir == tmp_dir and len(contents) == 1:
            # unlike the git handler, the archive handler just returns tmp_dir
            # even if the archive contents are nested in a folder as they
            # should be, so we have to normalize for that here
            install_dir = os.path.join(install_dir, contents[0])
        # Replace any previous checkout wholesale.
        shutil.rmtree(APP_DIR, ignore_errors=True)
        log('Moving app source from {} to {}'.format(
            install_dir, APP_DIR))
        shutil.move(install_dir, APP_DIR)
    # Build the app's virtualenv via its Makefile target.
    subprocess.check_call('make .venv'.split(), cwd=APP_DIR)
    # Install service definitions for whichever init system is present.
    if init_is_systemd():
        shutil.copyfile(SYSTEMD_SRC, SYSTEMD_DEST)
        shutil.copyfile(SYSTEMD_TASK_SRC, SYSTEMD_TASK_DEST)
        subprocess.check_call(['systemctl', 'daemon-reload'])
    else:
        shutil.copyfile(UPSTART_SRC, UPSTART_DEST)
        shutil.copyfile(UPSTART_TASK_SRC, UPSTART_TASK_DEST)
        subprocess.check_call(['initctl', 'reload-configuration'])
    shutil.copyfile(LP_CREDS_SRC, LP_CREDS_DEST)
    shutil.copyfile(APP_INI_SRC, APP_INI_DEST)
    chownr(APP_DIR, APP_USER, APP_GROUP)
    set_state('reviewqueue.installed')
    # Propagate current config/relations into the fresh install.
    change_config()
    update_db()
    update_amqp()
    update_secret()
    set_state('reviewqueue.restart')
def _git_clone_and_install_single(repo, branch, depth, parent_dir,
                                  http_proxy, update_requirements):
    """Clone and install a single git repository.

    Clones ``repo`` at ``branch`` (shallow to ``depth``) under
    ``parent_dir``, optionally syncs its requirements from the global
    requirements repo, then pip-installs it into the shared venv.
    Returns the repo checkout directory.
    """
    if not os.path.exists(parent_dir):
        # BUG FIX: the original logged "Directory already exists ...
        # No need to create directory" here — the opposite of what this
        # branch does (it runs when the dir is missing, then creates it).
        juju_log('Directory does not exist at {}. '
                 'Creating it.'.format(parent_dir))
        os.mkdir(parent_dir)

    juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
    repo_dir = install_remote(
        repo, dest=parent_dir, branch=branch, depth=depth)

    venv = os.path.join(parent_dir, 'venv')

    if update_requirements:
        # The global requirements repo must already be present.
        if not requirements_dir:
            error_out('requirements repo must be cloned before '
                      'updating from global requirements.')
        _git_update_requirements(venv, repo_dir, requirements_dir)

    juju_log('Installing git repo from dir: {}'.format(repo_dir))
    if http_proxy:
        pip_install(repo_dir, proxy=http_proxy, venv=venv)
    else:
        pip_install(repo_dir, venv=venv)

    return repo_dir
def setup_insightedge_on_spark(spark):
    """Install InsightEdge from the charm resource and register its jars
    on the Spark classpath.

    No-op for the install step if /usr/lib/insightedge already exists;
    blocks the unit if the resource cannot be fetched.
    """
    destination = Path('/usr/lib/insightedge')
    if not destination.exists():
        hookenv.status_set('maintenance', 'fetching insightedge')
        filename = hookenv.resource_get('insightedge')
        if not filename:
            hookenv.status_set("blocked",
                               "unable to fetch insightedge resource")
            hookenv.log("Failed to fetch InsightEdge resource")
            return
        hookenv.status_set('maintenance', 'installing insightedge')
        # install_remote unpacks the archive and returns the extract dir.
        extracted = Path(fetch.install_remote('file://' + filename))
        destination.rmtree_p()  # in case doing a re-install
        extracted.dirs()[0].copytree(destination)  # copy nested dir contents
    hookenv.status_set('maintenance', 'configuring insightedge')
    with host.chdir(destination):
        # Source the vendored shell helper and ask it for the jar list,
        # comma-separated (the trailing ',' is get_libs' separator arg).
        insightedge_jars = subprocess.check_output([
            'bash', '-c', '. {}; get_libs ,'.format(
                destination / 'sbin' / 'common-insightedge.sh')
        ], env={'INSIGHTEDGE_HOME': destination}).decode('utf8')
    spark.register_classpaths(insightedge_jars.split(','))
    set_state('insightedge.installed')
def install(self, force=False):
    """Install InsightEdge from the charm resource.

    Handles the nested top-level directory inside the archive and the
    non-default resource name. Returns False when the resource is not
    available, True otherwise.
    """
    filename = hookenv.resource_get('insightedge')
    destination = self.dist_config.path('insightedge')
    if not filename:
        return False  # failed to fetch
    if destination.exists() and not force:
        return True
    destination.rmtree_p()  # clear any previous install
    unpacked = Path(fetch.install_remote('file://' + filename))
    unpacked.dirs()[0].copytree(destination)  # archive nests one dir
    host.chownr(destination, 'ubuntu', 'root')
    daemon = self.dist_config.path('zeppelin') / 'bin' / 'zeppelin-daemon.sh'
    daemon.chmod('a+x')
    self.dist_config.add_dirs()
    self.dist_config.add_packages()
    return True
def install_layer_openmano():
    """Check out openmano, create its user, and expose its CLI scripts."""
    status_set('maintenance', 'Installing')
    cfg = config()
    # TODO change user home
    # XXX security issue!
    host.adduser(USER, password=USER)
    # TODO check out a branch
    dest_dir = install_remote(cfg['source'], dest=INSTALL_PATH,
                              depth='1', branch='master')
    os.mkdir(os.path.join(dest_dir, 'logs'))
    host.chownr(dest_dir, USER, USER)
    kvdb.set('repo', dest_dir)
    bin_dir = '/home/{}/bin'.format(USER)
    os.mkdir(bin_dir)
    # Symlink the repo's entry points into the user's bin dir.
    for rel_src, link_name in (
            ('openmano', 'openmano'),
            ('scripts/openmano-report.sh', 'openmano-report.sh'),
            ('scripts/service-openmano.sh', 'service-openmano')):
        os.symlink('{}/{}'.format(dest_dir, rel_src),
                   '{}/{}'.format(bin_dir, link_name))
    open_port(9090)
    set_state('openmano.installed')
def _git_clone_and_install_single(repo, branch, update_requirements=False):
    """Clone and install a single git repository."""
    parent = "/mnt/openstack-git/"
    checkout = os.path.join(parent, os.path.basename(repo))
    if not os.path.exists(parent):
        juju_log('Host dir not mounted at {}. '
                 'Creating directory there instead.'.format(parent))
        os.mkdir(parent)
    # Reuse an existing checkout rather than cloning again.
    if os.path.exists(checkout):
        repo_dir = checkout
    else:
        juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
        repo_dir = install_remote(repo, dest=parent, branch=branch)
    if update_requirements:
        if not requirements_dir:
            error_out('requirements repo must be cloned before '
                      'updating from global requirements.')
        _git_update_requirements(repo_dir, requirements_dir)
    juju_log('Installing git repo from dir: {}'.format(repo_dir))
    pip_install(repo_dir)
    return repo_dir
def _git_clone_and_install_single(repo, branch, parent_dir,
                                  update_requirements):
    """Clone and install a single git repository.

    Clones ``repo`` at ``branch`` under ``parent_dir`` unless a checkout
    already exists there, optionally syncs its requirements from the
    global requirements repo, then pip-installs it.  Returns the repo
    checkout directory.
    """
    dest_dir = os.path.join(parent_dir, os.path.basename(repo))
    if not os.path.exists(parent_dir):
        # BUG FIX: the original logged "Directory already exists ...
        # No need to create directory" here — the opposite of what this
        # branch does (it runs when the dir is missing, then creates it).
        juju_log('Directory does not exist at {}. '
                 'Creating it.'.format(parent_dir))
        os.mkdir(parent_dir)

    if not os.path.exists(dest_dir):
        juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
        repo_dir = install_remote(repo, dest=parent_dir, branch=branch)
    else:
        # Reuse the existing checkout.
        repo_dir = dest_dir

    if update_requirements:
        if not requirements_dir:
            error_out('requirements repo must be cloned before '
                      'updating from global requirements.')
        _git_update_requirements(repo_dir, requirements_dir)

    juju_log('Installing git repo from dir: {}'.format(repo_dir))
    pip_install(repo_dir)

    return repo_dir
def trove_dashboard_git_clone(config_yaml):
    """
    Clone from git repository specified in the config.yaml.

    Assuming here the trove dashboard is not supplied in a normal
    distro package, meaning the only install option is to specify the
    git url in the config.yaml. (No default location is specified here
    either in the code.)
    """
    parsed = _git_yaml_load(config_yaml)
    git_repository = None
    for entry in parsed['repositories']:
        if entry['name'] == TROVE_DASHBOARD:
            git_repository = entry['repository']
            git_branch = entry['branch']
    if git_repository is None:
        error_out('Missing repository in config.yaml')
    juju_log('Git repository: {} branch: {}'.format(git_repository,
                                                    git_branch))
    clone_dir = install_remote(git_repository,
                               dest=GIT_CLONE_PARENT_DIR,
                               branch=git_branch,
                               depth='1')
    juju_log('Cloned into directory: {}'.format(clone_dir))
    return clone_dir
def install_layer_openmano():
    """Fetch the openmano sources and wire up the user, symlinks and port."""
    status_set('maintenance', 'Installing')
    cfg = config()
    # TODO change user home
    # XXX security issue!
    host.adduser(USER, password=USER)
    # TODO check out a branch
    dest_dir = install_remote(
        cfg['source'], dest=INSTALL_PATH, depth='1', branch='master')
    os.mkdir(os.path.join(dest_dir, 'logs'))
    host.chownr(dest_dir, USER, USER)
    kvdb.set('repo', dest_dir)
    home_bin = '/home/{}/bin'.format(USER)
    os.mkdir(home_bin)
    os.symlink('{}/openmano'.format(dest_dir),
               home_bin + '/openmano')
    os.symlink('{}/scripts/openmano-report.sh'.format(dest_dir),
               home_bin + '/openmano-report.sh')
    os.symlink('{}/scripts/service-openmano.sh'.format(dest_dir),
               home_bin + '/service-openmano')
    open_port(9090)
    set_state('openmano.installed')
def install_uca_tracker():
    """Install the UCA tracker site, render its templates, fix ownership."""
    apt_install(['python-git', 'python-yaml'], fatal=True)
    install_remote('lp:ubuntu-reports', dest='/opt')
    # Render every template in the map with its own target/perms/context.
    for template, info in template_map.items():
        render(source=template,
               target=info['target'],
               owner=info['owner'],
               group=info['group'],
               perms=info['perms'],
               context=info['context'])
    log_dir = '/var/log/uca-tracker'
    if not os.path.exists(log_dir):
        os.mkdir(log_dir)
    chownr('/usr/share/nginx/www', 'www-data', 'www-data')
    set_state('uca-tracker.installed')
def deploy_service_payload(payload_path):
    """
    Gets serial vault payload, uncompresses it in a temporary folder and:
    - moves serial-vault and serial-vault-admin to /usr/lib/serial-vault
    - moves static assets to /usr/share/serial-vault
    - moves serial-vault.service to /etc/systemd/system
    - creates settings and store in /etc/serial-vault/settings.yaml
    - creates launchers and stores them in /usr/bin which will use the
      ones in /usr/lib/serial-vault
    """
    hookenv.status_set('maintenance', 'Deploy service payload')

    # In case there is no payload path, read it from config payload setting
    if not payload_path:
        config = hookenv.config()
        payload_path = config['payload']
        if not payload_path:
            raise Exception('payload not available')

    tmp_dir = tempfile.mkdtemp()
    payload_dir = install_remote(payload_path, dest=tmp_dir)
    if payload_dir == tmp_dir:
        log('Got binaries tgz at {}'.format(payload_dir))

    # Bail out (leaving the unit untouched) if the payload is incomplete.
    if not os.path.isfile(os.path.join(payload_dir, 'serial-vault')):
        log('Could not find serial-vault binary')
        return
    if not os.path.isfile(os.path.join(payload_dir, 'serial-vault-admin')):
        log('Could not find serial-vault-admin binary')
        return
    if not os.path.isdir(os.path.join(payload_dir, 'static')):
        log('Could not find static assets')
        return

    # In case this is updating assets, remove old ones folder.
    if os.path.exists(ASSETSDIR):
        shutil.rmtree(ASSETSDIR)
    # BUG FIX: the original passed mode=755 (decimal, i.e. 0o1363) to
    # os.mkdir; the intended permission bits are octal 0o755 (rwxr-xr-x).
    os.mkdir(ASSETSDIR, mode=0o755)
    if not os.path.exists(CONFDIR):
        os.mkdir(CONFDIR, mode=0o755)
    if not os.path.exists(LIBDIR):
        os.mkdir(LIBDIR, mode=0o755)

    shutil.move(os.path.join(payload_dir, 'serial-vault'),
                os.path.join(LIBDIR, 'serial-vault'))
    shutil.move(os.path.join(payload_dir, 'serial-vault-admin'),
                os.path.join(LIBDIR, 'serial-vault-admin'))
    shutil.move(os.path.join(payload_dir, 'static'), ASSETSDIR)
    shutil.copy(SYSTEMD_UNIT_FILE, '/etc/systemd/system/')
    create_launchers()
    # Reload daemon, as systemd service task file has been overriden
    reload_systemd()
    # Deploy cron job to cache store accounts if this is an admin service unit
    deploy_cache_accounts_cron_job()
    hookenv.status_set('maintenance', 'Service payload deployed')
def install():
    """Install the OpenDaylight controller.

    Installs either from the configured tarball URL (unpacked under /opt
    with a stable ``opendaylight-karaf`` symlink) or from the distro karaf
    package; then installs the init/systemd service, creates the
    ``opendaylight`` user and log directory, writes the maven config and
    starts the controller.

    NOTE(review): the ``0755`` literals are Python 2 octal syntax, so this
    module appears to target Python 2 — confirm before porting.
    """
    if config.get("install-sources"):
        configure_sources(update=True, sources_var="install-sources",
                          keys_var="install-keys")
    # install packages
    apt_install(PACKAGES, fatal=True)
    install_url = config["install-url"]
    if install_url:
        # install opendaylight from tarball
        # this extracts the archive too
        install_remote(install_url, dest="/opt")
        # The extracted dirname. Look at what's on disk instead of mangling, so
        # the distribution tar.gz's name doesn't matter.
        install_dir_name = [
            f for f in os.listdir("/opt")
            if f.startswith("distribution-karaf")][0]
        if not os.path.exists("/opt/opendaylight-karaf"):
            os.symlink(install_dir_name, "/opt/opendaylight-karaf")
    else:
        apt_install([KARAF_PACKAGE], fatal=True)
        install_dir_name = "opendaylight-karaf"
    if init_is_systemd():
        shutil.copy("files/odl-controller.service", "/lib/systemd/system")
        service('enable', 'odl-controller')
    else:
        shutil.copy("files/odl-controller.conf", "/etc/init")
    adduser("opendaylight", system_user=True)
    mkdir("/home/opendaylight", owner="opendaylight", group="opendaylight",
          perms=0755)
    check_call(
        ["chown", "-R", "opendaylight:opendaylight",
         os.path.join("/opt", install_dir_name)])
    mkdir("/var/log/opendaylight", owner="opendaylight",
          group="opendaylight", perms=0755)
    # install features
    write_mvn_config()
    service_start("odl-controller")
def git_clone(src, destination):
    '''Clone a git repo's master branch via install_remote and return
    the resulting checkout directory.'''
    return install_remote(src, dest=destination, branch='master', depth=None)
def fetch_package():
    """Download, verify and install the DSLAM manager package.

    Blocks the unit with a status message until both ``download_url``
    and ``download_sum`` are configured.
    """
    # first concern - we haven't checked config
    cfg = config()
    if not (cfg.get('download_url') and cfg.get('download_sum')):
        # we are not configured if this is true, message to the user
        # and return
        status_set('blocked', 'Charm is not configured, please set '
                   'download_url and download_sum')
        return
    # BUG FIX: the checksum was passed positionally, so it landed in
    # install_remote's second parameter (dest) and the download was
    # never actually verified.  Pass it by keyword instead.
    download_path = install_remote(cfg['download_url'],
                                   checksum=cfg['download_sum'])
    install_dslam_manager(download_path)
    set_state('dlsam_manager.fetched')
def config_changed():
    """On a repo change, clone it and run its bundled remote-deployer."""
    config = hookenv.config()
    if not config.changed('repo'):
        return
    repo = config['repo']
    if repo == "":
        return
    hookenv.status_set('waiting', 'cloning %s' % repo)
    dest = install_remote(repo)
    hookenv.status_set('waiting', 'cloned %s' % repo)
    # Consider payloads
    subprocess.check_call(
        os.path.join(dest, "remote-deployer"),
        cwd=dest,
    )
    hookenv.status_set('active', 'Ready')
def config_changed():
    """On a repo change, clone it and run its bundled github-deployer."""
    config = hookenv.config()
    if config.changed('repo') and config['repo'] != "":
        cloned = config['repo']
        hookenv.status_set('waiting', 'cloning %s' % cloned)
        dest = install_remote(cloned)
        hookenv.status_set('waiting', 'cloned %s' % cloned)
        # Consider payloads
        subprocess.check_call(
            os.path.join(dest, "github-deployer"),
            cwd=dest,
        )
        hookenv.status_set('active', 'Ready')
def install():
    """Install the app when the configured repo changes; manage the port."""
    if db.get("repo") != config["repo"]:
        status_set("maintenance", "Installing app")
        apt_install(APT_PKGS)
        src_dir = install_remote(config["repo"], dest="/tmp", depth=1)
        # Replace any previous install wholesale.
        shutil.rmtree(APP_DIR, ignore_errors=True)
        log("Moving app source from {} to {}".format(src_dir, APP_DIR))
        shutil.move(src_dir, APP_DIR)
        subprocess.check_call("make .venv".split(), cwd=APP_DIR)
        shutil.copyfile(UPSTART_SRC, UPSTART_DEST)
        chownr(APP_DIR, APP_USER, APP_GROUP)
        db.set("repo", config["repo"])
    if config.changed("port"):
        open_port(config["port"])
        if config.previous("port"):
            close_port(config.previous("port"))
def git_clone(config_yaml):
    """
    Clone the opsmgr repository named in the charm config.

    opsmgr is not supplied in a normal distro package, so the only
    install option is the git url given via config.  Returns the clone
    directory.
    """
    repository = config('git-repository')
    branch = config('git-branch')
    juju_log('Git repository: {} branch: {}'.format(repository, branch))
    clone_dir = install_remote(repository,
                               dest=GIT_CLONE_PARENT_DIR,
                               branch=branch,
                               depth='1')
    juju_log('Cloned into directory: {}'.format(clone_dir))
    return clone_dir
def clone_repository(branch='master'):
    ''' Wrapper method around charmhelpers.install_remote to handle
        fetching of a vcs url to deploy a static website for use in the
        NGinx container. '''
    repo_dir = None
    if config.get('repository'):
        hookenv.status_set('maintenance', 'Cloning repository')
        if not config.changed('repository'):
            # NOTE(review): this cached value is immediately overwritten
            # by the unconditional install_remote call below, so this
            # branch is dead code.  Presumably the clone should be
            # skipped when the repository is unchanged — confirm intent
            # before changing behavior.
            repo_dir = db.get('repo_dir')
        repo_dir = install_remote(config.get('repository'),
                                  dest=config.get('webroot'),
                                  branch=branch,
                                  depth=None)
        db.set('repo_dir', repo_dir)
        # Restart the NGinx container on the fresh checkout.
        stop_container()
        run_container(repo_dir)
        hookenv.status_set('active', '')
def install_etcd():
    """Fetch etcd, stage its files under /opt/etcd, and link the
    binaries into /usr/local/bin."""
    source = hookenv.config('source')
    sha = hookenv.config('source-sum')
    unpack_dir = path(fetch.install_remote(source, 'fetched', sha))
    # Copy the payload into place on the system
    etcd_dir = path('/opt/etcd')
    for subdir in unpack_dir.dirs():
        for payload_file in path(subdir).files():
            payload_file.copy(etcd_dir)
    # Replace any stale symlinks for the two binaries.
    for executable in ("etcd", "etcdctl"):
        origin = etcd_dir / executable
        target = path('/usr/local/bin/%s' % executable)
        if target.exists():
            target.remove()
        origin.symlink(target)
    hookenv.open_port(4001)
    db.set('installed', True)
def install_etcd():
    """Download the etcd binary payload, install it under /opt/etcd and
    symlink etcd/etcdctl into /usr/local/bin."""
    bin_source = hookenv.config('bin-source')
    checksum = hookenv.config('source-sum')
    staged = path(fetch.install_remote(bin_source, 'fetched', checksum))
    # Copy the payload into place on the system
    etcd_dir = path('/opt/etcd')
    for extracted_dir in staged.dirs():
        for item in path(extracted_dir).files():
            item.copy(etcd_dir)
    for name in ("etcd", "etcdctl"):
        src = etcd_dir / name
        link = path('/usr/local/bin/%s' % name)
        # Drop any stale link before re-creating it.
        if link.exists():
            link.remove()
        src.symlink(link)
    hookenv.open_port(4001)
    db.set('installed', True)
def install(self, force=False):
    ''' Create the directories. This method is to be called only once.

    Installs Zeppelin either from the charm resource or, when the
    resource is an empty placeholder, from jujuresources (S3 fallback).

    :param bool force: Force the execution of the installation even if
    this is not the first installation attempt.
    :returns: True on success, False when resources are unavailable.
    '''
    destination = self.dist_config.path('zeppelin')
    if not self.verify_resources():
        return False
    if destination.exists() and not force:
        return True
    try:
        filename = hookenv.resource_get('zeppelin')
        if not filename:
            return False
        if Path(filename).size == 0:
            # work around charm store resource upload issue
            # by falling-back to pulling from S3
            raise NotImplementedError()
        destination.rmtree_p()  # if reinstalling
        extracted = Path(fetch.install_remote('file://' + filename))
        extracted.dirs()[0].copytree(destination)  # only copy nested dir
    except NotImplementedError:
        # Fallback path: install via jujuresources instead of the
        # (empty) charm-store resource.
        if not jujuresources.resource_defined(self.resources['zeppelin']):
            return False
        if not utils.verify_resources(*self.resources.values())():
            return False
        jujuresources.install(self.resources['zeppelin'],
                              destination=destination,
                              skip_top_level=True)
    self.dist_config.add_dirs()
    self.dist_config.add_packages()
    return True
def source_install(dcfg):
    """Install a Django project from source and render its circus files."""
    source = dcfg.get('source', {})
    status_set('maintenance', 'installing %s repo' % source['url'])
    install_path = dcfg.get('install-path')
    if not os.path.exists(install_path):
        os.makedirs(install_path)
    source_path = install_remote(source['url'], dest=install_path)
    dcfg.set('source-path', source_path)
    status_set('maintenance', 'installing project deps')
    requirements = dcfg.get('pip-requirements')
    if requirements:
        django.call([django.pip(), 'install', '-r', requirements])
    render(source='circus.ini.j2',
           target='/etc/circus.ini',
           owner='root', group='root', perms=0o644,
           context={
               'install_path': source_path,
               'wsgi': dcfg.get('wsgi'),
               'port': config('django-port'),
               'config_import': dcfg.get('config-import'),
           })
    render(source='circus.conf.j2',
           target='/etc/init/circus.conf',
           owner='root', group='root', perms=0o644,
           context={})
    set_state('django.source.available')
    set_state('django.restart')
def install_site(name, site):
    """Fetch a site's payload into /var/www/<name> and configure it."""
    hookenv.status_set('maintenance', 'Installing %s' % name)
    destination = '/var/www/%s' % name
    # install_from carries the install_remote kwargs (source, checksum, ...).
    fetch.install_remote(dest=destination, **site['install_from'])
    strip_archive_dir(destination)
    configure_site(name, site)
def clone_repo():
    """Clone the repository named by the charm's 'repo' config option."""
    repo_url = hookenv.config('repo')
    install_remote(repo_url)
def clone_repo():
    """Clone the configured repo, install build prerequisites and run
    ``make install`` from the hard-coded checkout path."""
    install_remote(hookenv.config('repo'), dest='/home/ubuntu/repo')
    subprocess.check_call(['pip', 'install', 'virtualenv'])
    subprocess.check_call([
        'apt-get', 'install', '-y',
        'libpq-dev', 'python-dev', 'python-ubuntu-sso-client',
    ])
    # TODO(wwitzel3) fix the hard coded repo path
    subprocess.check_call(['make', 'install'], cwd=PATH)