def patch_text(patch, reverse=False):
    '''Apply text patch to remote server.

    Uploads <local patch dir>/text.patch to the remote host, optionally dry-runs
    it, optionally applies it, then archives the remote copy under patches/.
    `reverse` adds `patch -R` to back the change out.
    '''
    def cmd(reverse, dry, path):
        # Build the shell command string; '<' redirection means the whole
        # string is run through the remote shell.
        cmd = ['patch']
        if dry:
            cmd.append('--dry-run')
        if reverse:
            cmd.append('-R')
        cmd.append('-p1')
        cmd.append('<')
        cmd.append(path)
        return ' '.join(cmd)
    require('base')
    remotep = _remote('%s.patch' % patch)
    # NOTE(review): path(patch) looks like a project helper returning the local
    # patch directory — confirm against its definition.
    put(join(path(patch), 'text.patch'), remotep)
    if confirm('Dry run patch?'):
        with settings(show('stdout'), warn_only = True):
            with cd(env.base):
                run(cmd(reverse, True, remotep))
    if confirm('Execute patch?'):
        with settings(show('stdout'), warn_only = True):
            with cd(env.base):
                run(cmd(reverse, False, remotep))
        log('Applied text patch: %s' % patch)
        # keep the applied patch file for the record
        run('mv %s patches' % remotep)
def dependencies():
    """Install the Python modules the project requires (pip requirements.txt)."""
    quiet = hide("warnings", "running", "stdout", "stderr")
    with settings(show("user"), quiet):
        with cd("projects/%(projet)s" % env):
            # Bail out early when there is nothing to install.
            if not exists("requirements.txt"):
                print(red('Aucun fichier "requirements.txt" trouvé.'))
                return
            print(yellow("Installation des dépendances du projet..."))
            with prefix("workon %(projet)s" % env):
                # Re-enable output so pip progress is visible.
                with settings(show("running", "stdout", "stderr")):
                    run("pip install -r requirements.txt")
def dependencies():
    '''Install the project's required Python modules from requirements.txt.'''
    with settings(show('user'), hide('warnings', 'running', 'stdout', 'stderr')):
        with cd('projects/%(projet)s' % env):
            has_requirements = exists('requirements.txt')
            if has_requirements:
                print(yellow('Installation des dépendances du projet...'))
                # Work inside the project's virtualenv, with pip output visible.
                with prefix('workon %(projet)s' % env):
                    with settings(show('running', 'stdout', 'stderr')):
                        run('pip install -r requirements.txt')
            else:
                print(red('Aucun fichier "requirements.txt" trouvé.'))
def dependencies():
    '''Install project Python modules, then collect Django static files.'''
    with settings(show('user'), hide('warnings', 'running', 'stdout', 'stderr')):
        with cd('%(base_install)s' % env):
            if exists('requirements.txt'):
                print(yellow('Installation des dépendances du projet...'))
                with prefix('workon %(projet)s' % env):
                    # pip output is re-enabled so progress is visible
                    with settings(show('running', 'stdout', 'stderr')):
                        run('pip install -r requirements.txt')
            else:
                print(red('Aucun fichier "requirements.txt" trouvé.'))
        # collectstatic runs whether or not requirements were installed
        with prefix('workon %(projet)s' % env):
            with cd('%(base_install)s' % env):
                run('./manage.py collectstatic --noinput')
def pull_seaflowpy():
    """Clone or update the seaflowpy repo on the remote host, then install and smoke-test it."""
    gitdir = "/home/ubuntu/git"
    repodir = os.path.join(gitdir, "seaflowpy")
    # Existence probes are done quietly; only the clone shows progress.
    with quiet():
        gitdir_missing = run("test -d {}".format(gitdir)).failed
        if gitdir_missing:
            run("mkdir {}".format(gitdir))
        repodir_missing = run("test -d {}".format(repodir)).failed
        if repodir_missing:
            with show("running", "warnings", "stderr"):
                run("git clone https://github.com/armbrustlab/seaflowpy {}".format(repodir))
    with cd(repodir), hide("stdout"):
        run("git pull")
        run("python setup.py install")
        run("python setup.py test")
        # the version banner is the one thing we do want to see
        with show("stdout"):
            run("seaflowpy_filter --version")
def django_project():
    """Create a Django project inside its own virtualenv.

    Interactive provisioning: ensures the virtualenv exists, optionally starts
    a new Django project, creates the PostGIS database, then wires up WSGI,
    the Apache vhost and project dependencies before restarting Apache.
    """
    with settings(show("user"), hide("warnings", "running", "stdout", "stderr")):
        project()
        domain()
        locale()
        if not exists("/home/%(user)s/.virtualenvs/%(projet)s" % env):
            # if confirm('Pas de virtualenv "%(projet)s", faut-il le créer ?' % env, default=False):
            run("mkvirtualenv %(projet)s" % env)
            run("source .bash_profile")
            with prefix("workon %(projet)s" % env):
                run("pip install django")
                print(green("Django installé."))
        if not exists("projects/%s/" % (env.projet)):
            print(yellow("le projet %(projet)s n’existe pas encore" % env))
            if confirm('Créer un projet django nommé "%(projet)s" ?' % env, default=False):
                with cd("projects/"):
                    with prefix("workon %(projet)s" % env):
                        run("django-admin.py startproject %s" % (env.projet))
                        print(green('Projet Django "%(projet)s" : OK.' % env))
        else:
            print(green('Projet Django "%(projet)s" : OK.' % env))
        # create the db named after the project (idempotent)
        icanhaz.postgres.database(env.projet, env.user, template="template_postgis", locale=env.locale)
        print(green("Base de données %(projet)s : OK." % env))
        django_wsgi()
        apache_vhost()
        dependencies()
        sudo("apachectl restart")
def fabcommand(args, actions):
    """Execute the named fabfile actions with fabric output levels matched to verbosity."""
    levels = [
        "status", "aborts", "warnings", "running",
        "stdout", "stderr", "user", "debug",
    ]
    level_aliases = {
        "output": ["stdout", "stderr"],
        "everything": ["warnings", "running", "user", "output"],
    }
    # Default: show nothing, hide the 'everything' alias group.
    lshow, lhide = [], level_aliases["everything"]
    if args.verbose == 1:
        lshow, lhide = levels[:3], levels[3:]
    elif args.verbose == 2:
        lshow, lhide = levels[:4], levels[4:]
    elif args.verbose >= 3 or args.debug:
        lshow, lhide = levels, []
    with settings(hide(*lhide), show(*lshow)):
        for action_name in actions:
            execute(getattr(fabfile, action_name))
def dsetool_cmd(nodes, options):
    """Run a dsetool command simultaneously on each node specified"""
    dsetool_path = os.path.join(dse.get_bin_path(), 'dsetool')
    cmd = 'JAVA_HOME={java_home} {dsetool_cmd} {options}'.format(
        java_home=JAVA_HOME, dsetool_cmd=dsetool_path, options=options)
    all_output = fab.show('warnings', 'running', 'stdout', 'stderr')
    # warn_only: a failing dsetool on one node must not abort the whole run
    with common.fab.settings(all_output, hosts=nodes, warn_only=True):
        return execute(fab.run, cmd)
def server_setup():
    '''Server installation for Ubuntu >= 10.10.

    Updates APT, upgrades installed packages, installs the base package set
    for Django+GeoDjango hosts, ensures pip, then configures virtualenv,
    Apache and PostgreSQL.
    '''
    with settings(show('user'), hide('warnings', 'running', 'stdout', 'stderr')):
        sudo('apt-get -y install aptitude')  # may prompt for input
        print(yellow('Mise à jour de l’index APT...'))
        fabtools.deb.update_index()  # apt-get quiet update
        print(yellow('Mise à jour des paquets debian installés...'))
        fabtools.deb.upgrade()
        # packages common to all Django+geodjango servers
        print(yellow('Installation des paquets de base...'))
        pretty_apt([
            'git-core', 'mercurial', 'gcc', 'curl', 'build-essential',
            'libfreetype6', 'libfreetype6-dev', 'liblcms1-dev',
            'libpng12-dev', 'libjpeg8-dev', 'python-imaging', 'supervisor',
            'python-setuptools', 'nano', 'python-dev', 'swig', 'memcached',
            'python-memcache', 'libgeoip1'
        ])
        # pip special case
        if not fabtools.python.is_pip_installed():
            fabtools.python.install_pip()
        print(green('pip : installé.'))
        virtualenv_setup()
        # apache configuration
        apache_setup()
        postgresql()
def fabcommand(args, actions):
    """Run the requested fabfile actions, choosing fabric show/hide levels from verbosity."""
    levels = ["status", "aborts", "warnings", "running", "stdout", "stderr", "user", "debug"]
    level_aliases = {
        "output": ["stdout", "stderr"],
        "everything": ["warnings", "running", "user", "output"],
    }
    lhide = level_aliases["everything"]
    lshow = []
    # Map verbosity to a split index in `levels`: everything before the
    # split is shown, everything after is hidden.
    if args.verbose == 1:
        split = 3
    elif args.verbose == 2:
        split = 4
    elif args.verbose >= 3 or args.debug:
        split = len(levels)
    else:
        split = None
    if split is not None:
        lshow = levels[:split]
        lhide = levels[split:]
    with settings(hide(*lhide), show(*lshow)):
        for action in actions:
            func = getattr(fabfile, action)
            execute(func)
def postgresql():
    '''PostgreSQL 9.1 + PostGIS 1.5.

    Installs the packages, creates a PostgreSQL superuser matching the unix
    user, writes ~/.pgpass, builds the PostGIS template database and starts
    the server.
    '''
    with settings(show('user'), hide('warnings', 'running', 'stdout', 'stderr')):
        #if 'pgpass' not in env.keys():
        #    prompt('Passe PostgreSQL :', default=pgpass, key='pgpass')
        print(yellow('Configuration PostgreSQL+PostGIS...'))
        pretty_apt(['postgresql', 'binutils', 'gdal-bin', 'libproj-dev',
                    'postgresql-9.1-postgis', 'postgresql-server-dev-9.1',
                    'python-psycopg2', 'libgeoip1'])
        # print(yellow('Upgrading all packages...'))
        # fabtools.deb.upgrade()
        # create a postgresql user with the same name as the unix user
        if not fabtools.postgres.user_exists(env.user):
            fabtools.postgres.create_user(env.user, env.pg_pass)
            # NOTE(review): user='******' looks like a redacted value
            # (probably 'postgres') — confirm before running.
            sudo('''psql -c "ALTER ROLE %(user)s CREATEDB;"''' % env, user='******')
            sudo('''psql -c "ALTER USER %(user)s with SUPERUSER;"''' % env, user='******')
            print(green('Création d’un superuser "%(user)s" PostgreSQL.' % env))
        if not exists('.pgpass'):
            run('echo "*:*:*:%(user)s:%(pg_pass)s" >> .pgpass' % env)
            sudo('chmod 0600 .pgpass')
            print(green('Création du fichier .pgpass.'))
        # build the template_postgis database from the Django-provided script
        run('curl https://docs.djangoproject.com/en/dev/_downloads/create_template_postgis-debian.sh -o postgis.sh')
        run('chmod +x postgis.sh')
        run('./postgis.sh')
        #postgresql_net_access()
        require.postgres.server()  # start server
def test_perf(params=''):
    """
    Runs the performance tests against the configured service and produce
    the report in dist/

    :params str params: Parameters to pass to Funkload bench

    Examples::

        fab dist.test_perf
        fab dist.test_perf:"-c 1:15 -D 1"
    """
    # Funkload is an optional dependency; fail with a clear message if absent.
    try:
        import funkload
    except ImportError:
        abort('Funkload module missing, please install it first')
    # Create report folder if needed
    report_dir = dist_join('report/html')
    if not os.path.exists(report_dir):
        os.makedirs(report_dir)
    bench_cmd = 'fl-run-bench %s tests.py MultiprojectTestCase.test_smoke' % params
    report_cmd = 'fl-build-report -o %s --html smoke-bench.xml' % report_dir
    # Run the Funkload tests in the perf test folder
    with lcd(rel_join('tests/perf')):
        with settings(show('running', 'stdout')):
            local(bench_cmd)
            local(report_cmd)
    logger.info('Testing completed. Test report can be found in: %s' % report_dir)
def run(self, *args, **kwargs):
    """Called by fabric. Will set up the task and launch it.

    Do not override this in your children tasks, use `operation` instead.
    """
    # check if the task is called via Python or via `fab` in terminal
    self.called_via_fab = self._called_via_fab
    self._called_via_fab = True
    # Display help if the user asks for it, then exit.  An explicit check
    # replaces the old `assert args[0] == "help"` + bare `except`: asserts
    # disappear under `python -O`, and the bare except also swallowed real
    # errors raised by get_usage()/log(), silently falling through to setup().
    if args and args[0] == "help":
        self.logging = True
        self.log(self.get_usage())
        return
    self.setup(*args, **kwargs)
    self.pre_run()
    with hide(*self.hide), show(*self.show):
        # run the actual task logic
        r = self.operation(*self.args, **self.kwargs)
        self.post_run()
        return r
def pvcluster(remote_dir, paraview_cmd, paraview_args, paraview_port,
              paraview_remote_port, job_dict, shell_cmd):
    """Submit a pvserver batch job on a cluster through a reverse SSH tunnel.

    Builds a `mycluster` job description in `remote_dir` from `job_dict`
    (queue, ntasks, tasks-per-node, project) and submits it immediately.
    The tunnel forwards the remote ParaView port back to the local one.
    """
    with show('debug'), \
         remote_tunnel(int(paraview_remote_port), local_port=int(paraview_port)):
        with shell_env(PARAVIEW_CMD=paraview_cmd, PARAVIEW_ARGS=paraview_args):
            # echo the environment for debugging the remote setup
            run('echo $PARAVIEW_HOME')
            run('echo $PARAVIEW_ARGS')
            run('mkdir -p ' + remote_dir)
            with cd(remote_dir):
                # shell_cmd is a prefix (e.g. sourcing a profile) prepended
                # to every mycluster invocation
                cmd_line = shell_cmd
                cmd_line += 'mycluster --create pvserver.job --jobname=pvserver'
                cmd_line += ' --jobqueue ' + job_dict['job_queue']
                cmd_line += ' --ntasks ' + job_dict['job_ntasks']
                cmd_line += ' --taskpernode ' + job_dict['job_ntaskpernode']
                # vizstack clusters use a different launcher script
                if 'vizstack' in paraview_args:
                    cmd_line += ' --script mycluster-viz-paraview.bsh'
                else:
                    cmd_line += ' --script mycluster-paraview.bsh'
                cmd_line += ' --project ' + job_dict['job_project']
                run(cmd_line)
                run('chmod u+rx pvserver.job')
                run(shell_cmd + 'mycluster --immediate --submit pvserver.job')
def django_project():
    '''Create a Django project inside its own virtualenv.

    Ensures the virtualenv exists (installing Django into it), optionally
    starts a new project, creates the PostGIS database, then configures WSGI,
    the Apache vhost and dependencies before restarting Apache.
    '''
    with settings(show('user'), hide('warnings', 'running', 'stdout', 'stderr')):
        project()
        domain()
        locale()
        if not exists('/home/%(user)s/.virtualenvs/%(projet)s' % env):
            #if confirm('Pas de virtualenv "%(projet)s", faut-il le créer ?' % env, default=False):
            run('mkvirtualenv %(projet)s' % env)
            run('source .bash_profile')
            with prefix('workon %(projet)s' % env):
                run('pip install django')
                print(green('Django installé.'))
        if not exists('projects/%s/' % (env.projet)):
            print(yellow('le projet %(projet)s n’existe pas encore' % env))
            if confirm('Créer un projet django nommé "%(projet)s" ?' % env, default=False):
                with cd('projects/'):
                    with prefix('workon %(projet)s' % env):
                        run('django-admin.py startproject %s' % (env.projet))
                        print(green('Projet Django "%(projet)s" : OK.' % env))
        else:
            print(green('Projet Django "%(projet)s" : OK.' % env))
        # create the db named after the project (idempotent)
        icanhaz.postgres.database(env.projet, env.user, template='template_postgis', locale=env.locale)
        print(green('Base de données %(projet)s : OK.' % env))
        django_wsgi()
        apache_vhost()
        dependencies()
        sudo('apachectl restart')
def free_cache():
    """Drop the kernel page/dentry/inode caches; skipped for docker targets."""
    if dconf.HOST_CONN == 'docker':
        return
    # pylint: disable=not-context-manager
    with show('everything'), settings(warn_only=True):
        res = sudo("sh -c \"echo 3 > /proc/sys/vm/drop_caches\"")
        if res.failed:
            LOG.error('%s (return code %s)', res.stderr.strip(), res.return_code)
def servers_deploy():
    """Prepare a remote host: base packages, working directory, and a fresh
    TTFB_ML checkout (clone or pull)."""
    # Skipping virtualenv because of complication from different server OS versions etc.
    with show('debug'):
        # Create experiment directory if not exists
        run('apt-get update', shell=False)
        # Because of error that curl was not installed when cloning
        # Because of confusion between git and git-fm for older debian versions
        run('apt-get install -y curl git-core screen rsync', shell=False)
        with settings(warn_only=True):
            # `test -d` exits 1 when the directory is missing
            if (run("test -d %s" % '/root/ttfb', shell=False).return_code) == 1:
                # print() with one argument is valid on both Python 2 and 3;
                # the original `print '...'` statements were py2-only syntax.
                print('Creating working directory')
                run("mkdir %s" % '/root/ttfb', shell=False)
        with cd('/root/ttfb'):
            with settings(warn_only=True):
                if (run("test -d %s" % 'TTFB_ML', shell=False).return_code) == 1:
                    print('Cloning repository')
                    # Using git in the url since HTTP does not work for older versions of git
                    run("git clone git://github.com/emmdim/TTFB_ML.git", shell=False)
            # Not using env.code_dir_clients cause older git versions do not pull when in subfolder
            with cd('/root/ttfb/TTFB_ML'):
                run("git pull", shell=False)
                run("ls", shell=False)
def postgresql():
    '''PostgreSQL 8.4 + PostGIS 1.5.

    Installs packages, creates a PostgreSQL superuser matching the unix user,
    writes ~/.pgpass, builds the PostGIS template, opens network access and
    starts the server.
    '''
    with settings(show('user'), hide('warnings', 'running', 'stdout', 'stderr')):
        project()
        # ask for the password once and cache it in env
        if 'pgpass' not in env.keys():
            prompt('Passe PostgreSQL :', default=pgpass, key='pgpass')
        print(yellow('Configuration PostgreSQL+PostGIS...'))
        pretty_apt([
            'libpq-dev', 'binutils', 'gdal-bin', 'libproj-dev',
            'postgresql-8.4-postgis', 'postgresql-server-dev-8.4',
            'python-psycopg2'
        ])
        fabtools.deb.upgrade()
        # create a postgresql user with the same name as the unix user
        if not fabtools.postgres.user_exists(env.user):
            fabtools.postgres.create_user(env.user, env.pgpass)
            # NOTE(review): user='******' looks redacted (likely 'postgres') — confirm.
            sudo('''psql -c "ALTER ROLE %(user)s CREATEDB;"''' % env, user='******')
            sudo('''psql -c "ALTER USER %(user)s with SUPERUSER;"''' % env, user='******')
            print(green('Création d’un superuser "%(user)s" PostgreSQL.' % env))
        if not exists('.pgpass'):
            run('echo "*:*:*:%(user)s:%(pgpass)s" >> .pgpass' % env)
            sudo('chmod 0600 .pgpass')
            print(green('Création du fichier .pgpass.'))
        postgis_template()
        postgresql_net_access()
        icanhaz.postgres.server()  # start server
def setup():
    '''Base installation for Ubuntu >= 10.10.

    Collects project/domain/locale settings, refreshes APT, installs the
    common package set, ensures pip, then configures the environment and the
    chosen web server stack.
    '''
    with settings(show('user'), hide('warnings', 'running', 'stdout', 'stderr')):
        project()
        domain()  # if gandi
        locale()
        sudo('apt-get install aptitude')
        print(yellow('Mise à jour de l’index APT...'))
        fabtools.deb.update_index()  # apt-get quiet update
        print(yellow('Mise à jour des paquets debian installés...'))
        fabtools.deb.upgrade()
        # packages common to all Django+geodjango servers
        print(yellow('Installation des paquets de base...'))
        pretty_apt([
            'git-core', 'mercurial', 'gcc', 'curl', 'build-essential',
            'python-imaging', 'python-setuptools', 'nano', 'memcached',
            'python-memcache'
        ])
        # pip special case
        if not fabtools.python.is_pip_installed():
            fabtools.python.install_pip()
        print(green('pip : installé.'))
        environnement()
        # web server configuration: 1 = apache only, 2 = apache behind nginx
        if (env.websrv == 1):
            apache()
        elif (env.websrv == 2):
            apache_nginx()
def postgresql():
    '''PostgreSQL 9.1 + PostGIS 1.5.

    Installs packages, creates a matching PostgreSQL superuser, writes
    ~/.pgpass, builds the PostGIS template database and starts the server.
    '''
    with settings(show('user'), hide('warnings', 'running', 'stdout', 'stderr')):
        #if 'pgpass' not in env.keys():
        #    prompt('Passe PostgreSQL :', default=pgpass, key='pgpass')
        print(yellow('Configuration PostgreSQL+PostGIS...'))
        pretty_apt([
            'postgresql', 'binutils', 'gdal-bin', 'libproj-dev',
            'postgresql-9.1-postgis', 'postgresql-server-dev-9.1',
            'python-psycopg2', 'libgeoip1'
        ])
        # print(yellow('Upgrading all packages...'))
        # fabtools.deb.upgrade()
        # create a postgresql user with the same name as the unix user
        if not fabtools.postgres.user_exists(env.user):
            fabtools.postgres.create_user(env.user, env.pg_pass)
            # NOTE(review): user='******' looks redacted (likely 'postgres') — confirm.
            sudo('''psql -c "ALTER ROLE %(user)s CREATEDB;"''' % env, user='******')
            sudo('''psql -c "ALTER USER %(user)s with SUPERUSER;"''' % env, user='******')
            print(green('Création d’un superuser "%(user)s" PostgreSQL.' % env))
        if not exists('.pgpass'):
            run('echo "*:*:*:%(user)s:%(pg_pass)s" >> .pgpass' % env)
            sudo('chmod 0600 .pgpass')
            print(green('Création du fichier .pgpass.'))
        # build template_postgis from the Django-provided helper script
        run('curl https://docs.djangoproject.com/en/dev/_downloads/create_template_postgis-debian.sh -o postgis.sh')
        run('chmod +x postgis.sh')
        run('./postgis.sh')
        #postgresql_net_access()
        require.postgres.server()  # start server
def postgresql():
    """PostgreSQL 8.4 + PostGIS 1.5.

    Installs packages, creates a PostgreSQL superuser matching the unix user,
    writes ~/.pgpass, builds the PostGIS template, opens network access and
    starts the server.
    """
    with settings(show("user"), hide("warnings", "running", "stdout", "stderr")):
        project()
        # ask for the password once and cache it in env
        if "pgpass" not in env.keys():
            prompt("Passe PostgreSQL :", default=pgpass, key="pgpass")
        print(yellow("Configuration PostgreSQL+PostGIS..."))
        pretty_apt(
            [
                "libpq-dev",
                "binutils",
                "gdal-bin",
                "libproj-dev",
                "postgresql-8.4-postgis",
                "postgresql-server-dev-8.4",
                "python-psycopg2",
            ]
        )
        fabtools.deb.upgrade()
        # create a postgresql user with the same name as the unix user
        if not fabtools.postgres.user_exists(env.user):
            fabtools.postgres.create_user(env.user, env.pgpass)
            # NOTE(review): user="******" looks redacted (likely "postgres") — confirm.
            sudo('''psql -c "ALTER ROLE %(user)s CREATEDB;"''' % env, user="******")
            sudo('''psql -c "ALTER USER %(user)s with SUPERUSER;"''' % env, user="******")
            print(green('Création d’un superuser "%(user)s" PostgreSQL.' % env))
        if not exists(".pgpass"):
            run('echo "*:*:*:%(user)s:%(pgpass)s" >> .pgpass' % env)
            sudo("chmod 0600 .pgpass")
            print(green("Création du fichier .pgpass."))
        postgis_template()
        postgresql_net_access()
        icanhaz.postgres.server()  # start server
def _create_or_update_virtualenv(virtualenv_root, virtualenv_name,
                                 requirements_paths, virtualenv_flags=None):
    """Create the remote virtualenv if missing, then install requirements into it.

    `requirements_paths` may be a single path or a list; each file is uploaded
    to a remote temp file and pip-installed inside the activated virtualenv.
    """
    with show('output'):
        virtualenv_path = '/'.join((virtualenv_root, virtualenv_name))
        if not exists(virtualenv_path):
            if virtualenv_flags is None:
                virtualenv_flags = ''
            puts("virtualenv not found in {}, creating one.".format(virtualenv_root))
            run("virtualenv {} {}".format(virtualenv_path, virtualenv_flags))
        # accept a bare string as a one-element list
        if isinstance(requirements_paths, string_types):
            requirements_paths = [requirements_paths]
        for requirements_path in requirements_paths:
            puts("Uploading {} to temporary file.".format(requirements_path))
            temp_req = run("mktemp /tmp/streamparse_requirements-XXXXXXXXX.txt")
            put(requirements_path, temp_req)
            puts("Updating virtualenv: {}".format(virtualenv_name))
            cmd = "source {}".format(os.path.join(virtualenv_path, 'bin/activate'))
            with prefix(cmd):
                # Make sure we're using latest pip so options work as expected
                run("pip install --upgrade 'pip~=9.0'", pty=False)
                run("pip install -r {} --exists-action w --upgrade "
                    "--upgrade-strategy only-if-needed".format(temp_req),
                    pty=False)
                # clean up the uploaded temp requirements file
                run("rm {}".format(temp_req))
def remote_build(hoststring, password):
    """Build desktop-crashup on a remote linux host.

    Ensures clang-format and a working directory exist, uploads the sources,
    then builds inside a virtualenv under xvfb (headless X).
    """
    env.host_string = hoststring
    env.password = password
    env.abort_on_prompts = True
    env.reject_unknown_hosts = False
    with show('exceptions'):
        # probe for clang-format; any failure (missing binary, nonzero exit)
        # triggers installation
        try:
            run('clang-format-3.7 --version')
        except:
            install_clang_format()
        if not fabric.contrib.files.exists('~/desktop-crashup'):
            run('mkdir ~/desktop-crashup')
        things_to_put = [
            'demoapp', 'crashup', 'tests', 'cmake',
            'build_linux.py', 'CMakeLists.txt'
        ]
        for f in things_to_put:
            put_tar(f, '~/desktop-crashup/')
        with cd('~/desktop-crashup/'):
            if not fabric.contrib.files.exists('~/desktop-crashup/venv'):
                run('virtualenv venv')
            with prefix('source venv/bin/activate'):
                # verbose version in case of problems:
                # run('xvfb-run -e /dev/stdout -a python build_linux.py')
                # '-a' to try different display number if 99 is already taken
                run('xvfb-run -a python build_linux.py')
def server_setup():
    '''Server installation for Ubuntu >= 10.10.

    Refreshes APT, upgrades packages, installs the base Django+GeoDjango
    package set, ensures pip, then configures virtualenv, Apache and
    PostgreSQL.
    '''
    with settings(show('user'), hide('warnings', 'running', 'stdout', 'stderr')):
        sudo('apt-get -y install aptitude')  # may prompt for input
        print(yellow('Mise à jour de l’index APT...'))
        fabtools.deb.update_index()  # apt-get quiet update
        print(yellow('Mise à jour des paquets debian installés...'))
        fabtools.deb.upgrade()
        # packages common to all Django+geodjango servers
        print(yellow('Installation des paquets de base...'))
        pretty_apt(['git-core', 'mercurial', 'gcc', 'curl', 'build-essential',
                    'libfreetype6', 'libfreetype6-dev', 'liblcms1-dev',
                    'libpng12-dev', 'libjpeg8-dev', 'python-imaging',
                    'supervisor', 'python-setuptools', 'nano', 'python-dev',
                    'swig', 'memcached', 'python-memcache', 'libgeoip1'])
        # pip special case
        if not fabtools.python.is_pip_installed():
            fabtools.python.install_pip()
        print(green('pip : installé.'))
        virtualenv_setup()
        # apache configuration
        apache_setup()
        postgresql()
def test_perf(params=''):
    """
    Runs the performance tests against the configured service and produce
    the report in dist/

    :params str params: Parameters to pass to Funkload bench

    Examples::

        fab dist.test_perf
        fab dist.test_perf:"-c 1:15 -D 1"
    """
    # Funkload is an optional dependency; abort with a clear message if absent.
    try:
        import funkload
    except ImportError:
        abort('Funkload module missing, please install it first')
    # Create report folder if needed
    report_dir = dist_join('report/html')
    if not os.path.exists(report_dir):
        os.makedirs(report_dir)
    # Run the Funkload tests in perf test folder
    with lcd(rel_join('tests/perf')):
        with settings(show('running','stdout')):
            local('fl-run-bench %s tests.py MultiprojectTestCase.test_smoke' % params)
            local('fl-build-report -o %s --html smoke-bench.xml' % report_dir)
    logger.info('Testing completed. Test report can be found in: %s' % report_dir)
def remote_tmpdir():
    """Create and return the name of a temporary directory at the remote
    location.
    """
    # The command lines run on the remote python (py2 print statement);
    # their text must stay exactly as-is.
    script_lines = ['import tempfile', 'print tempfile.mkdtemp()']
    with settings(show('stdout')):
        return remote_py_cmd(script_lines)
def remote_listdir(path):
    """Return a list of files found in the given remote directory.

    The remote side prints the repr of ``os.listdir(...)``; parse it with
    ``ast.literal_eval`` instead of hand-slicing the string.  The old slicing
    returned ``['']`` for an empty directory and broke on file names that
    contain ``", "`` or quotes.
    """
    import ast
    with settings(show('stdout')):
        s = remote_py_cmd(["import os",
                           "print os.listdir('%s')" % path.replace('\\', '/')])
    return ast.literal_eval(s.strip())
def coop_project_setup():
    '''Create a django-coop project inside its own virtualenv.

    Ensures the virtualenv exists (installing django-coop into it), then
    optionally bootstraps the project with coop-admin.py and checks the
    supervisor configuration.
    '''
    set_project()
    set_domain()
    with settings(show('user')):  # , hide('warnings', 'running', 'stdout', 'stderr')):
        if not exists('/home/%(user)s/.virtualenvs/%(projet)s' % env):
            # if confirm('Pas de virtualenv "%(projet)s", faut-il le créer ?' % env, default=False):
            run('mkvirtualenv --system-site-packages %(projet)s' % env)
            run('source .bash_profile')
            with prefix('workon %(projet)s' % env):
                run('pip install git+git://github.com/credis/django-coop.git')
                print(green('Django et django-coop installé.'))
        if not exists('projects/%s/' % (env.projet)):
            print(yellow('le projet %(projet)s n’existe pas encore' % env))
            if confirm('Créer un projet django nommé "%(projet)s" ?' % env, default=False):
                with cd('projects/'):
                    with prefix('workon %(projet)s' % env):
                        run('coop-admin.py startproject %(projet)s --domain %(domain)s' % env)
                        print(green('Projet Django-coop "%(projet)s" : Installé.' % env))
                # coop-admin scripts creates the WSGI script so we won't call django_wsgi()
                with cd('projects/%(projet)s' % env):
                    with prefix('workon %(projet)s' % env):
                        run('chmod +x manage.py')
                        run('chmod -R g+rw media')
                vrfy_supervisor_conf()
        else:
            print(yellow('Projet Django-coop nommé "%(projet)s" : déjà installé.' % env))
            # TODO proposer de réinstaller (offer to reinstall)
            vrfy_supervisor_conf()
def coop_set_project():
    '''Set up an existing django-coop project in its virtualenv.

    Creates the virtualenv and fixes permissions on first run, then installs
    the project's python dependencies and ensures a logs directory exists.
    '''
    with settings(show('user'), hide('warnings', 'running', 'stdout', 'stderr')):
        if not exists('/home/%(user)s/.virtualenvs/%(projet)s' % env):
            # if confirm('Pas de virtualenv "%(projet)s", faut-il le créer ?' % env, default=False):
            run('mkvirtualenv --no-site-packages %(projet)s' % env)
            run('source .bash_profile')
            with cd('%(base_install)s' % env):
                with prefix('workon %(projet)s' % env):
                    run('chmod +x manage.py')
                    run('mkdir media')
                    run('chmod -R g+rw media')
        else:
            # with prefix('workon %(projet)s' % env):
            #     run('pip install --timeout=240 -r %(base_install)s/requirements.txt' % env)
            print(yellow('Projet Django-coop nommé "%(projet)s" : déjà installé.' % env))
            # TODO proposer de réinstaller (offer to reinstall)
        with prefix('workon %(projet)s' % env):
            print(yellow('Récupération des dépendances python du projet coop-mes'))
            run('pip install --timeout=1024 --use-mirrors -r %(base_install)s/requirements.txt' % env)
            print(green('Récupération des dépendances python du projet coop-mes'))
        # create the logs directory if it does not exist yet
        if not exists('%(base_install)s/logs' % env):
            run('mkdir %(base_install)s/logs' % env)
def download_master_builder_key(stackname):
    """Fetch the master-builder private SSH key from the stack's master server."""
    pdata = project_data_for_stackname(stackname)
    master_stack = core.find_master(pdata['aws']['region'])
    private_key = "/root/.ssh/id_rsa"
    label = "master builder key %s:%s" % (master_stack, private_key)
    with stack_conn(master_stack):
        # I actually get better exceptions with this disabled
        with show('exceptions'):
            return fab_get(private_key, use_sudo=True, return_stream=True, label=label)
def cqlsh(script, node):
    """Run a cqlsh script on a node"""
    global cqlsh_path
    # collapse the script to a single line for the -e argument
    one_line = script.replace("\n", " ")
    cmd = '{cqlsh_path} --no-color {host} -e "{script}"'.format(
        cqlsh_path=cqlsh_path, host=node, script=one_line)
    show_levels = fab.show("warnings", "running", "stdout", "stderr")
    with common.fab.settings(show_levels, hosts=node):
        return execute(fab.run, cmd)[node]
def cqlsh(script, node):
    """Run a cqlsh script on a node"""
    global cqlsh_path
    # newlines would break the quoted -e argument; flatten them first
    script = script.replace('\n', ' ')
    cmd = '{cqlsh_path} --no-color {host} -e "{script}"'.format(cqlsh_path=cqlsh_path,
                                                                host=node,
                                                                script=script)
    with common.fab.settings(fab.show('warnings', 'running', 'stdout', 'stderr'), hosts=node):
        results = execute(fab.run, cmd)
    return results[node]
def run_cmd(cmd, user, **kwargs):
    """Run `cmd` as `user` (sudo when not the fabric login user); raise on failure."""
    with show("everything"), settings(warn_only=True):
        if user == env.user:
            command_result = run(cmd, **kwargs)
        else:
            command_result = sudo(cmd, user=user, **kwargs)
        # warn_only suppresses fabric's abort, so surface failures ourselves
        if command_result.return_code != 0:
            raise RuntimeError('Command failed to run: %s' % cmd)
        return command_result
def relocate_env():
    """Repair the deployed virtualenv's embedded paths for the new release location."""
    with cd(env.release_path), show('output'):
        strategy = env.virtualenvtools_strategy
        if strategy == 'update-path':
            run('%(virtualenvtools_executable)s --update-path %(virtualenvtools_venv_path)s' % env)
        elif strategy == 'reinitialize':
            # drop the stale interpreter symlinks before reinitializing
            run('rm %(virtualenvtools_venv_path)s/bin/python*' % env)
            run('%(virtualenvtools_executable)s --reinitialize %(virtualenvtools_venv_path)s' % env)
def test_local_false_global_true(self):
    """
    combine_stderr: False kwarg => overrides global True value
    """
    with show('everything'):
        env.combine_stderr = True
        result = run("both_streams", combine_stderr=False)
        # streams must arrive separately despite the global setting
        eq_("stdout", result.stdout)
        eq_("stderr", result.stderr)
def test_local_true_global_false(self):
    """
    combine_stderr: True kwarg => overrides global False value
    """
    with show('everything'):
        env.combine_stderr = False
        result = run("both_streams", combine_stderr=True)
        # interleaved output lands entirely on stdout
        eq_("ssttddoeurtr", result.stdout)
        eq_(result.stderr, "")
def test_local_none_global_false(self):
    """
    combine_stderr: no kwarg => uses global value (False)
    """
    with show('everything'):
        env.combine_stderr = False
        result = run("both_streams")
        # with the global False, streams stay separate
        eq_("stdout", result.stdout)
        eq_("stderr", result.stderr)
def post_run(self):
    """Hook called after the task body has run: fire hooks, clear per-run state."""
    with show('everything'):
        self.trigger_hooks()
        # reset some per-run attributes
        self.parent = None
        self.start_message = None
def jexec(name, command, *args, **kwargs):
    """
    execute the given command inside the given jail by creating a new
    ssh session to the host.
    """
    with settings(show("output"), warn_only=True):
        saved_hosts = env.hosts
        env.hosts = [name]
        try:
            execute(command, *args, **kwargs)
        finally:
            # Restore the original host list even when execute() raises
            # (warn_only only suppresses shell-level failures, not Python
            # exceptions), so later tasks don't target the jail by accident.
            env.hosts = saved_hosts
def delta(upstream='upstream', bsd=True):
    """Show merge commits between the deployed revision and upstream/master.

    `bsd` selects the sed extended-regex flag (-E for BSD sed, -r for GNU sed).
    The sed expression rewrites "Merge pull request #N from user/branch"
    into the shorter "#N/user" form.
    """
    with cd(env.current_path):
        # currently deployed commit on the remote host
        version = run("git rev-parse --short HEAD", quiet=True)
        local('git fetch -q %s' % upstream)
        with show('output'):
            # %% escapes the git --pretty placeholders inside the %-format string
            local('git log --pretty="%%h %%s: %%b" --merges %s..%s/master | '
                  'sed -%s "s/Merge pull request #([0-9]+) from ([^/]+)\\/[^:]+/#\\1\\/\\2/"' % (
                      version, upstream, 'E' if bsd else 'r'))
def run_local_topology(
    name=None, env_name=None, time=0, options=None, config_file=None
):
    """Run a topology locally using Flux and `storm jar`."""
    name, topology_file = get_topology_definition(name, config_file=config_file)
    config = get_config(config_file=config_file)
    env_name, env_config = get_env_config(env_name, config_file=config_file)
    topology_class = get_topology_from_file(topology_file)
    set_topology_serializer(env_config, config, topology_class)
    storm_options = resolve_options(
        options, env_config, topology_class, name, local_only=True
    )
    # local runs need at most one acker and a single worker
    if storm_options["topology.acker.executors"] != 0:
        storm_options["topology.acker.executors"] = 1
    storm_options["topology.workers"] = 1
    # Set parallelism based on env_name if necessary
    for spec in topology_class.specs:
        if isinstance(spec.par, dict):
            spec.par = spec.par.get(env_name)
    # Check Storm version is the same
    local_version = local_storm_version()
    project_version = storm_lib_version()
    if local_version != project_version:
        raise ValueError(
            "Local Storm version, {}, is not the same as the "
            "version in your project.clj, {}. The versions must "
            "match.".format(local_version, project_version)
        )
    # Prepare a JAR that has Storm dependencies packaged
    topology_jar = jar_for_deploy(simple_jar=False)
    if time <= 0:
        time = 9223372036854775807  # Max long value in Java
    # Write YAML file
    with show("output"):
        with NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as yaml_file:
            topology_flux_dict = topology_class.to_flux_dict(name)
            topology_flux_dict["config"] = storm_options
            # ruamel.yaml changed its API at 0.15; support both
            if yaml.version_info < (0, 15):
                yaml.safe_dump(topology_flux_dict, yaml_file, default_flow_style=False)
            else:
                yml = yaml.YAML(typ="safe", pure=True)
                yml.default_flow_style = False
                yml.dump(topology_flux_dict, yaml_file)
        # delete=False above keeps the file alive for the storm process
        cmd = (
            "storm jar {jar} org.apache.storm.flux.Flux --local --no-splash "
            "--sleep {time} {yaml}".format(
                jar=topology_jar, time=time, yaml=yaml_file.name
            )
        )
        local(cmd)
def pvserver(remote_dir, paraview_cmd, paraview_port, paraview_remote_port):
    """Start pvserver remotely, tunnelling the remote ParaView port back to the local one."""
    tunnel = remote_tunnel(int(paraview_remote_port), local_port=int(paraview_port))
    with show('debug'), tunnel, cd(remote_dir):
        # with cd(remote_dir):
        if use_multiprocess:
            # keep the session attached to the server process
            run('sleep 2;' + paraview_cmd)  # , pty=False)
        else:
            # detach fully so the fabric call returns right away
            run('sleep 2;' + paraview_cmd + '</dev/null &>/dev/null&', pty=False)
def pvserver(remote_dir, paraview_cmd, paraview_port, paraview_remote_port):
    """Launch a remote pvserver through a reverse tunnel to the local ParaView port."""
    remote_port = int(paraview_remote_port)
    local_port = int(paraview_port)
    with show('debug'), remote_tunnel(remote_port, local_port=local_port), cd(remote_dir):
        if not use_multiprocess:
            # background + detach so the fabric call does not block
            run('sleep 2;' + paraview_cmd + '</dev/null &>/dev/null&', pty=False)
        else:
            # run('sleep 2;'+paraview_cmd+'&>/dev/null',pty=False)
            run('sleep 2;' + paraview_cmd)  # , pty=False)
def extractfile(commit, path):
    """Fetch `path` as of `commit` into <binary_base>/<commit>/<path>.

    'target' selects the tip of master; anything else uses the currently
    selected branch.
    """
    # Only the directory part of `path` is needed to create the local tree.
    # The old code also bound the filename to `file` (shadowing the builtin)
    # without using it, and bound the unused mkdirs() return value to `dirs`.
    dirs = os.path.split(path)[0]
    mkdirs('%s/%s' % (commit, dirs))
    with settings(show('stdout'), warn_only=True):
        if commit == 'target':
            commit_id = master.commit
        else:
            commit_id = selected_branch.commit
        get_file(commit_id, path, join(binary_base, commit, path))
def remote_machine(user, address, key_filename, debug):
    """Build a fabric settings context for an SSH session to user@address."""
    if debug:
        display = show('everything')
    else:
        display = hide('everything')
    return settings(
        display,
        host_string='{0}@{1}'.format(user, address),
        key_filename=key_filename,
        disable_known_hosts=True,
    )
def _setup_postgres(db_name):
    """Install PostgreSQL and create a role + database named `db_name` if absent.

    createuser runs with --pwprompt, so output is re-shown for that step so
    the password prompt is visible.
    """
    # NOTE(review): create_user is set but never read within this function —
    # possibly leftover; confirm no caller depends on it.
    create_user = False
    with hide('output'):
        sudo('apt-get install -y postgresql postgresql-contrib libpq-dev')
        # empty query result means the role does not exist yet
        if not sudo('psql postgres -tAc "SELECT 1 FROM pg_roles WHERE rolname=\'{}\'"'.format(db_name), user='******').strip():
            create_user = True
            with show('output', 'user'):
                # NOTE(review): user='******' looks redacted (likely 'postgres') — confirm.
                sudo('createuser --no-createdb --no-superuser --no-createrole --pwprompt {}'.format(db_name), user='******')
                sudo('createdb --owner={db_name} {db_name}'.format(db_name=db_name), user='******')
def test_local_none_global_true(self):
    """
    combine_stderr: no kwarg => uses global value (True)
    """
    with show('everything'):
        result = run("both_streams")
        # Note: the exact way the streams are jumbled here is an implementation
        # detail of our fake SSH server and may change in the future.
        eq_("ssttddoeurtr", result.stdout)
        eq_(result.stderr, "")
def set_permissions_on_spark_data_dir(nodes,
                                      spark_data_dir=os.path.join('/', 'var', 'lib', 'spark'),
                                      user='******'):
    """Open up the spark data dir (777) and hand ownership to `user` on every node."""
    output_levels = fab.show('warnings', 'running', 'stdout', 'stderr')
    with fab.settings(output_levels, hosts=nodes):
        execute(fab.sudo, 'chmod -R 777 {spark_data}'.format(spark_data=spark_data_dir))
        execute(fab.sudo,
                'chown {user}:{user} {spark_data}'.format(user=user, spark_data=spark_data_dir))
def make_remote_spark_data_dir(nodes,
                               spark_data_dir=os.path.join('/', 'var', 'lib', 'spark'),
                               remove_existing_spark_data=True):
    """Create the spark data dir on every node, optionally wiping any existing data first."""
    output_levels = fab.show('warnings', 'running', 'stdout', 'stderr')
    with fab.settings(output_levels, hosts=nodes):
        if remove_existing_spark_data:
            execute(fab.sudo, 'rm -rf {spark_data}'.format(spark_data=spark_data_dir))
        execute(fab.sudo, 'mkdir -p {spark_data}'.format(spark_data=spark_data_dir))