def copyDBFrom(self, config, source_config=False, **kwargs):
    """Copy a database from the source host into the target configuration.

    Dumps SQL on the source host, scps the dump over, imports it into the
    target database, and removes both temporary dump files.
    """
    target_config = config
    zipped = source_config['supportsZippedBackups']
    dump_on_source = source_config['tmpFolder'] + '/' + config['config_name'] + '.sql'
    dump_on_target = target_config['tmpFolder'] + '/' + config['config_name'] + '_target.sql'
    if zipped:
        dump_on_target += '.gz'
    remote_host = join_host_strings(
        source_config['user'], source_config['host'], source_config['port'])
    # Create the dump on the source host.
    with _settings(host_string=remote_host):
        self.backupSql(source_config, dump_on_source)
    # backupSql produces a gzipped file when zipped backups are supported,
    # so adjust the remote file name before copying.
    if zipped:
        dump_on_source += '.gz'
    # Pull the dump from the source host onto this machine.
    scp_cmd = 'scp -P {port} {args} {user}@{host}:{sql_name_source} {sql_name_target} >>/dev/null'.format(
        args=utils.ssh_no_strict_key_host_checking_params,
        sql_name_source=dump_on_source,
        sql_name_target=dump_on_target,
        **source_config)
    run(scp_cmd)
    # Remove the dump from the source host.
    with _settings(host_string=remote_host):
        self.run_quietly('rm -f %s' % dump_on_source)
    # Import locally, then clean up the transferred dump.
    self.importSQLFromFile(target_config, dump_on_target, True)
    self.run_quietly('rm -f %s' % dump_on_target)
def createDestroyHelper(stages, command, **kwargs):
    """Run ``command`` for every stage against the configured docker host.

    For each step the docker configuration is attached, the connection
    method supplies/overrides host credentials, and the task is executed
    via fabric on the resulting host string.
    """
    dockerConfig = configuration.getDockerConfig(
        configuration.current()['docker']['configuration'])
    for step in stages:
        step['dockerConfig'] = dockerConfig
        log.info(command + ': current stage: \'{stage}\' via \'{connection}\''.format(
            **step))
        hostConfig = {}
        for key in ['host', 'user', 'port']:
            # Bugfix: the original line ended with a stray trailing comma,
            # which wrapped every value in a one-element tuple and produced
            # broken host strings.
            hostConfig[key] = configuration.current()[key]
        methods.call(step['connection'], 'getHostConfig',
                     configuration.current(), hostConfig=hostConfig)
        hostString = join_host_strings(**hostConfig)
        with _settings(host_string=hostString):
            methods.runTask(configuration.current(), command, quiet=True, **step)
def copyDBFrom(self, config, source_config=False, **kwargs):
    """Copy the database from ``source_config``'s host into ``config``'s database.

    Creates an SQL dump on the source host, scps it over, imports it into
    the target and removes the temporary dump on the source host.
    """
    # Honour a possibly-local run context for the target configuration.
    self.setRunLocally(config)
    target_config = config
    sql_name_source = source_config['tmpFolder'] + '/' + config[
        'config_name'] + '.sql'
    sql_name_target = target_config['tmpFolder'] + '/' + config[
        'config_name'] + '_target.sql'
    if source_config['supportsZippedBackups']:
        sql_name_target += '.gz'
    source_host_string = join_host_strings(source_config['user'],
                                           source_config['host'],
                                           source_config['port'])
    # create dump on source.
    with _settings(host_string=source_host_string), self.runLocally(
            source_config):
        self.backupSql(source_config, sql_name_source)
    # copy dump to target:
    # backupSql produces a .gz file when zipped backups are supported, so
    # the remote file name must be adjusted before copying.
    if source_config['supportsZippedBackups']:
        sql_name_source += '.gz'
    args = utils.ssh_no_strict_key_host_checking_params
    cmd = 'scp -P {port} {args} {user}@{host}:{sql_name_source} {sql_name_target} '.format(
        args=args,
        sql_name_source=sql_name_source,
        sql_name_target=sql_name_target,
        **source_config)
    self.run(cmd)
    # Remove the dump on the source host.  NOTE(review): the target-side
    # dump appears to be handled by importSQLFromFile (third argument
    # True) -- confirm against that method.
    with _settings(host_string=source_host_string), self.runLocally(
            source_config):
        self.run_quietly('rm -f %s' % sql_name_source)
    self.importSQLFromFile(target_config, sql_name_target, True)
def mkvirtualenv():
    """ Setup a fresh virtualenv. """
    require('hosts')
    print(cyan('-- mkvirtualenv // uploading bash_profile...'))
    put('conf/.bash_profile_source', '/home/%s/.bash_profile' % env.project_user,
        use_sudo=True)
    print(cyan('-- mkvirtualenv // setting owner and permissions 660'))
    _setowner(os.path.join('/home', env.project_user, '.bash_profile'))
    _setperms('660', os.path.join('/home', env.project_user, '.bash_profile'))
    print(cyan('-- mkvirtualenv // running virtualenvwrapper.sh'))
    # NOTE(review): this export runs in its own shell and does not persist
    # into later commands; the later invocations re-export WORKON_HOME
    # inline, which is why they work.
    sudo('export WORKON_HOME=/sites/.virtualenvs', user=env.project_user)
    with shell_env(WORKON_HOME='/sites/.virtualenvs'):
        sudo('source /usr/local/bin/virtualenvwrapper.sh', user=env.project_user)
    print(cyan('-- mkvirtualenv // chmoding hook.log to avoid permission trouble'))
    # hook.log may not exist yet, hence warn_only.
    with _settings(warn_only=True):
        _setperms('660', os.path.join(env.venv_root, 'hook.log'))
        _setowner(os.path.join(env.venv_root, 'hook.log'))
    with _settings(warn_only=True):
        # Remove a pre-existing virtualenv so the subsequent mkvirtualenv
        # starts from a clean slate.
        if _exists(env.venv_path):
            print(yellow('-- mkvirtualenv // virtualenv %s already exists - now removing.' % env.venv_path))
            sudo('export WORKON_HOME=/sites/.virtualenvs && source /usr/local/bin/virtualenvwrapper.sh && rmvirtualenv %s' % env.venv_name,
                 user=env.project_user)
    sudo('export WORKON_HOME=/sites/.virtualenvs && source /usr/local/bin/virtualenvwrapper.sh && mkvirtualenv %s' % env.venv_name,
         user=env.project_user)
def getIp(self, docker_name, docker_host, docker_user, docker_port): host_string = join_host_strings(docker_user, docker_host, docker_port) try: with hide('running', 'output', 'warnings'), _settings( host_string=host_string, warn_only=True ): output = run('docker inspect --format "{{ .NetworkSettings.IPAddress }}" %s ' % (docker_name)) except SystemExit: print red('Docker not running, can\'t get ip') return False ip_address = output.stdout.strip() if output.return_code != 0: return False return ip_address
def createDestroyHelper(stages, command, **kwargs):
    """Execute ``command`` for each stage on the configured docker host.

    Attaches the docker configuration to every step, lets the connection
    method fill in host credentials, and runs the task via fabric.
    """
    dockerConfig = configuration.getDockerConfig(configuration.current()['docker']['configuration'])
    for step in stages:
        step['dockerConfig'] = dockerConfig
        log.info(command + ': current stage: \'{stage}\' via \'{connection}\''.format(**step))
        hostConfig = {}
        for key in ['host', 'user', 'port']:
            # Bugfix: a stray trailing comma previously turned every value
            # into a one-element tuple, corrupting the host string built
            # from this dict below.
            hostConfig[key] = configuration.current()[key]
        methods.call(step['connection'], 'getHostConfig', configuration.current(), hostConfig=hostConfig)
        hostString = join_host_strings(**hostConfig)
        with _settings(host_string=hostString):
            methods.runTask(configuration.current(), command, quiet=True, **step)
def provision_server(server, provfile_path, password, prov):
    """Provision a single server: instantiate each configured role and run it.

    Roles are always cleaned up, even when provisioning raises.
    """
    host_string = "%s@%s" % (server['user'], server['address'].strip())
    context = {
        'abspath': dirname(abspath(provfile_path)),
        'path': dirname(provfile_path),
        'owner': server['user'],
        'cleanup': [],
        'registered_loaders': []
    }
    aggregate_node_options(server, context)
    # Template loader for files shipped next to the provyfile.
    context['loader'] = ChoiceLoader([
        FileSystemLoader(join(context['abspath'], 'files'))
    ])
    print_header("Provisioning %s..." % host_string)
    fabric_settings = dict(host_string=host_string, password=password)
    if 'ssh_key' in server and server['ssh_key']:
        fabric_settings['key_filename'] = server['ssh_key']
    with _settings(**fabric_settings):
        context['host'] = server['address']
        context['user'] = server['user']
        instantiated = []
        try:
            for role_class in server['roles']:
                context['role'] = role_class
                role = role_class(prov, context)
                instantiated.append(role)
                role.provision()
        finally:
            # Clean up both the roles created here and anything roles
            # registered in context['cleanup'].
            for role in instantiated:
                role.cleanup()
            for role in context['cleanup']:
                role.cleanup()
    print_header("%s provisioned!" % host_string)
def createuser():
    """Create the project linux user on the host if missing, and ensure it
    is a member of the configured group."""
    require('hosts')
    with _settings(warn_only=True):
        result = sudo('id %s' % env.project_user)
        if result.failed:
            # No such user yet: create it and add it to the project group.
            sudo('adduser %s' % env.project_user)
            sudo('usermod -a -G %s %s' % (env.project_group, env.project_user))
            result = sudo('id %s' % env.project_user)
            if result.failed:
                abort('createuser: ERROR: could not create user!')
        else:
            print(yellow('-- createuser // user %s already exists.' % env.project_user))
        # Group membership is (re)applied in both cases.
        print(cyan('-- createuser // add to group'))
        sudo('usermod -a -G %s %s' % (env.project_group, env.project_user))
def waitForServices(self, config, **kwargs): if 'ssh' not in config['needs']: return host_string = join_host_strings(config['user'], config['host'], config['port']) if 'password' in config: self.addPasswordToFabricCache(**config) max_tries = 20 try_n = 0 while(True): try_n += 1 try: with cd(config['rootFolder']), hide('commands'), _settings( host_string=host_string ): output = run('supervisorctl status') output = output.stdout.splitlines() count_running = 0 count_services = 0; for line in output: if line.strip() != '': count_services += 1 if line.find('RUNNING'): count_running += 1 if count_services == count_running: print green('Services up and running!') break; except: # TODO: # handle only relevant exceptions like # fabric.exceptions.NetworkError if (try_n < max_tries): # Let's wait and try again... print "Wait for 5 secs and try again." time.sleep(5) else: print red("Supervisord not coming up at all") break
def bootstrap():
    """Execute bootstrap for the local buildout.

    The default effective python could be overridden by setting
    ``bootstrap-python`` -hostout-option with a path to an another python
    executable.
    """
    buildout_directory = _env.hostout.options.get('path')
    # Fall back to the connecting user (or root) when no dedicated
    # buildout-user is configured.
    fallback_user = _env.user or 'root'
    buildout_user = _env.hostout.options.get('buildout-user', fallback_user)
    local_sudo = _env.hostout.options.get('local-sudo') == "true"
    assert buildout_directory, u'No path found for the selected hostout'

    buildout_python = _env.hostout.options.get('executable')
    bootstrap_python = (
        _env.hostout.options.get('bootstrap-python')
        or buildout_python
    )

    # Bootstrap
    with _lcd(buildout_directory):
        # Run bootstrap.py as the buildout user, optionally via sudo.
        cmd = '{0:s} bootstrap.py --distribute'.format(bootstrap_python)
        cmd = 'su {0:s} -c "{1:s}"'.format(buildout_user, cmd)
        if local_sudo:
            cmd = 'sudo {0:s}'.format(cmd)
        if _output.running:
            print('[localhost] bootstrap: %s' % cmd)
        with _settings(warn_only=True):
            res = _local(cmd)
        if res.failed:
            # Newer bootstrap.py rejects the --distribute flag (distribute
            # is the default there), so retry without it.
            print('First bootstrap failed: we have a new bootstrap which '
                  'has --distribute option now default. Trying again...')
            cmd = '{0:s} bootstrap.py'.format(bootstrap_python)
            cmd = 'su {0:s} -c "{1:s}"'.format(buildout_user, cmd)
            if local_sudo:
                cmd = 'sudo {0:s}'.format(cmd)
            if _output.running:
                print('[localhost] bootstrap: %s' % cmd)
            _local(cmd)
def getIp(self, docker_name, docker_host, docker_user, docker_port, runLocally = False):
    """Return the first IP address of the named docker container, or False.

    Inspects the container either on the local machine (``runLocally``)
    or on the remote docker host reachable as
    ``docker_user@docker_host:docker_port``.  Returns False when docker is
    unavailable, the inspect call fails, or the container exposes no IP.
    """
    host_string = join_host_strings(docker_user, docker_host, docker_port)
    output = False
    try:
        if runLocally:
            with hide('running', 'output', 'warnings'):
                output = local('docker inspect --format "{{range .NetworkSettings.Networks}}{{.IPAddress}}\n{{end}}" %s ' % (docker_name), capture = True)
        else:
            with hide('running', 'output', 'warnings'), _settings(
                host_string=host_string,
                warn_only=True
            ):
                output = run('docker inspect --format "{{range .NetworkSettings.Networks}}{{.IPAddress}}\n{{end}}" %s ' % (docker_name))
        ips = output.splitlines()
    except SystemExit:
        log.error('Docker not running, can\'t get ip')
        return False
    if output.return_code != 0:
        return False
    # Robustness fix: a container without attached networks yields no
    # lines; the old code raised IndexError on ips[0] in that case.
    if not ips:
        return False
    return ips[0]
def waitForServices(self, config, **kwargs): if 'ssh' not in config['needs'] or not config['executables']['supervisorctl']: return host_string = join_host_strings(config['user'], config['host'], config['port']) if 'password' in config: self.addPasswordToFabricCache(**config) max_tries = 10 try_n = 0 while(True): try_n += 1 try: with cd(config['rootFolder']), hide('commands'), _settings( host_string=host_string ): output = run('supervisorctl status') output = output.stdout.splitlines() count_running = 0 count_services = 0; for line in output: if line.strip() != '': count_services += 1 if line.find('RUNNING'): count_running += 1 if count_services == count_running: log.info('Services up and running!') break; except Exception as ex: if (try_n < max_tries): # Let's wait and try again... print "Wait for 5 secs and try again." time.sleep(5) else: log.error("Supervisord not coming up at all") log.error(ex) break
def createdb():
    """Create the postgres role and database for the project.

    Both operations tolerate the object already existing; any other
    failure aborts.
    """
    require('hosts')
    with _settings(warn_only=True):
        print(cyan('-- createdb // creating user %s' % env.db_user))
        outcome = sudo('psql -c "CREATE USER %s WITH NOCREATEDB NOCREATEUSER ENCRYPTED PASSWORD \'%s\';"' % (env.db_user, env.db_pass), user='******')
        if outcome.failed:
            # Guard clause: only "already exists" is tolerated.
            if 'already exists' not in outcome:
                abort(red('-- createdb // error in user creation!'))
            print(yellow('-- createdb // user already exists'))
        print(cyan('-- createdb // creating db %s with owner %s' % (env.db_name, env.db_user)))
        outcome = sudo('psql -c "CREATE DATABASE %s WITH OWNER %s ENCODING \'UTF-8\'";' % (env.db_name, env.db_user), user='******')
        if outcome.failed:
            if 'already exists' not in outcome:
                abort(red('-- createdb // error in db creation!'))
            print(yellow('-- createdb // database already exists'))
def run(provfile_path, server_name, password, extra_options):
    """Provision every server selected by ``server_name`` from the provyfile.

    ``extra_options`` pre-answers AskFor options; anything not supplied
    there is prompted for interactively.
    """
    # The provyfile path is converted to a dotted name and imported as a
    # python module.
    module_path = provfile_path.replace(sep, '.')
    prov = import_module(module_path)
    servers = get_servers_for(prov, server_name)

    # Resolve all AskFor options up-front, before provisioning starts.
    for server in servers:
        if 'options' in server:
            for option_name, option in server['options'].iteritems():
                if isinstance(option, AskFor):
                    if option.key in extra_options:
                        value = extra_options[option.key]
                    else:
                        value = option.get_value(server)
                    server['options'][option_name] = value

    for server in servers:
        host_string = "%s@%s" % (server['user'], server['address'].strip())
        context = {
            'abspath': dirname(abspath(provfile_path)),
            'path': dirname(provfile_path),
            'owner': server['user'],
            'cleanup': [],
            'registered_loaders': []
        }
        # Server options become part of the template/role context.
        if 'options' in server and server['options']:
            for key, value in server['options'].iteritems():
                context[key] = value
        # Template loader for files shipped next to the provyfile.
        loader = ChoiceLoader([
            FileSystemLoader(join(context['abspath'], 'files'))
        ])
        context['loader'] = loader
        msg = "Provisioning %s..." % host_string
        print
        print "*" * len(msg)
        print msg
        print "*" * len(msg)
        settings_dict = dict(host_string=host_string, password=password)
        # Use a private key instead of the password when one is configured.
        if 'ssh_key' in server and server['ssh_key']:
            settings_dict['key_filename'] = server['ssh_key']
        with _settings(**settings_dict):
            context['host'] = server['address']
            context['user'] = server['user']
            role_instances = []
            try:
                for role in server['roles']:
                    context['role'] = role
                    instance = role(prov, context)
                    role_instances.append(instance)
                    instance.provision()
            finally:
                # Roles are always cleaned up, even on provisioning errors.
                for role in role_instances:
                    role.cleanup()
                for role in context['cleanup']:
                    role.cleanup()
        msg = "%s provisioned!" % host_string
        print
        print "*" * len(msg)
        print msg
        print "*" * len(msg)
        print
def run(provfile_path, server_name, password, extra_options):
    """Provision every server selected by ``server_name`` from the provyfile.

    ``extra_options`` pre-answers AskFor options; anything not supplied
    there is prompted for interactively.
    """
    # The provyfile path is converted to a dotted name and imported as a
    # python module.
    module_path = provfile_path.replace(sep, '.')
    prov = import_module(module_path)
    servers = get_servers_for(prov, server_name)

    # Resolve all AskFor options up-front, before provisioning starts.
    for server in servers:
        if 'options' in server:
            for option_name, option in server['options'].iteritems():
                if isinstance(option, AskFor):
                    if option.key in extra_options:
                        value = extra_options[option.key]
                    else:
                        value = option.get_value(server)
                    server['options'][option_name] = value

    for server in servers:
        host_string = "%s@%s" % (server['user'], server['address'])
        context = {
            'abspath': dirname(abspath(provfile_path)),
            'path': dirname(provfile_path),
            'owner': server['user'],
            'cleanup': [],
            'registered_loaders': []
        }
        # Server options become part of the template/role context.
        if 'options' in server and server['options']:
            for key, value in server['options'].iteritems():
                context[key] = value
        # Template loader for files shipped next to the provyfile.
        loader = ChoiceLoader(
            [FileSystemLoader(join(context['abspath'], 'files'))])
        context['loader'] = loader
        msg = "Provisioning %s..." % host_string
        print
        print "*" * len(msg)
        print msg
        print "*" * len(msg)
        with _settings(host_string=host_string, password=password):
            context['host'] = server['address']
            context['user'] = server['user']
            role_instances = []
            try:
                for role in server['roles']:
                    context['role'] = role
                    instance = role(prov, context)
                    role_instances.append(instance)
                    instance.provision()
            finally:
                # Roles are always cleaned up, even on provisioning errors.
                for role in role_instances:
                    role.cleanup()
                for role in context['cleanup']:
                    role.cleanup()
        msg = "%s provisioned!" % host_string
        print
        print "*" * len(msg)
        print msg
        print "*" * len(msg)
        print