def upload_crontab(self, config, env_option, crontab_root):
    """
    Upload a crontab file to the remote machine and install it.

    :param config: dict with ``user`` (remote user, required),
        ``crontab`` (file name, defaults to ``<env_option>.conf``) and
        optional ``position`` (machine selector).
    :param env_option: environment name used for the default file name.
    :param crontab_root: local root containing ``config/crontab/``.
    :return:
    """
    user = config.get('user')
    crontab_file = config.get('crontab', env_option + '.conf')
    position = config.get('position')
    if not user:
        # bug fix: the original passed the file name back through
        # config.get(), which always produced None in the message.
        self.warn("no user found,skip deploy crontab file `%s'"
                  % crontab_file)
        return
    crontab_path = os.path.expanduser(
        os.path.join(crontab_root, 'config', 'crontab', crontab_file))
    if os.path.exists(crontab_path):
        if position is None or current_machine(position):
            # stage in /tmp, load into the user's crontab, clean up
            put(crontab_path, '/tmp/cabric_crontab')
            run('crontab < /tmp/cabric_crontab', user)
            run('rm -f /tmp/cabric_crontab')
        else:
            self.warn("crontab doesn't need"
                      " to deploy this machine:%s." % env.host_string)
    else:
        self.warn("crontab file `%s' not"
                  " found. skip to install it" % crontab_path)
def upload_crontab(self, config, env_option, crontab_root):
    """
    Upload a crontab file to the remote machine and install it.

    :param config: dict with ``user`` (remote user, required),
        ``crontab`` (file name, defaults to ``<env_option>.conf``) and
        optional ``position`` (machine selector).
    :param env_option: environment name used for the default file name.
    :param crontab_root: local root containing ``config/crontab/``.
    :return:
    """
    user = config.get('user')
    crontab_file = config.get('crontab', env_option + '.conf')
    position = config.get('position')
    if not user:
        # bug fix: original used config.get(crontab_file), which looked
        # the file name up as a config key and always warned with None.
        self.warn("no user found,skip deploy crontab file `%s'"
                  % crontab_file)
        return
    crontab_path = os.path.expanduser(
        os.path.join(crontab_root, 'config', 'crontab', crontab_file))
    if not os.path.exists(crontab_path):
        self.warn("crontab file `%s' not"
                  " found. skip to install it" % crontab_path)
        return
    if position is None or current_machine(position):
        # stage in /tmp, load into the user's crontab, clean up
        put(crontab_path, '/tmp/cabric_crontab')
        run('crontab < /tmp/cabric_crontab', user)
        run('rm -f /tmp/cabric_crontab')
    else:
        self.warn("crontab doesn't need"
                  " to deploy this machine:%s." % env.host_string)
def on_mac():
    """Install the configured homebrew packages, tolerating failures."""
    brew_pkgs = pkgs_config.get('brew')
    if not brew_pkgs:
        return
    with settings(warn_only=True):
        run('brew install %s' % ' '.join(brew_pkgs))
def on_mac():
    """Run `brew install` for the configured package list, if any."""
    wanted = pkgs_config.get('brew')
    if wanted:
        with settings(warn_only=True):
            run('brew install %s' % ' '.join(wanted))
def restart(self, restarts, all_services):
    """Restart the requested services that are known to the system."""
    if not (all_services and restarts):
        self.warn("restart progress actived,but no restart service found.")
        return
    known = [name for name in restarts if name in all_services]
    if known:
        run('systemctl restart %s' % ' '.join(known))
def restart(self, restarts, all_services):
    """Restart each requested service that exists in all_services."""
    if all_services and restarts:
        wanted = [svc for svc in restarts if svc in all_services]
        if wanted:
            run('systemctl restart %s' % ' '.join(wanted))
        return
    self.warn("restart progress actived,but no restart service found.")
def letsencrypt_server(self, domains):
    """Obtain a certificate via certbot webroot mode, served by nginx.

    :param domains: domain names to include in the certificate.
    :raises ValueError: when no nginx installation is found.
    """
    nginx_root = get_home('nginx')
    if not nginx_root:
        raise ValueError("no nginx found.skip config certificate...")
    # restart nginx first so the freshest config serves the challenge
    run("systemctl restart nginx")
    domain_args = ' '.join('-d %s' % name for name in domains)
    run("certbot certonly --webroot -w {0} {1}".format(
        nginx_root, domain_args))
def on_all():
    """
    plan feature support add extra repo source

    :return:
    """
    # npm prefix discovery, kept for future use:
    # npm_bin = run('which npm')
    # npm_root = npm_bin.rsplit('/', 2)[0]
    # npm_etc = os.path.join(npm_root, 'etc')
    run('npm install -g %s' % ' '.join(pkg))
def on_all():
    """
    plan feature support add extra repo source

    :return:
    """
    # previous npm-root probing kept as reference:
    # npm_bin = run('which npm')
    # npm_root = npm_bin.rsplit('/', 2)[0]
    # npm_etc = os.path.join(npm_root, 'etc')
    packages = ' '.join(pkg)
    run('npm install -g %s' % packages)
def letsencrypt_server(self, domains):
    """Issue a letsencrypt certificate through nginx's webroot.

    :param domains: domain names to certify.
    :raises ValueError: when nginx is not installed.
    """
    webroot = get_home('nginx')
    if webroot:
        # make sure nginx serves the current configuration
        run("systemctl restart nginx")
        flags = ['-d %s' % name for name in domains]
        run("certbot certonly --webroot -w {0} {1}".format(
            webroot, ' '.join(flags)))
    else:
        raise ValueError("no nginx found.skip config certificate...")
def create_certicifate():
    """Request the certificate, then echo the resulting PEM files."""
    try:
        self.letsencrypt_server(domains)
    except ValueError as e:
        self.warn(e)
    # read back the generated certificate material
    live_dir = "/etc/letsencrypt/live/" + domains[0]
    fullchain = run('cat %s' % os.path.join(live_dir, 'fullchain.pem'))
    private_key = run('cat %s' % os.path.join(live_dir, 'privkey.pem'))
    print(fullchain)
    print(private_key)
def set_timezone(self, timezone):
    """
    Set the machine timezone via timedatectl.

    The value is validated against the characters a timezone identifier
    can contain, closing the injection hole the original todo warned
    about (e.g. ``Asia/Shanghai && rm -rf /``).

    timedatectl list-timezones
    timedatectl

    :param timezone: identifier accepted by ``timedatectl set-timezone``.
    :raises ValueError: when the value contains shell metacharacters.
    :return:
    """
    import re
    if not re.match(r'^[A-Za-z0-9_+/\-]+$', timezone):
        raise ValueError("invalid timezone: %r" % timezone)
    run('timedatectl set-timezone %s' % timezone)
def set_timezone(self, timezone):
    """
    Set the machine timezone via timedatectl.

    Validates the identifier before interpolating it into a shell
    command, fixing the injection risk noted in the original todo
    (e.g. ``Asia/Shanghai && rm -rf /``).

    timedatectl list-timezones
    timedatectl

    :param timezone: identifier accepted by ``timedatectl set-timezone``.
    :raises ValueError: when the value contains shell metacharacters.
    :return:
    """
    import re
    if not re.match(r'^[A-Za-z0-9_+/\-]+$', timezone):
        raise ValueError("invalid timezone: %r" % timezone)
    run('timedatectl set-timezone %s' % timezone)
def install(pypi_config):
    """Install the configured pypi packages under the pyenv interpreter."""
    pinned = pypi_config.get('version')
    if pinned:
        self.install_pyenv(pinned, skip_pkg)
    # fall back to the system interpreter when no version is pinned
    used_version = pinned or self.system_python_version
    for package in pypi_config.get('packages'):
        run('export PYENV_ROOT="/usr/local/var/pyenv/" && '
            'export PYENV_VERSION=%s && '
            'pip install %s' % (used_version, package))
def install(pypi_config):
    """Install each configured pypi package via the pyenv-managed pip."""
    version = pypi_config.get('version')
    if version:
        self.install_pyenv(version, skip_pkg)
    active = version if version else self.system_python_version
    command = ('export PYENV_ROOT="/usr/local/var/pyenv/" && '
               'export PYENV_VERSION=%s && '
               'pip install %s')
    for package in pypi_config.get('packages'):
        run(command % (active, package))
def get_project_python(self, user, project_name):
    """Return the project's pinned python version (empty when none).

    Reads ``.python-version`` from the remote project root; the grep-style
    shell test keeps a missing file from failing the task.
    """
    project_path = self.get_remote_project_path(user, project_name)
    with settings(warn_only=True):
        raw = run("test -f {0}/.python-version &&"
                  " cat {0}/.python-version".format(project_path))
        return raw.strip("\n")
def create_certicifate():
    """Issue the certificate and print the PEM contents it produced."""
    try:
        self.letsencrypt_server(domains)
    except ValueError as e:
        self.warn(e)
    # fetch the certificate files from the letsencrypt live directory
    cert_dir = "/etc/letsencrypt/live/" + domains[0]
    chain_path = os.path.join(cert_dir, 'fullchain.pem')
    key_path = os.path.join(cert_dir, 'privkey.pem')
    fullchain = run('cat %s' % chain_path)
    private_key = run('cat %s' % key_path)
    print(fullchain)
    print(private_key)
def on_centos():
    """
    workflow

    * parse config and set default value
    * force set shell to /sbin/nologin when system flag is set
    * cause error when config.name not exists
    * skip create when user exists
    * create group explicitly when user specify and not same config.name

    :return:
    """
    for user in users:
        # per-user settings with defaults derived from the user name
        username = user.get('name')
        groupname = user.get('group', username)
        home = user.get('home', '/home/{}'.format(username))
        shell = user.get('shell', '`which bash`')
        system_flag = user.get('system', False)
        perm = user.get('perm')
        if system_flag:
            # system accounts never get a login shell
            shell = '/sbin/nologin'
        if not username:
            self.error("invalid user config")
        if exist_user(username):
            # existing user: at most refresh home permissions, then skip
            if perm:
                run('chmod {1} {0}'.format(home, perm))
            self.warn("user `%s' exist.skip to create." % username)
            continue
        system_str = '-r' if system_flag else ''
        if groupname == username:
            # -U lets useradd create a group named after the user
            group_str = '-U'
        else:
            if not exist_group(groupname):
                run('groupadd {1} {0}'.format(groupname, system_str))
                pass
            group_str = '-g {}'.format(groupname)
        run('useradd {1} -d {2} {4} -s {3} {0}'.format(username,
                                                       group_str,
                                                       home,
                                                       shell,
                                                       system_str))
        # todo:
        # .. if perm cause error, user will not delete
        if perm:
            run('chmod {1} {0}'.format(home, perm))
        pass
def set_host(ip, host):
    """
    if you want to use this feature for old file, use 4 space.

    limit: only support one2one relation

    :param ip:
    :param host:
    :return:
    """
    entry = '%s %s' % (ip, host)
    with fabric_settings(warn_only=True):
        # append only when the exact "ip host" pair is not present yet
        if run('grep "%s" /etc/hosts' % entry).failed:
            run('echo "%s" >> /etc/hosts' % entry)
        # elif run('grep "%s" /etc/hosts' % host):
        #     sed -i -e "s//g" ???
def set_host(ip, host):
    """
    if you want to use this feature for old file, use 4 space.

    limit: only support one2one relation

    :param ip:
    :param host:
    :return:
    """
    with fabric_settings(warn_only=True):
        pair = '%s %s' % (ip, host)
        missing = run('grep "%s" /etc/hosts' % pair).failed
        if missing:
            run('echo "%s" >> /etc/hosts' % pair)
        # elif run('grep "%s" /etc/hosts' % host):
        #     sed -i -e "s//g" ???
def compile_templates(self, user, project_name):
    """
    try compile pug template files

    ..note::
        cabric use project root as pug basedir root.
        if you don't use this path,
        use `--skip-compile-templates` to skip this progress.

    :param user: remote user
    :param project_name: project name
    :return:
    """
    root = self.get_remote_project_path(user, project_name)
    # no-op (with an echo) when pug is not installed on the remote host
    run('which pug && pug -E html -b {0} {0} '
        '|| echo "skip parser jade file"'.format(root))
def on_centos():
    """
    workflow

    * parse config and set default value
    * force set shell to /sbin/nologin when system flag is set
    * cause error when config.name not exists
    * skip create when user exists
    * create group explicitly when user specify and not same config.name

    :return:
    """
    for user in users:
        # read per-user config, defaulting group/home from the user name
        username = user.get('name')
        groupname = user.get('group', username)
        home = user.get('home', '/home/{}'.format(username))
        shell = user.get('shell', '`which bash`')
        system_flag = user.get('system', False)
        perm = user.get('perm')
        if system_flag:
            # system users are locked out of interactive login
            shell = '/sbin/nologin'
        if not username:
            self.error("invalid user config")
        if exist_user(username):
            # user already present: only refresh home perms, then skip
            if perm:
                run('chmod {1} {0}'.format(home, perm))
            self.warn("user `%s' exist.skip to create." % username)
            continue
        system_str = '-r' if system_flag else ''
        if groupname == username:
            # -U makes useradd create the same-named group itself
            group_str = '-U'
        else:
            if not exist_group(groupname):
                run('groupadd {1} {0}'.format(groupname, system_str))
                pass
            group_str = '-g {}'.format(groupname)
        run('useradd {1} -d {2} {4} -s {3} {0}'.format(
            username, group_str, home, shell, system_str))
        # todo:
        # .. if perm cause error, user will not delete
        if perm:
            run('chmod {1} {0}'.format(home, perm))
        pass
def enable_services(self, services):
    """
    Enable each given systemd service (centos only).

    :param list services: service list
    :return:
    """
    if get_platform() == 'centos':
        # idiom fix: the original used a list comprehension purely for
        # its side effects; a plain loop states the intent.
        for service in services:
            run('systemctl enable %s' % service)
    else:
        self.warn("not support platform.no services enabled.")
def migrate_db(self, user, project_name, django_settings='web.online'):
    """
    try migrate database

    :param user: remote user
    :param project_name: project name
    :param django_settings: django settings module passed to manage.py
        (default ``web.online``)
    :return:
    """
    project_path = self.get_remote_project_path(user, project_name)
    with cd(project_path):
        # bug fix: honour the django_settings argument; the original
        # hard-coded 'web.online' and silently ignored the parameter.
        run('test -f %(project_path)s/manage.py && '
            'python %(project_path)s/manage.py migrate '
            '--settings=%(settings)s'
            ' || echo "skip migrate database"' % {
                'project_path': project_path,
                'settings': django_settings
            }, user=user)
def on_centos():
    """Import rpm signing keys, then install yum packages and local rpms."""
    # looping over the (possibly empty) key list replaces the original
    # redundant truthiness guard
    for key_url in pkgs_config.get('rpm-keys', []):
        run("rpm --import %s" % key_url)
    yum_pkgs = pkgs_config.get('yum')
    if yum_pkgs:
        mirror_put(root, '/etc/yum.repos.d', validate=False)
        run('yum install -y epel-release')
        run('yum install -y %s' % ' '.join(yum_pkgs))
        # local rpms are downloaded into /tmp before installing
        for url in pkgs_config.get('yum-local', []):
            with cd("/tmp"):
                run("curl -L -O %s" % url)
                run('yum localinstall -y %s' % os.path.basename(url))
def upload_javascripts(self, remote_user, project_name, working_root=None):
    """Push the local webpack-stats.json to the remote javascripts dir.

    Does nothing when the local stats file or the remote directory is
    missing.
    """
    remote_path = self.get_remote_project_path(remote_user, project_name)
    base = working_root or os.getcwd()
    stats_file = os.path.join(base, 'javascripts', 'webpack-stats.json')
    remote_js_dir = os.path.join(remote_path, 'javascripts')
    if not os.path.exists(stats_file):
        return
    with settings(warn_only=True):
        if not run("test -d %s" % remote_js_dir).failed:
            put(stats_file, remote_js_dir)
def set_hostname(self):
    """
    set machine hostname

    :return:
    """
    try:
        host_index = env.hosts.index(env.host_string)
        hostname = env.host_names[host_index]
        if hostname:
            # set transient, pretty and static names, then reload
            run("hostnamectl set-hostname %s" % hostname)
            run("hostnamectl set-hostname %s --pretty" % hostname)
            run("hostnamectl set-hostname %s --static" % hostname)
            run("systemctl restart systemd-hostnamed")
    except IndexError:
        self.warn("can't find current hostname config:%s"
                  % env.host_string)
def stop(self, services, all_services):
    """Stop the requested services that are known to the system."""
    if not (all_services and services):
        return
    known = [name for name in services if name in all_services]
    run('systemctl stop %s' % ' '.join(known))
def install_requirements(self, user, project_name, pip='pip'):
    """
    when requirements file exits. install it.

    :param user: remote user to deploy
    :param project_name: project name
    :param pip: pip executable name
        (NOTE(review): appears unused — the tool is auto-detected below;
        confirm before removing)
    :return:
    """
    project_path = self.get_remote_project_path(user, project_name)
    python_version = self.get_project_python(user, project_name)
    bin_home = '/usr/local/var/pyenv/'
    if python_version:
        # project pins a version via .python-version in its root
        bin_dir = bin_home + 'versions/%s/bin' % python_version
    else:
        # fall back to the deploy user's home-level .python-version
        python_version = run('test -e ~%(user)s/.python-version && '
                             'cat ~%(user)s/.python-version '
                             '|| echo ""' % {'user': user})
        bin_dir = bin_home + 'versions/%s/bin' % python_version
        if not python_version:
            # no pin anywhere: use the pyenv shims directory
            bin_dir = bin_home + 'shims'
        pass
    requirement_files = [
        os.path.join(project_path, 'requirements.txt'),
        os.path.join(project_path, 'requirements', 'test.txt'),
        os.path.join(project_path, 'requirements', 'private.txt'),
    ]
    # choose pip vs pip3 from the interpreter's reported major version
    python_v = run('%s/python -V' % bin_dir, capture=True)
    if python_v.split(' ')[1][0] == '3':
        pip_tool = 'pip3'
    else:
        pip_tool = 'pip'
        pass
    for f in requirement_files:
        # a pip version bug forced dropping '--upgrade-strategy
        # only-if-needed'; plain -U is used instead
        # " install -U --upgrade-strategy only-if-needed -r {0} || "
        run("test -f {0} && {1}/{2}"
            " install -U -r {0} || "
            "echo '{0} not exist,skip install...'".format(
                f, bin_dir, pip_tool))
        pass
    # static/zip requirement sets are installed without -U
    requirement_files = [
        os.path.join(project_path, 'requirements-static.txt'),
        os.path.join(project_path, 'requirements', 'zip.txt'),
        os.path.join(project_path, 'requirements', 'private-static.txt'),
        os.path.join(project_path, 'requirements', 'test-static.txt'),
    ]
    for f in requirement_files:
        run("test -f {0} && {1}/{2}"
            " install -r {0} || "
            "echo '{0} not exist,skip install...'".format(
                f, bin_dir, pip_tool))
        pass
    pass
def reload(self, reloads, all_services):
    """Reload the requested services that are known to the system."""
    if all_services and reloads:
        known = [name for name in reloads if name in all_services]
        run('systemctl reload %s' % ' '.join(known))
def set_ssl_config(self, ssl_config, env_name):
    """
    Not support parallel workflow

    :param ssl_config:
    :param env_name:
    :return:
    """
    # verify machine which to use: only the host at encrypt-position
    # performs certificate work; every other host returns early
    encrypt_position = ssl_config.get('encrypt-position', 0)
    try:
        use_host = env.hosts[encrypt_position]
        if use_host != env.host_string:
            return
    except IndexError:
        self.error("`ssl.encrypt-position' value is invalid.")
    # verify domains
    domains = ssl_config.get('domains')
    if not domains and not isinstance(domains, list):
        self.error("`ssl.domains' must be config.")
    # set dh_param (generated once; skipped when the file already exists)
    dh_param = ssl_config.get('dhparam')
    if dh_param:
        dh_param_file = dh_param['path']
        dh_param_length = dh_param.get('length', 4096)
        run("test -f {0} || openssl dhparam -out {0} {1}".format(
            dh_param_file, dh_param_length))
        pass

    def create_certicifate():
        # request the certificate and print the PEM material
        try:
            self.letsencrypt_server(domains)
        except ValueError as e:
            self.warn(e)
        # get certificate
        certificate_remote_dir = "/etc/letsencrypt/live/" + domains[0]
        fullchain = run(
            'cat %s' % os.path.join(certificate_remote_dir,
                                    'fullchain.pem'))
        private_key = run(
            'cat %s' % os.path.join(certificate_remote_dir,
                                    'privkey.pem'))
        print(fullchain)
        print(private_key)
        pass

    # try to manage load balancer
    load_balancer = ssl_config.get('load-balancer')
    if load_balancer:
        lb_isp = load_balancer.get('isp')
        upstream_mode = load_balancer.get('upstream-mode')
        # NOTE(review): when isp is absent, lb_isp.lower() raises
        # AttributeError before the `lb_isp is None` branch below can
        # fire — confirm intended ordering
        if lb_isp.lower() == 'qingcloud.com':
            from cabric.cloud.qingcloud import QingCloud
            client = QingCloud()
            client.connect(load_balancer['zone'])
            client.connector.debug = self.options.debug
            if upstream_mode:
                # upstream mode: certificate only, no policy management
                create_certicifate()
                return
            # try to set forward policy
            policy_name = 'letsencrypt-' + env_name
            policy = client.get_or_create_loadbalancer_policy(policy_name)
            # try to set forward rule (routes the ACME challenge path)
            rules = [{
                'loadbalancer_policy_rule_name': domain,
                'rule_type': 'url',
                'val': '^/.well-known'
            } for domain in ssl_config['domains']]
            for rule in rules:
                client.get_or_add_loadbalancer_policy_rules(
                    policy['loadbalancer_policy_id'], rule)
            client.apply_loadbalancer_policy(
                policy['loadbalancer_policy_id'])
            http_listener = load_balancer.get('http-listener')
            # try to set backend
            # ..note::
            #     please make sure you backend works right.
            backend = load_balancer.get('backend')
            backend.update({
                'loadbalancer_backend_name':
                    policy['loadbalancer_policy_name'],
                'loadbalancer_policy_id':
                    policy['loadbalancer_policy_id']
            })
            if http_listener and backend:
                client.get_or_add_load_balancer_backends(
                    http_listener, backend)
                pass
            create_certicifate()
            pass
        elif lb_isp is None:
            self.warn("load balancer isp not specified."
                      "skip config load balancer")
            pass
        else:
            self.warn("unknown isp for load balancer %s,"
                      "skip config load balancer" % lb_isp)
            pass
        pass
    else:
        # no load balancer configured: just issue the certificate
        create_certicifate()
        pass
    pass
def on_centos():
    """Install node and npm from epel unless package installs are skipped."""
    if skip_pkg:
        return
    run('yum install -y epel-release')
    run('yum install -y nodejs npm')
def upgrade(self, remote_user, project_name, repo, branch, commit=None):
    """
    upgrade source code

    :param remote_user:deploy username
    :param project_name: project name
    :param repo:git repo address
    :param branch:which branch to deploy

        ..note::
            currently,if remote machine already cloned from repo,
            branch can't be change.

            if you really need to change branch.
            you have to remove remote project directory,
            do upgrade again.

    :param commit:which commit to deploy,default use latest commit,
        support tags

        ..note::
            commit or tag must be valid in branch

    :return:
    """
    # register the git host in known_hosts for the deploy user
    host = get_git_host(repo)
    known_host(host, remote_user)
    remote_path = self.get_remote_project_path(remote_user, project_name)
    # NOTE(review): deploy_key is computed but never used in this
    # function — confirm whether it is still needed
    deploy_key = self.get_remote_key(remote_user, project_name)
    with settings(warn_only=True):
        # first deploy: clone the repository into remote_path
        if run("test -d %s/.git" % remote_path).failed:
            parent_path = os.path.dirname(remote_path)
            run('test -d {0} || mkdir {0}'.format(parent_path),
                remote_user)
            with cd(parent_path):
                run(
                    'git clone {} -b {} {}'.format(repo, branch,
                                                   remote_path),
                    remote_user)
                # ignore permission-bit-only changes in diffs
                run(
                    "cd {} && git config core.fileMode false".format(
                        remote_path), remote_user)
        # refresh the branch and its tags
        run("cd {} && git pull origin {}".format(remote_path, branch),
            remote_user)
        run("cd {} && git pull origin {} --tags".format(remote_path,
                                                        branch),
            remote_user)
        if commit:
            # make sure there is no merge commit on remote server
            # run("cd {} && git checkout -- .".format(remote_path),
            #     remote_user)
            run("cd {} && git checkout {}".format(remote_path, commit),
                remote_user)
        pass
def install_pyenv(self, versions=None, skip_pkg=False):
    """
    We will install pyenv by default. and with normal use lib

    Because pyenv is awesome!!!

    :param versions: python version(s) to install — a list, a single
        version string, or None to install ``self.system_python_version``
    :param skip_pkg: skip install depends package, default is False
    :return:
    """
    remote_os = get_platform()
    if remote_os == 'centos':
        if not skip_pkg:
            # build/runtime dependencies for compiling CPython and
            # common binary extensions
            run('yum install -y git')
            run("yum install -y gcc gcc-c++ make autoconf certbot"
                " libffi-devel ncurses-devel expat-devel"
                " zlib-devel zlib libzip-devel"
                " bzip2 bzip2-devel bzip2-libs"
                " mariadb-devel mariadb-libs"
                " sqlite-devel"
                " libxml2 libxml2-devel libxslt libxslt-devel"
                " libcurl-devel"
                " pcre-devel pcre"
                " libmcrypt libmcrypt-devel openssl-devel openssl-libs"
                " libjpeg libjpeg-devel libpng libpng-devel"
                " freetype freetype-devel "
                " libtiff-devel lcms2-devel libwebp-devel"
                " tcl-devel tk-devel")
        # install pyenv via the hosted bootstrap script and expose it
        run('export PYENV_ROOT=/usr/local/var/pyenv && '
            'curl -L http://nextoa.com/'
            'bin/pyenv-install.sh | bash')
        run('ln -sfv /usr/local/var/pyenv/bin/pyenv /usr/local/bin/pyenv')
        pass
    elif remote_os == 'mac':
        with settings(warn_only=True):
            run('brew install git pyenv')
        pass
    # ensure the pyenv-virtualenv plugin is present and pyenv initialises
    run('test -d /usr/local/var/pyenv/plugins/pyenv-virtualenv ||'
        ' git clone https://github.com/yyuu/pyenv-virtualenv.git'
        ' /usr/local/var/pyenv/plugins/pyenv-virtualenv')
    run('export PYENV_ROOT="/usr/local/var/pyenv/" &&'
        ' eval "$(pyenv init -)"')
    # install the requested interpreter(s); -s skips already-installed
    if isinstance(versions, list):
        for v in versions:
            run('export PYENV_ROOT="/usr/local/var/pyenv/" &&'
                ' pyenv install -s %s' % v)
    elif type(versions).__name__ in ['str', 'unicode']:
        run('export PYENV_ROOT="/usr/local/var/pyenv/" &&'
            ' pyenv install -s %s' % versions)
    else:
        run('export PYENV_ROOT="/usr/local/var/pyenv/" && '
            'pyenv install -s %s' % self.system_python_version)
    # persist pyenv initialisation into /etc/profile (idempotent greps)
    command_list = [
        """grep "PYENV_ROOT" /etc/profile || \
echo 'export PYENV_ROOT="/usr/local/var/pyenv/"' \
>> /etc/profile""",
        """grep "pyenv init" /etc/profile || \
echo 'eval "$(pyenv init -)"' >> \
/etc/profile""",
        """grep "pyenv virtualenv" /etc/profile || \
echo 'eval "$(pyenv virtualenv-init -)"' \
>> /etc/profile""",
    ]
    if remote_os == 'mac':
        # mac needs sudo (and quoting) to write /etc/profile
        command_list = [
            "sudo sh -c '%s'" % shell_quote(v) for v in command_list
        ]
        pass
    for cmd in command_list:
        run(cmd)
        pass
    pass
def before_install(self, root):
    """Prepare the machine: install rsync and sync baseline files."""
    run('yum install -y rsync')
    for remote_target in ('/etc/hosts', '/root/.bash_profile'):
        mirror_put(root, remote_target, validate=False)
def install_project_python(self, user, project_name):
    """Install the python version pinned by the project, when any."""
    pinned = self.get_project_python(user, project_name)
    if pinned:
        # -s makes pyenv skip versions that are already installed
        run('pyenv install -s %s' % pinned)
def upload_resources(self, user, project_name, working_root=None,
                     static_prefix=None):
    """
    upload static resoures file is exists.

    only works for django+nginx(start with nginx user) project.

    cabric will upload:

    - static django resource directory
    - assets webpack resource directory

    :param user: remote deploy user
    :param project_name: project name
    :param working_root: local project root (defaults to the cwd)
    :param static_prefix: optional path prefix under the nginx home

    ..todo::
        use remote path to validate install

    :return:
    """
    remote_root = self.get_remote_project_path(user, project_name)
    working_root = working_root or os.getcwd()
    # both the local and the remote side must look like django projects
    django_manage = os.path.join(working_root, 'manage.py')
    if not os.path.exists(django_manage):
        self.warn(
            "local root is not a django project,skip upload resources")
        return
    with settings(warn_only=True):
        if run("test -f %s/manage.py" % remote_root).failed:
            self.warn("deploy project is not django project,"
                      "skip upload resources")
            return
        pass
    # the static root lives under the nginx user's home directory
    try:
        nginx_home = get_home('nginx')
    except ValueError:
        self.warn("remote server only support nginx "
                  "and must use nginx user start,"
                  "skip deploy static resources...")
        return
    static_prefix = static_prefix or ''
    nginx_static_root = os.path.join(nginx_home, static_prefix, 'static')
    # collect static files by user
    # fabric_local('python manage.py collectstatic --noinput')
    with settings(warn_only=True):
        run('test -e {0} || mkdir -p {0}'.format(nginx_static_root))
        # upload the django static dir and the webpack assets dir
        static_root_list = [
            os.path.join(working_root, 'static'),
            os.path.join(working_root, 'assets')
        ]
        for v in static_root_list:
            if os.path.exists(v):
                put(v, nginx_static_root)
            pass
        pass
    pass
def on_mac():
    """Install node via homebrew unless package installs are skipped.

    Bug fix: the original ran ``brew install node`` only WHEN
    ``skip_pkg`` was set — the inverse of the sibling centos path,
    which installs when ``skip_pkg`` is false.
    """
    if not skip_pkg:
        run('brew install node')
def upload_deploy_key(self, private_key, remote_user, project_name):
    """
    upload deploy key

    :param string private_key: private key local path
        default is ~/.ssh/.deploies/`github`.rsa
    :param string remote_user: remote user name to deploy
    :param string project_name: a project name
    :param string github: github repo name
    :param bool force_renew: try to replace deploy key
        when use auto-generate
    :param int key_length: must a legal ssh key length value.
        default is 8192

    ..note::
        if you use github and want to use auto generate
        private-key feature.
        there is two ways can do this:

        - you must set access token in your ~/.gitconfig file
        - you must disable github two-factor authentication,
          input your username and password.

        currently, we use `<remote_user>@cabric` as our deploy key name.
        so if you upload your key use to other purpose,
        don't use `@cabric` as key suffix.

        if github deploy key already exist
        and you want to replace deploy key.
        you must set `--fresh-new' option.

        cabric allow each machine deploy multiple github project,
        but disallow deploy same name project in one user.

        if you still want do this.

        - you can set github value if you use it.
        - deploy them in different remote user.

    ..note::
        currently, this only works on linux.

    :return:
    """
    # NOTE(review): docstring lists github/force_renew/key_length but
    # the signature does not take them — confirm whether the docs are
    # stale or the parameters were dropped
    if not os.path.exists(private_key):
        self.error("deploy key `%s' is not exists,please set it."
                   % private_key)
    if os.path.exists(private_key):
        self.print_message("upload deploy key...")
        remote_key = self.get_remote_key(remote_user, project_name)
        remote_key_root = os.path.dirname(remote_key)
        # ensure the key directory exists and is private to the user
        run('test -e {0} || mkdir -p {0}'.format(remote_key_root),
            remote_user)
        with settings(warn_only=True):
            run('chmod 700 -Rf {}'.format(remote_key_root), remote_user)
        fabric_put(private_key, remote_key)
        # ssh refuses keys that are group/world readable
        run('chmod 600 -f {}'.format(remote_key))
        run('chown {1} -f {0}'.format(remote_key, remote_user))
        pass
    pass