def upload_zodb(prod_user=None, path=None):
    """Upload ZODB part of Zope's data to the server.

    prod_user/path default to the fabric env (path falls back to the cwd).
    """
    opts = dict(
        prod_user=prod_user or env.get('prod_user'),
        # FIX: 'shortname' is interpolated into the cd() path below but was
        # missing from opts, so this task always raised KeyError: 'shortname'.
        shortname=env.get('shortname'),
        path=path or env.get('path') or os.getcwd()
    )
    # _verify_env(['prod_user', 'path', ])
    if not env.get('confirm'):
        # NOTE(review): confirm()'s return value is discarded -- if this is
        # fabric.contrib.console.confirm the upload proceeds even after "no".
        # Confirm whether it is a project helper that aborts internally.
        confirm("This will destroy the current Data.fs file on the server. "
                "Are you sure you want to continue?")
    with cd('/home/%(prod_user)s/niteoweb.%(shortname)s/var/filestorage' % opts):
        # remove temporary BLOBs from previous uploads
        if exists('/tmp/Data.fs'):
            sudo('rm -rf /tmp/Data.fs')
        # upload Data.fs to server and set production user as its owner
        upload_template(
            filename='%(path)s/var/filestorage/Data.fs' % opts,
            destination='Data.fs',
            use_sudo=True
        )
        sudo('chown -R %(prod_user)s:%(prod_user)s Data.fs' % opts)
def finish_configuring_slave():
    """Finish repmgr/streaming-replication configuration on the slave host."""
    parameter_default_values()
    with settings(host_string=env.pgslave_user_host):
        # upload repmgr.conf on slave server
        repmgr_context = dict(cluster_name=env.cluster_name,
                              node_number=env.slave_node_number,
                              sync_user=env.sync_user,
                              sync_db=env.sync_db,
                              sync_pass=env.sync_pass,
                              # NOTE(review): the slave config gets the
                              # *master* ssh port -- confirm intended.
                              ssh_port=env.master_ssh_port,
                              )
        # Prefer a local override; fall back to the packaged template.
        repmgr_conf_file = 'conf/repmgr/repmgr.conf'
        if not isfile(repmgr_conf_file):
            repmgr_conf_file = '%s/%s' % (pg_fabrep_path, repmgr_conf_file)
        # NOTE(review): destination is env.master_pgdata_path although the
        # host here is the slave -- presumably the data paths match; confirm.
        upload_template(repmgr_conf_file, env.master_pgdata_path,
                        context=repmgr_context, backup=False)
        slave_postgresql_conf = "%spostgresql.conf" % env.slave_pgdata_path
        slave_postgresql_conf_bck = "%spostgresql.conf.bck" % env.slave_pgdata_path
        # Keep a backup, then rewrite postgresql.conf with hot_standby on.
        sudo('mv %s %s' % (slave_postgresql_conf, slave_postgresql_conf_bck))
        sudo("sed '/hot_standby =/c hot_standby = on' %s > %s" % \
             (slave_postgresql_conf_bck, slave_postgresql_conf))
        # Stash copies of the config files in the slave conf directory.
        sudo("mkdir -p %s" % env.slave_pgconf_path)
        sudo("cp %spg_hba.conf %s" % (env.slave_pgdata_path, env.slave_pgconf_path))
        sudo("cp %spg_ident.conf %s" % (env.slave_pgdata_path, env.slave_pgconf_path))
        sudo("cp %spostgresql.conf %s" % (env.slave_pgdata_path, env.slave_pgconf_path))
        run("sudo -u postgres pg_ctl -D /var/lib/postgresql/%(postgres_version)s/%(cluster_name)s/ start" % env)
def create_upstart_configs():
    """Generate Upstart configuration files """
    cfg = fabric.env.cfg
    tmpl = local_path("templates/upstart.cfg")
    procs = cfg.processes or DEFAULT_PROCESSES
    workers = cfg.get("workers")
    ctx = {
        "user": fabric.env.user,
        "group": fabric.env.user,
        "path": cfg.root,
        "app_name": cfg.app_name,
        "root": cfg.root,
        "shared": root_path("shared"),
        "server_name": cfg.server_name,
        "env_name": fabric.env.environment_name,
        "workers": "--workers={}".format(workers) if workers else "",
    }
    for proc_name, base_command in procs.items():
        # Known process names with an empty command fall back to the default.
        if proc_name in DEFAULT_PROCESSES and not base_command:
            base_command = DEFAULT_PROCESSES[proc_name]
        # Render the runner command *before* adding the per-process keys,
        # matching the original evaluation order.
        rendered = UPSTART_RUNNER.format(root=cfg.root,
                                         command=base_command.format(**ctx))
        ctx.update({
            "description": "{} {}".format(cfg.app_name, proc_name),
            "process_name": proc_name,
            "process": rendered,
        })
        dest = root_path(
            "shared/init",
            "{app_name}-{env_name}-{process_name}.conf".format(**ctx))
        upload_template(tmpl, dest, ctx, backup=False)
def deploy(first=False, backup=False):
    """
    Deploy latest version of the project.
    Check out the latest version of the project from version
    control, install new requirements, sync and migrate the database,
    collect any new static assets, and restart gunicorn's work
    processes for the project.
    """
    if not exists(env.proj_path):
        abort("Project %s does not exist in host server. "
              "Run fab create before trying to deploy." % env.proj_name)
    srv, ssn, acn = get_webf_session()
    app = get_webf_obj(srv, ssn, "app", env.proj_name)
    env.gunicorn_port = app["port"]
    for name in get_templates():
        upload_template_and_reload(name)
    local("git push webfaction master")
    if backup:
        # BUG FIX: the boolean ``backup`` parameter shadows the module-level
        # backup() task, so the original call backup("last.db") invoked a
        # bool and raised TypeError.  Resolve the task explicitly.
        backup_task = globals()["backup"]
        with project():
            backup_task("last.db")
            static_dir = static()
            if exists(static_dir):
                run("tar -cf last.tar %s" % static_dir)
    manage("collectstatic -v 0 --noinput")
    static_dir = static() + "/.htaccess"
    upload_template("deploy/htaccess", static_dir, backup=False)
    manage("syncdb --noinput")
    manage("migrate --noinput")
    if first:
        run("supervisorctl update")
    else:
        restart()
    return True
def upload_template_and_reload(name):
    """
    Uploads a template only if it has changed, and if so, reload the
    related service.
    """
    template = get_templates()[name]
    local_path = template["local_path"]
    # Fall back to a path relative to this fabfile when the template is not
    # found relative to the current working directory.
    if not os.path.exists(local_path):
        project_root = os.path.dirname(os.path.abspath(__file__))
        local_path = os.path.join(project_root, local_path)
    remote_path = template["remote_path"]
    reload_command = template.get("reload_command")
    owner = template.get("owner")
    mode = template.get("mode")
    remote_data = ""
    if exists(remote_path):
        with hide("stdout"):
            remote_data = sudo("cat %s" % remote_path, show=False)
    with open(local_path, "r") as f:
        local_data = f.read()
    # Escape all non-string-formatting-placeholder occurrences of '%':
    local_data = re.sub(r"%(?!\(\w+\)s)", "%%", local_data)
    # Only fetch the DB password when the template actually references it.
    if "%(db_pass)s" in local_data:
        env.db_pass = db_pass()
    local_data %= env
    # Compare newline-stripped contents; skip the upload when unchanged.
    clean = lambda s: s.replace("\n", "").replace("\r", "").strip()
    if clean(remote_data) == clean(local_data):
        return
    upload_template(local_path, remote_path, env, use_sudo=True, backup=False)
    if owner:
        sudo("chown %s %s" % (owner, remote_path))
    if mode:
        sudo("chmod %s %s" % (mode, remote_path))
    if reload_command:
        sudo(reload_command)
def generate_settings_local(connection, database, site=None):
    """
    Generate the site(s) settings_local files.
    Usage:
        fab generate_settings_local:dev,dbname,sitename
    """
    if site is None:
        # FIX: use a distinct loop variable; the original comprehension
        # reused ``site`` and shadowed the parameter just tested above.
        sites = [s["name"] for s in settings.SITES]
    else:
        sites = [site]
    for site in sites:
        if connection == "dev":
            # Renamed from ``env`` to avoid shadowing the fabric env object.
            jinja_env = Environment(loader=PackageLoader("deploymachine", "templates"))
            template = jinja_env.get_template("settings_local_dev.j2")
            result = template.render(site=site)
            # FIX: write via a context manager so the handle is closed even
            # when the write fails.
            with open("{0}{1}/{1}/settings_local.py".format(
                    settings.SITES_LOCAL_ROOT, site), "w") as r_file:
                r_file.write(result)
        elif connection == "prod":
            filename = "templates/settings_local_prod.j2"
            destination = "{0}{1}/{1}/settings_local.py".format(settings.SITES_ROOT, site)
            context = {
                "site": site,
                "psql_port": settings.PGSQL_PORT,
                "redis_port": settings.REDIS_PORT,
                "db_password": settings.DATABASES[database],
                "openstack_username": settings.OPENSTACK_USERNAME,
                "openstack_api_key": settings.OPENSTACK_APIKEY,
            }
            upload_template(filename, destination, context=context, use_jinja=True)
        else:
            print("Invalid connection type. Use ``dev`` or ``prod``.")
def update_supervisord():
    """Upload the supervisord config and its start script to the server."""
    upload_template("conf_templates/supervisord.conf",
                    env.conf_dir + "/supervisord.conf",
                    {
                        "user": env.user,
                        "password": "******",
                        # we're not exposing the port so this isn't so important
                        "supervisor_port": env.supervisor_port,
                        "wsgi_handler": env.wsgi_handler,
                        "port": env.server_port,
                        "app_name": env.app_name,
                        "app_dir": env.app_dir,
                        "bin_dir": env.bin_dir,
                        "conf_dir": env.conf_dir,
                        "virtualenv": env.virtualenv,
                        "settings_module": env.settings_module
                    })
    upload_template("conf_templates/start_supervisor.sh",
                    env.bin_dir + '/start_supervisor.sh',
                    {
                        "user": env.user,
                        "conf_dir": env.conf_dir,
                        "virtualenv": env.virtualenv,
                        "app_name": env.app_name
                    },
                    # FIX: 0o750 -- the legacy 0750 octal literal is a syntax
                    # error on Python 3; the value is identical on Python 2.
                    mode=0o750,
                    )
def upload_template_and_reload(name, force=False):
    """
    Uploads a template only if it has changed, and if so, reload a
    related service.

    force: upload even when the rendered content matches the remote file.
    """
    template = get_templates()[name]
    local_path = template["local_path"]
    # Resolve the template relative to this fabfile if not found as given.
    if not os.path.exists(local_path):
        project_root = os.path.dirname(os.path.abspath(__file__))
        local_path = os.path.join(project_root, local_path)
    remote_path = template["remote_path"]
    reload_command = template.get("reload_command")
    owner = template.get("owner")
    mode = template.get("mode")
    remote_data = ""
    if exists(remote_path):
        with hide("stdout"):
            remote_data = sudo("cat %s" % remote_path, show=False)
    # Rendering is delegated to the project-level render() helper.
    local_data = render(template, local_path)
    # Compare newline-stripped contents to avoid no-op uploads.
    clean = lambda s: s.replace("\n", "").replace("\r", "").strip()
    if clean(remote_data) == clean(local_data) and not force:
        return
    upload_template(local_path, remote_path, env, use_sudo=True, backup=False,
                    use_jinja=template.get('jinja'))
    if owner:
        sudo("chown %s %s" % (owner, remote_path))
    if mode:
        sudo("chmod %s %s" % (mode, remote_path))
    if reload_command:
        sudo(reload_command)
def generate_settings_main(connection, site=None):
    """
    Generate the site(s) main ``settings.py`` file.
    Usage:
        fab generate_settings_main:dev,sitename
    """
    if site is None:
        # FIX: use a distinct loop variable; the original comprehension
        # reused ``site`` and shadowed the parameter just tested above.
        sites = [s["name"] for s in settings.SITES]
    else:
        sites = [site]
    for site in sites:
        if connection == "dev":
            # Renamed from ``env`` to avoid shadowing the fabric env object.
            jinja_env = Environment(loader=PackageLoader("deploymachine", "templates"))
            template = jinja_env.get_template("settings_main.j2")
            result = template.render(settings.SETTINGS_CUSTOM[site])
            # FIX: write via a context manager so the handle is closed even
            # when the write fails.
            with open("{0}{1}/{1}/settings.py".format(
                    settings.SITES_LOCAL_ROOT, site), "w") as r_file:
                r_file.write(result)
        elif connection == "prod":
            filename = "templates/settings_main.j2"
            destination = "{0}{1}/{1}/settings.py".format(settings.SITES_ROOT, site)
            context = settings.SETTINGS_CUSTOM[site]
            upload_template(filename, destination, context=context, use_jinja=True)
        else:
            print("Invalid connection type. Use ``dev`` or ``prod``.")
def proxy():
    """Upload the kube-proxy static-pod manifest to the node."""
    src = "templates/kube-proxy.yaml"
    dst = "/etc/kubernetes/manifests/kube-proxy.yaml"
    ctx = {"gateway_ip": gateway_ip}
    with settings(user='******', password=password):
        upload_template(src, dst, ctx)
def upload_template_and_reload(name):
    """
    Uploads a template only if it has changed, and if so, reload a
    related service.
    """
    template = get_templates()[name]
    local_path = template["local_path"]
    remote_path = template["remote_path"]
    reload_command = template.get("reload_command")
    owner = template.get("owner")
    mode = template.get("mode")
    # '1' (the default) marks templates that need sudo to install.
    require_sudo = template.get('require_sudo', '1')
    remote_data = ""
    # Use sudo when available, otherwise fall back to plain run.
    exec_func = sudo if env.have_sudo else run
    if exists(remote_path):
        with hide("stdout"):
            remote_data = exec_func("cat %s" % remote_path, show=False)
    with open(local_path, "r") as f:
        local_data = f.read()
    # Only fetch the DB password when the template actually references it.
    if "%(db_pass)s" in local_data:
        env.db_pass = db_pass()
    local_data %= env
    # Compare newline-stripped contents; skip the upload when unchanged.
    clean = lambda s: s.replace("\n", "").replace("\r", "").strip()
    if clean(remote_data) == clean(local_data):
        return
    # NOTE(review): when require_sudo is '1' and we have no sudo rights, the
    # upload is silently skipped -- confirm this is the intended behaviour.
    if require_sudo == '0' or env.have_sudo:
        upload_template(local_path, remote_path, env, use_sudo=env.have_sudo,
                        backup=False)
    if owner:
        exec_func("chown %s %s" % (owner, remote_path))
    if mode:
        exec_func("chmod %s %s" % (mode, remote_path))
    if reload_command:
        exec_func(reload_command)
def demo_haproxy_config():
    """Push the demo haproxy configuration and restart the service."""
    with settings(user='******', password=password):
        src = "templates/demo/haproxy.cfg"
        dst = "/etc/haproxy/haproxy.cfg"
        upload_template(src, dst)
        sudo("systemctl restart haproxy")
def control_manager():
    """Upload the kube-controller-manager static-pod manifest."""
    manifest = "/etc/kubernetes/manifests/kube-controller-manager.yaml"
    ctx = {"gateway_ip": gateway_ip}
    with settings(user='******', password=password):
        upload_template("templates/kube-controller-manager.yaml", manifest, ctx)
def ensure(**context):
    """Install supervisor into the current virtualenv and upload its config.

    Raises when not run inside a virtualenv; fills config defaults, resolves
    the port from env.VENV_PORT_PREFIX_MAP, then renders the jinja template
    declared in env.SUPERVISOR_CONF_TEMPLATE to <venv>/etc/supervisord.conf.
    """
    if 'CURRENT_VIRTUAL_ENV_DIR' not in env:
        # "Python packages may only be installed inside a virtualenv."
        raise Exception('只可以在虚拟环境安装Python包')
    venv_dir = env.CURRENT_VIRTUAL_ENV_DIR
    package.ensure('supervisor')
    context.setdefault('run_root', venv_dir)
    context.setdefault('username', util.random_str(10))
    context.setdefault('password', util.random_str(20, True))
    context.setdefault('process_count', 2)
    context.setdefault('venv_dir', venv_dir)
    # NOTE(review): venv_dir[-1:] is the *last character* of the path --
    # presumably a one-char env suffix keyed in VENV_PORT_PREFIX_MAP; confirm
    # this is not meant to be the directory basename.
    context.setdefault('virtualenv_name', venv_dir[-1:])
    if 'VENV_PORT_PREFIX_MAP' in env and isinstance(env.VENV_PORT_PREFIX_MAP, dict):
        try:
            context.setdefault('port', env.VENV_PORT_PREFIX_MAP[venv_dir[-1:]])
        except KeyError:
            # BUG FIX: the message referred to VENV_DIR_PORT_MAP although the
            # mapping actually checked above is VENV_PORT_PREFIX_MAP.
            raise Exception('你的端口配置VENV_PORT_PREFIX_MAP中key[%s]不存在!' % venv_dir[-1:])
    if 'PROCESS_COUNT' in env:
        context['process_count'] = env.PROCESS_COUNT
    config.check('SUPERVISOR_CONF_TEMPLATE')
    config_template = env.SUPERVISOR_CONF_TEMPLATE
    destination = path.join(venv_dir, 'etc', 'supervisord.conf')
    template_dir, filename = path.dirname(config_template), path.basename(config_template)
    files.upload_template(filename, destination, context=context,
                          use_jinja=True, template_dir=template_dir)
def setup_upstart(self):
    """Render and install the Upstart job that runs the app via gunicorn."""
    context = {
        "description": env.description,
        "project_dir": env.appdir,
        # Full command line, interpolated from the fabric env (envdir,
        # appuser and appbind must all be set).
        "exec_command": "%(envdir)s/bin/python manage.py run_gunicorn --user=%(appuser)s --bind=%(appbind)s archaeobotany.wsgi:application" % env
    }
    # The job file is named after the app user.
    upload_template('deploy/upstart.template', '/etc/init/%s.conf' % env.appuser, context, use_sudo=True)
def sf_ssl_proxy_build():
    """ Build the Nginx Seafile Server SSL Proxy image """
    _require_ssl_proxy_dirs()
    docker_vars = _sf_ssl_proxy_docker_vars()
    config_vars = _sf_ssl_proxy_private_config()
    config_vars_dict = dict(config_vars.items('seafile'))
    work_dir = docker_vars['work_dir']
    # nginx/ssl configuration
    # FIX: dict(a.items() + b.items()) only works on Python 2 -- dict views
    # do not concatenate on Python 3.  dict(mapping, **kwargs) merges the
    # same way on both versions.
    nginx_context = dict(config_vars_dict, docker_host=env.host)
    upload_template('private/seafile_ssl_proxy/default.conf', work_dir,
                    nginx_context)
    put('private/seafile_ssl_proxy/server.key', work_dir)
    put('private/seafile_ssl_proxy/server.crt', work_dir)
    # SSH configuration
    put('private/ssh/id_rsa_devbox.pub', work_dir + '/id_rsa.pub')
    # Supervisor
    put('seafile_ssl_proxy/supervisord.conf', work_dir)
    # Dockerfile
    upload_template('seafile_ssl_proxy/Dockerfile', work_dir + '/', docker_vars)
    # build
    with cd(work_dir):
        # <username / private repo address>/repo_name>:tag
        run('docker build -t %(image)s .' % docker_vars)
def setup_master(and_minion=True):
    """Sets up the master.

    and_minion: Sets up a minion to point to the master (itself). Defaults to True.
    """
    # Fabric passes task arguments as strings; coerce to a real bool.
    and_minion = boolean(and_minion)
    bootstrap_salt(master=True, minion=and_minion)
    # upload master config
    server_name = env.servers[env.host_string]['name']
    config = servers.configuration_for_server(server_name)
    master_config = config.find_file('master')
    context = dict(
        salt_data_dir=settings.salt_data_dir,
        configs=config.names,
        roles=settings.salt_master_roles + servers.roles_for_server(server_name),
    )
    # '; true' keeps the command from failing when the dir already exists.
    sudo('mkdir -p {salt_data_dir}; true'.format(**context))
    sudo('mkdir -p /etc/salt/; true'.format(**context))
    files.upload_template(master_config, os.path.join('/etc/salt/', 'master'),
                          context=context, use_jinja=True, use_sudo=True)
    if and_minion:
        # Local minion pointing at this master; services are (re)started
        # explicitly below, so skip bootstrap/restart here.
        setup_minion('127.0.0.1', server_name, bootstrap=False,
                     restart_service=False)
        env.just_bootstrapped = True
    # Restart master and minion via the OS-specific bootstrap commands.
    opsys = get_operating_system(settings.os_detectors)
    bootstrap = settings.bootstrap_for_operating_system(opsys)
    sudo(bootstrap['stop-master'])
    sudo(bootstrap['stop-minion'])
    sudo(bootstrap['start-master'])
    sudo(bootstrap['start-minion'])
def deploy_troia_server(confpath=DEFAULT_PATH): conf = readconf(confpath) # Ensure all services are already installed. ensure_srv(conf) src = '{source_root}/Troia-Server'.format(**conf) clone_or_update(src, conf['troia_server_repo']) target = '{}/target/GetAnotherLabel.war'.format(src) maven_build(src, target, cmd='package -Dmaven.test.skip=true', mvn='{maven_root}/bin/mvn'.format(**conf)) run('cp {} {tomcat_root}/webapps'.format(target, **conf)) execute(restart_troia_server, confpath=confpath) upload_template( os.path.join(CONF_ROOT, 'troia-server', 'dawidskene.properties'), '{tomcat_root}/webapps/GetAnotherLabel/WEB-INF/classes/dawidskene.properties'.format(**conf), context=conf) media_root = '{hyde_root}/media'.format(**conf) ensure_tree(media_root, ('downloads')) run('cp {} {}/downloads'.format(target, media_root)) ensure_tree('{project_root}'.format(**conf), ('scripts', 'sql')) upload_template( os.path.join(CONF_ROOT, 'db_clear.sh'), '{project_root}/scripts'.format(**conf), context=conf) put(os.path.join(CONF_ROOT, 'db_clear.sql'), '{sql_root}'.format(**conf))
def ensure_srv(conf):
    '''
    Ensures services (maven, tomcat) are installed correctly. It takes care
    only about services that are under ``services`` directory.
    '''
    # Skip entirely when both tomcat and maven are already in place.
    if exists('{services_root}/tomcat/bin/catalina.sh'.format(**conf)) and \
            exists('{services_root}/maven/bin/mvn'.format(**conf)):
        message('Services already installed. Skipping')
        return
    with cd('/tmp'):
        ensure_tree('{project_root}'.format(**conf), 'services')
        # Tarballs in /tmp act as a download cache between runs.
        if not exists("/tmp/tomcat.tgz"):
            message('Downloading apache tomcat')
            run('wget {tomcat_url} -O tomcat.tgz'.format(**conf))
        message('Installing apache tomcat')
        run('tar xzf tomcat.tgz')
        # Normalise the versioned directory name before copying over.
        run('rm -rf tomcat/')
        run('mv apache-tomcat-* tomcat')
        run('cp -rf tomcat {services_root}'.format(**conf))
        if not exists("/tmp/maven.tgz"):
            message('Downloading apache maven')
            run('wget {maven_url} -O maven.tgz'.format(**conf))
        message('Installing apache maven')
        run('tar xzf maven.tgz')
        run('rm -rf maven/')
        run('mv apache-maven-* maven')
        run('cp -rf maven {services_root}'.format(**conf))
    # Upload configuration file.
    upload_template(
        os.path.join(CONF_ROOT, 'tomcat', 'server.xml'),
        '{services_root}/tomcat/conf'.format(**conf),
        context=conf)
def openvpn_build():
    """ Build the OpenVPN docker image """
    openvpn_vars = _openvpn_config_vars()
    docker_vars = _openvpn_docker_vars()
    work_dir = docker_vars['work_dir']
    # OpenVPN configuration
    fabtools.require.files.directories([work_dir])
    # NOTE(review): work_dir + 'server.conf' has no path separator, unlike
    # the '/id_rsa.pub' concatenation below -- presumably work_dir ends with
    # '/'; confirm.
    upload_template('openvpn/configs/openvpn_server.conf',
                    work_dir + 'server.conf', openvpn_vars)
    # SSH configuration
    put('private/ssh/id_rsa_devbox.pub', work_dir + '/id_rsa.pub')
    # Supervisor
    put('openvpn/supervisord.conf', work_dir)
    # Dockerfile
    put('openvpn/Dockerfile', work_dir)
    # build
    with cd(work_dir):
        # <username / private repo address>/repo_name>:tag
        run('docker build -t %(image)s .' % docker_vars)
def setup_ringsh(ring, supervisor_host, node_host=None):
    """
    Install and configure ringsh.

    :param ring: ring name (dso name)
    :type ring: string
    :param supervisor_host: hostname or ip of the supervisor
    :type supervisor_host: string
    :param node_host: optional node address; when omitted the template gets
        the string 'None' instead of a node section.
    """
    if node_host is not None:
        # NOTE(review): chordPort is an int while adminPort is a string --
        # presumably the template stringifies both; confirm.
        node_section = {
            'address': node_host,
            'chordPort': 4244,
            'adminPort': '6444',
            'dsoName': ring,
        }
    else:
        # The literal string 'None' (not the None object) is passed through.
        node_section = 'None'
    install_packages('scality-ringsh')
    upload_template(
        filename=abspath('assets/config.py'),
        destination='/usr/local/scality-ringsh/ringsh',
        context={
            'mgmtuser': CREDENTIALS['mgmtuser'],
            'mgmtpass': CREDENTIALS['mgmtpass'],
            'supervisor_host': supervisor_host,
            'node': node_section,
        },
        use_sudo=True,
    )
def _upload_template(params):
    """ Run command to render and upload a template text file to a remote host. """
    src = _render(params['filename'])
    dst = _render(params['destination'])
    upload_template(filename=src, destination=dst, use_sudo=True)
def add_gunicorn_script():
    """ Deploys a script to run django. """
    filename = "%s/fabfile/templates/gunicorn.sh" % env.local_root_dir
    destination = env.server_root_dir
    context = {"user": env.user,
               "server_root_dir": env.server_root_dir,
               "django_port": env.django_port}
    # FIX: 0o776 -- the legacy 0776 octal literal is a syntax error on
    # Python 3; the value is identical on Python 2.
    # NOTE(review): 0o776 leaves the script group/world-writable -- confirm
    # that is intended.
    upload_template(filename, destination, context=context, mode=0o776)
def _install(self, rabbit_hosts, rabbit_pass, auth_uri, auth_url, neutron_pass, public_interface, local_ip):
    """Install and configure the neutron linuxbridge agent (Python 2 code).

    Renders each config body to a per-host temp file, uploads it with jinja
    substitution, then restarts the affected services.
    """
    print red(env.host_string + ' | Install the components')
    sudo('apt-get update')
    sudo('apt-get -y install neutron-plugin-linuxbridge-agent conntrack')
    print red(env.host_string + ' | Update /etc/neutron/neutron.conf')
    # Per-host temp file name so parallel runs do not clobber each other.
    with open('tmp_neutron_conf_' + env.host_string, 'w') as f:
        f.write(conf_neutron_conf)
    files.upload_template(filename='tmp_neutron_conf_' + env.host_string,
                          destination='/etc/neutron/neutron.conf',
                          use_jinja=True, use_sudo=True,
                          context={'rabbit_hosts': rabbit_hosts,
                                   'rabbit_password': rabbit_pass,
                                   'auth_uri': auth_uri,
                                   'auth_url': auth_url,
                                   'neutron_pass': neutron_pass})
    os.remove('tmp_neutron_conf_' + env.host_string)
    print red(env.host_string + ' | Update /etc/neutron/plugins/ml2/linuxbridge_agent.ini')
    with open('tmp_linuxbridge_agent_ini_' + env.host_string, 'w') as f:
        f.write(conf_linuxbridge_agent_ini)
    files.upload_template(filename='tmp_linuxbridge_agent_ini_' + env.host_string,
                          destination='/etc/neutron/plugins/ml2/linuxbridge_agent.ini',
                          use_jinja=True, use_sudo=True,
                          context={'public_interface': public_interface,
                                   'local_ip': local_ip})
    os.remove('tmp_linuxbridge_agent_ini_' + env.host_string)
    print red(env.host_string + ' | Restart Services')
    sudo('service nova-compute restart')
    sudo('service neutron-plugin-linuxbridge-agent restart')
def deploy():
    """ rsync code to remote host """
    require('root', provided_by=('staging', 'production'))
    # Interactive safety gate before touching production.
    if env.environment == 'production':
        if not console.confirm('Are you sure you want to deploy production?',
                               default=False):
            utils.abort('Production deployment aborted.')
    # defaults rsync options:
    # -pthrvz
    # -p preserve permissions
    # -t preserve times
    # -h output numbers in a human-readable format
    # -r recurse into directories
    # -v increase verbosity
    # -z compress file data during the transfer
    extra_opts = '--omit-dir-times'
    rsync_project(
        env.root,
        exclude=RSYNC_EXCLUDE,
        delete=True,
        extra_opts=extra_opts,
    )
    # upload django.wsgi file
    source = os.path.join('deploy', 'django.wsgi')
    # Rendered per environment, e.g. deploy/production.wsgi.
    dest = os.path.join(env.code_root, 'deploy', '%(environment)s.wsgi' % env)
    files.upload_template(source, dest, env)
    # fix permissions
    sudo('chown -R wwwpub %s' % env.home)
    sudo('chmod -R a+rw %s' % env.home)
def prime_init(remote="live", commit=False):
    """
    Milestones:
    * init site repo
    * add remote to hub
    * optional: add and commit and push to hub
    * upload post-commit hook template
    """
    # FIX: the milestones text above used to sit *after* the first statement,
    # so it was a bare string expression, not the function docstring.
    conf = env.conf[remote]
    run("mkdir -p %s" % conf["SITE_DIR"])
    with cd(conf["SITE_DIR"]):
        run("git init")
        run("git remote add hub %s" % conf["REPO_DIR"])
        if commit:
            put(os.path.join(_get_template_dir(conf), "templates/git/gitignore"),
                ".gitignore")
            run("git add .")
            run('git commit -m "first commit"')
            run("git push hub master")
        # Positional ``True`` is upload_template's use_jinja flag.
        files.upload_template(
            "hooks/post-commit",
            ".git/hooks/post-commit",
            conf,
            True,
            template_dir=os.path.join(_get_template_dir(conf), "templates/git"),
        )
        run("chmod +x .git/hooks/post-commit")
def copy_virtual_host_config():
    """Install the Apache virtual-host file rendered from the local template."""
    ctx = {
        'port': apache_port,
        'server_name': apache_server_name,
        'site_wsgi_path': site_wsgi_path,
        'logs_dir': logs_dir,
    }
    upload_template(config_source, config_destination,
                    use_sudo=True, backup=False, context=ctx)
def install_app():
    """Installs the django project in its own wf app and virtualenv """
    response = _webfaction_create_app(env.project)
    env.app_port = response['port']
    # upload template to supervisor conf
    supervisor_ctx = {
        'project': env.project,
        'project_dir': env.settings_dir,
        'virtualenv': '%s/%s' % (env.virtualenv_dir, env.project),
        'port': env.app_port,
        'user': env.user,
    }
    conf_dest = '%s/conf.d/%s.conf' % (env.supervisor_dir, env.project)
    upload_template('templates/gunicorn.conf', conf_dest, supervisor_ctx)
    with cd(env.home + '/webapps'):
        # Clone only on first install; later runs reuse the checkout.
        if not exists(env.project_dir + '/setup.py'):
            run('git clone %s %s' % (env.repo, env.project_dir))
    _create_ve(env.project)
    reload_app()
    restart_app()
def _install(self, connection, rabbit_hosts, rabbit_pass, auth_uri, auth_url, cinder_pass, my_ip, glance_host, rbd_secret_uuid, populate=False):
    """Install and configure cinder api/scheduler/volume (Python 2 code).

    populate: also run the cinder db sync on this host.
    """
    print red(env.host_string + ' | Install the cinder-api and cinder-volume')
    sudo('apt-get update')
    sudo('apt-get -y install cinder-api cinder-scheduler python-cinderclient cinder-volume python-mysqldb')
    print red(env.host_string + ' | Update /etc/cinder/cinder.conf')
    # Per-host temp file name so parallel runs do not clobber each other.
    with open('tmp_cinder_conf_' + env.host_string, 'w') as f:
        f.write(conf_cinder_conf)
    files.upload_template(filename='tmp_cinder_conf_' + env.host_string,
                          destination='/etc/cinder/cinder.conf',
                          use_jinja=True, use_sudo=True,
                          context={'connection': connection,
                                   'rabbit_hosts': rabbit_hosts,
                                   'rabbit_password': rabbit_pass,
                                   'auth_uri': auth_uri,
                                   'auth_url': auth_url,
                                   'cinder_pass': cinder_pass,
                                   'my_ip': my_ip,
                                   'glance_host': glance_host,
                                   'rbd_secret_uuid': rbd_secret_uuid})
    os.remove('tmp_cinder_conf_' + env.host_string)
    if populate:
        print red(env.host_string + ' | Populate the Block Storage database')
        # shell=False: run the su wrapper directly, without fabric's extra
        # shell layer.
        sudo('su -s /bin/sh -c "cinder-manage db sync" cinder', shell=False)
    print red(env.host_string + ' | Restart the services')
    # warn_only: nova-api/tgt may be absent on some nodes; don't abort.
    sudo('service nova-api restart', warn_only=True)
    sudo('service cinder-scheduler restart')
    sudo('service cinder-api restart')
    sudo('service tgt restart', warn_only=True)
    sudo('service cinder-volume restart')
    print red(env.host_string + ' | Remove the SQLite database file')
    sudo('rm -f /var/lib/cinder/cinder.sqlite')
def upload_template_and_reload(name):
    """
    Uploads a template only if it has changed, and if so, reload a
    related service.
    """
    template = get_templates()[name]
    local_path = template["local_path"]
    remote_path = template["remote_path"]
    reload_command = template.get("reload_command")
    owner = template.get("owner")
    mode = template.get("mode")
    remote_data = ""
    if exists(remote_path):
        with hide("stdout"):
            remote_data = sudo("cat %s" % remote_path)
    with open(local_path, "r") as f:
        local_data = f.read()
    # if "%(db_pass)s" in local_data:
    #     env.db_pass = db_pass()
    # NOTE(review): unlike sibling variants of this helper, '%' characters
    # in the template are not escaped before the interpolation below, so a
    # literal '%' in a template would raise here -- confirm templates only
    # contain %(...)s placeholders.
    local_data %= env
    # Compare newline-stripped contents; skip the upload when unchanged.
    clean = lambda s: s.replace("\n", "").replace("\r", "").strip()
    if clean(remote_data) == clean(local_data):
        return
    upload_template(local_path, remote_path, env, use_sudo=True, backup=False)
    if owner:
        sudo("chown %s %s" % (owner, remote_path))
    if mode:
        sudo("chmod %s %s" % (mode, remote_path))
    if reload_command:
        sudo(reload_command)
def install_config():
    """Upload pmxbot configuration, pulling secrets from the local keyring."""
    slack_token = keyring.get_password('familyfortress.slack.com', 'pmxbot')
    db_pass = keyring.get_password('mongodb.jaraco.com/family_fortress', 'pmxbot')
    twilio_token = keyring.get_password('Twilio', 'jaraco') or ''
    google_trans_key = keyring.get_password('api.google.com/freenode', 'pmxbot')
    wolframalpha_key = keyring.get_password('https://api.wolframalpha.com/', 'jaraco')
    sudo('mkdir -p /etc/pmxbot')
    # Non-secret configs are always refreshed.
    files.upload_template('pmxbot.conf', '/etc/pmxbot/main.conf', use_sudo=True)
    files.upload_template('web.conf', '/etc/pmxbot/web.conf', use_sudo=True)
    # Secret-bearing configs: (re)upload when the secret is available locally
    # or the remote file is missing; keep them owner-only (0600).
    secret_confs = [
        (slack_token, 'server.conf', {'slack token': slack_token}),
        (db_pass, 'database.conf', dict(password=db_pass)),
        (twilio_token, 'twilio.conf', dict(token=twilio_token)),
        (google_trans_key, 'trans.conf', dict(key=google_trans_key)),
        (wolframalpha_key, 'wolframalpha.conf', dict(key=wolframalpha_key)),
    ]
    for secret, conf_name, ctx in secret_confs:
        dest = '/etc/pmxbot/' + conf_name
        if secret or not files.exists(dest):
            files.upload_template(conf_name, dest, context=ctx,
                                  use_sudo=True, mode=0o600)
def config_webserver():
    """Install nginx and uwsgi configuration for the Sentry site.

    Chooses subdomain vs. location configs and SSL vs. plain HTTP based on
    module-level flags, then enables the site and the uwsgi app.
    """
    if USE_SSL and USE_LETSENCRYPT:
        generate_ssl_certificate()
    print("Configuring webserver...", end="\t")
    try:
        if USE_SUBDOMAINS:
            # Sentry lives on its own subdomain.
            if USE_SSL:
                files.upload_template(
                    "../conf/nginx/ssl-subdomain-sentry",
                    "/etc/nginx/sites-available/",
                    context={
                        'server_name': SUBDOMAINS['sentry'],
                        'certificate_path': env.ssl_cert_path,
                        'key_path': env.ssl_key_path,
                    },
                    use_sudo=True,
                )
                sudo("ln -nsf /etc/nginx/sites-available/ssl-subdomain-sentry "
                     "/etc/nginx/sites-enabled/")
            else:
                files.upload_template(
                    "../conf/nginx/subdomain-sentry",
                    "/etc/nginx/sites-available/",
                    context={'server_name': SUBDOMAINS['sentry']},
                    use_sudo=True,
                )
                sudo("ln -nsf /etc/nginx/sites-available/subdomain-sentry "
                     "/etc/nginx/sites-enabled/")
        else:
            # Sentry is served from a location under the main domain.
            put("../conf/nginx/location-sentry", "/etc/nginx/sites-available/",
                use_sudo=True)
            if USE_SSL:
                files.upload_template(
                    "../conf/nginx/ssl-server",
                    "/etc/nginx/sites-available/",
                    context={
                        'server_name': DOMAIN,
                        'certificate_path': env.ssl_cert_path,
                        'key_path': env.ssl_key_path,
                    },
                    use_sudo=True,
                )
                sudo("ln -nsf /etc/nginx/sites-available/ssl-server "
                     "/etc/nginx/sites-enabled/")
            else:
                files.upload_template(
                    "../conf/nginx/server",
                    "/etc/nginx/sites-available/",
                    context={'server_name': DOMAIN},
                    use_sudo=True,
                )
                sudo("ln -nsf /etc/nginx/sites-available/server "
                     "/etc/nginx/sites-enabled/")
        # Drop the distribution default site.
        sudo("rm -f /etc/nginx/sites-enabled/default")
        files.upload_template(
            "../conf/uwsgi/sentry.ini",
            "/etc/uwsgi/apps-available/",
            context={'dir': env.dir},
            use_sudo=True,
        )
        if not files.exists("/etc/uwsgi/apps-enabled/sentry.ini"):
            sudo("ln -s /etc/uwsgi/apps-available/sentry.ini "
                 "/etc/uwsgi/apps-enabled/")
        print_succeed()
    except AbortException as e:
        print_fail(e)
def send_shipper_conf(node_type, log_dir, cluster_jobs, redis_ip_job_status,
                      cluster_metrics, redis_ip_metrics):
    """Upload logstash indexer config and status templates for a node.

    node_type must be 'mozart' or 'metrics'; anything else raises.
    """
    role, hysds_dir, hostname = resolve_role()
    ctx = get_context(node_type)
    # All templates live in the same logstash config directory.
    tmpl_dir = os.path.join(ops_dir, 'mozart/ops/hysds/configs/logstash')
    status_templates = ('job_status.template', 'worker_status.template',
                        'task_status.template', 'event_status.template')
    if node_type == 'mozart':
        ctx.update({
            'cluster_jobs': cluster_jobs,
            'cluster_metrics': cluster_metrics
        })
        upload_template('indexer.conf.mozart', '~/mozart/etc/indexer.conf',
                        use_jinja=True, context=ctx, template_dir=tmpl_dir)
        for tmpl in status_templates:
            upload_template(tmpl, '~/mozart/etc/%s' % tmpl,
                            use_jinja=True, template_dir=tmpl_dir)
    elif node_type == 'metrics':
        ctx.update({
            'cluster_jobs': cluster_jobs,
            'cluster_metrics': cluster_metrics
        })
        upload_template('indexer.conf.metrics', '~/metrics/etc/indexer.conf',
                        use_jinja=True, context=ctx, template_dir=tmpl_dir)
        for tmpl in status_templates:
            upload_template(tmpl, '~/metrics/etc/%s' % tmpl,
                            use_jinja=True, template_dir=tmpl_dir)
    else:
        raise RuntimeError("Unknown node type: %s" % node_type)
def configure_bacula_master(path=None):
    """Upload configuration files for Bacula Master."""
    opts = dict(path=path or env.get('path') or err('env.path must be set'), )
    # XXX: Shouldn't we set file owner to bacula user, not the current user,
    # running the fabric commands?
    conf_names = (
        'bacula-dir.conf',
        'bacula-sd.conf',
        'bconsole.conf',
        'pool_defaults.conf',
        'pool_full_defaults.conf',
        'pool_diff_defaults.conf',
        'pool_inc_defaults.conf',
    )
    # Every file maps <path>/etc/<name> -> /etc/bacula/<name>.
    for name in conf_names:
        upload_template('%s/etc/%s' % (opts['path'], name),
                        '/etc/bacula/%s' % name,
                        use_sudo=True)
    sudo('service bacula-director restart')
def configure_deploy():
    """Config deploy scripts"""
    ctx = {'gestion': GESTION_NAME + '.' + env.server_name}
    upload_template('files/gestion/config.py',
                    '/var/www/git-repo/azimut-deploy/config.py',
                    ctx)
def create():
    """
    Creates the environment needed to host the project.
    The environment consists of: system locales, virtualenv, database, project
    files, SSL certificate, and project-specific Python requirements.

    Returns True on success. Calls abort() when an existing virtualenv is
    kept. Assumes the fabric ``env`` dict is fully populated (locale,
    proj_path, venv_home, proj_name, deploy_tool, ssl_disabled, domains,
    reqs_path, admin_pass, ...).
    """
    # Generate project locale
    locale = env.locale.replace("UTF-8", "utf8")
    with hide("stdout"):
        if locale not in run("locale -a"):
            sudo("locale-gen %s" % env.locale)
            sudo("update-locale %s" % env.locale)
            # postgres must be restarted to pick up the new locale
            sudo("service postgresql restart")
            run("exit")
    # Create project path
    run("mkdir -p %s" % env.proj_path)
    # Set up virtual env
    run("mkdir -p %s" % env.venv_home)
    with cd(env.venv_home):
        if exists(env.proj_name):
            if confirm("Virtualenv already exists in host server: %s"
                       "\nWould you like to replace it?" % env.proj_name):
                run("rm -rf %s" % env.proj_name)
            else:
                abort()
        run("python3 -m venv %s" % env.proj_name)
    # Upload project files
    if env.deploy_tool in env.vcs_tools:
        vcs_upload()
    else:
        rsync_upload()
    # Set up SSL certificate
    if not env.ssl_disabled:
        conf_path = "/etc/nginx/conf"
        if not exists(conf_path):
            sudo("mkdir %s" % conf_path)
        with cd(conf_path):
            crt_file = env.proj_name + ".crt"
            key_file = env.proj_name + ".key"
            if not exists(crt_file) and not exists(key_file):
                try:
                    # exactly one local cert/key pair expected in deploy/
                    crt_local, = glob(join("deploy", "*.crt"))
                    key_local, = glob(join("deploy", "*.key"))
                except ValueError:
                    # No local pair: generate a self-signed cert, then also
                    # provision a Let's Encrypt cert via certbot.
                    # NOTE(review): certbot writes to /etc/letsencrypt, not to
                    # crt_file/key_file above -- confirm the nginx conf
                    # template actually references the letsencrypt paths.
                    parts = (crt_file, key_file, env.domains[0])
                    sudo("openssl req -new -x509 -nodes -out %s -keyout %s "
                         "-subj '/CN=%s' -days 3650" % parts)
                    sudo("add-apt-repository -y ppa:certbot/certbot")
                    sudo("apt-get update")
                    sudo("apt-get install -y certbot")
                    sudo(
                        "certbot certonly --webroot --webroot-path=%s -d %s -d www.%s"
                        % (env.proj_path, env.site, env.site))
                else:
                    upload_template(crt_local, crt_file, use_sudo=True)
                    upload_template(key_local, key_file, use_sudo=True)
    # Install project-specific requirements
    upload_template_and_reload("settings")
    with project():
        pip("--upgrade pip")
        if env.reqs_path:
            pip("-r %s/%s" % (env.proj_path, env.reqs_path))
        pip("gunicorn setproctitle django-compressor "
            "python-memcached ")
        # Bootstrap the DB
        manage("makemigrations")
        manage("makemigrations theme")
        manage("migrate")
        # point the Django Site at the primary domain
        python(
            "from django.conf import settings;"
            "from django.contrib.sites.models import Site;"
            "Site.objects.filter(id=settings.SITE_ID).update(domain='%s');"
            % env.domains[0])
        for domain in env.domains:
            python("from django.contrib.sites.models import Site;"
                   "Site.objects.get_or_create(domain='%s');" % domain)
        if env.admin_pass:
            pw = env.admin_pass
            user_py = ("from django.contrib.auth import get_user_model;"
                       "User = get_user_model();"
                       "u, _ = User.objects.get_or_create(username='******');"
                       "u.is_staff = u.is_superuser = True;"
                       "u.set_password('%s');"
                       "u.save();" % pw)
            python(user_py, show=False)
            # echo the command with the password masked
            shadowed = "*" * len(pw)
            print_command(user_py.replace("'%s'" % pw, "'%s'" % shadowed))
    return True
def deploy_web(): go_path = env.go_path db_path = env.httpcache_path code_root = env.code_root web_path = join(env.project_path, "www") log_file = env.ffweb_logfile if not exists(web_path): sudo('mkdir -p %s' % web_path) sudo('chown %s:%s %s' % (env.runner_user, env.runner_group, web_path)) sudo('chown %s %s' % (env.runner_user, dirname(log_file))) # key file template = 'conf/gauth.json' context = copy(env) key_path = '/srv/ff/gauth.json' upload_template(template, key_path, context=context, backup=False, use_sudo=True) sudo('chown %s:%s %s' % (env.runner_user, env.runner_group, key_path)) sudo('chmod 600 %s' % (key_path)) template = 'conf/ffweb.conf' context = copy(env) context.salt = open('conf/salt.conf').read().strip() context.config_file = '/srv/ff/config.json' context.web_path = web_path context.www_public_path = web_path upload_template(template, '/etc/init/ffweb.conf', context=context, backup=False, use_sudo=True) with shell_env(GOPATH=go_path): if not exists(code_root): run('git clone %s %s' % (env.repository, code_root)) if not exists("%s/bin/go-bindata" % go_path): run('go get -u github.com/jteeuwen/go-bindata/...') with cd(code_root): run('git reset --hard && git checkout master && git pull') run("cd %s/httpd && npm install && gulp && gulp release" % code_root) run("cd %s/httpd && %s/bin/go-bindata -pkg=server -o=./src/bindata.go static/... templates/" % (code_root, go_path)) run("cd %s/httpd && go get ." % code_root) run("cd httpd && go build") with cd(web_path): bin_path = join(code_root, 'httpd', 'httpd') tpl_path = join(code_root, 'httpd', 'templates') sudo("mv %s ffweb" % bin_path) sudo("cp -a %s . " % tpl_path) sudo('chown %s:%s %s -R' % (env.runner_user, env.runner_group, web_path)) with settings(warn_only=True): sudo("stop ffweb") sudo("start ffweb")
def setup_celery_backend(rds_host, user_key, user_secret): ''' The real configuration happens here. ''' logging.info('Updating Ubuntu\'s repository index') sudo('apt-get update') # Not sure why, but sometimes I get "E: Unable to locate package git" # trying to solve this with a sleep. time.sleep(2) sudo('apt-get update') logging.info('Installing ubuntu packages') for pkg in ['git', 'python-pip', 'joe', 'python-mysqldb', 'supervisor']: sudo('apt-get install -y -q %s' % pkg) logging.info('Getting celery application source code') with cd('/tmp/'): sudo('ssh-keyscan -H github.com > /root/.ssh/known_hosts') put(DEPLOY_PRIVATE_PATH, '/root/.ssh/id_rsa', use_sudo=True) put(DEPLOY_PUBLIC_PATH, '/root/.ssh/id_rsa.pub', use_sudo=True) sudo('chmod 600 /root/.ssh/id_rsa') sudo('chmod 600 /root/.ssh/id_rsa.pub') sudo('git clone %s' % VULNWEB_REPO) logging.info('Installing requirements.txt (this takes time!)') with cd('/tmp/nimbostratus-target/'): sudo('git checkout %s' % VULNWEB_BRANCH) sudo('pip install --use-mirrors --upgrade -r requirements.txt') vulnweb_root = '/tmp/nimbostratus-target/servers/django_frontend/vulnweb' logging.info('Configuring django-celery application') # Overwrite the application configuration files upload_template('servers/celery_backend/broker.config', '%s/vulnweb/broker.py' % vulnweb_root, context={ 'access': user_key, 'secret': user_secret }, backup=False, use_sudo=True) upload_template('servers/celery_backend/databases.config', '%s/vulnweb/databases.py' % vulnweb_root, context={ 'user': LOW_PRIV_USER, 'password': LOW_PRIV_PASSWORD, 'host': rds_host }, backup=False, use_sudo=True) upload_template('servers/celery_backend/supervisor.config', '/etc/supervisor/conf.d/celery.conf', context={'django_root_path': vulnweb_root}, backup=False, use_sudo=True) sudo('supervisorctl update') with cd(vulnweb_root): sudo('python manage.py syncdb --noinput')
def create():
    """
    Creates the environment needed to host the project.
    The environment consists of: system locales, virtualenv, database, project
    files, SSL certificate, and project-specific Python requirements.

    Returns True on success; calls abort() when an existing virtualenv is
    kept. Assumes the fabric ``env`` dict is fully populated.
    """
    # Generate project locale
    locale = env.locale.replace("UTF-8", "utf8")
    with hide("stdout"):
        if locale not in run("locale -a"):
            sudo("locale-gen %s" % env.locale)
            sudo("update-locale %s" % env.locale)
            # postgres must restart to see the new locale before CREATE DATABASE
            sudo("service postgresql restart")
            run("exit")
    # Create project path
    run("mkdir -p %s" % env.proj_path)
    # Set up virtual env
    run("mkdir -p %s" % env.venv_home)
    with cd(env.venv_home):
        if exists(env.proj_name):
            if confirm("Virtualenv already exists in host server: %s"
                       "\nWould you like to replace it?" % env.proj_name):
                run("rm -rf %s" % env.proj_name)
            else:
                abort()
        run("virtualenv %s" % env.proj_name)
    # Upload project files
    if env.deploy_tool in env.vcs_tools:
        vcs_upload()
    else:
        rsync_upload()
    # Create DB and DB user
    pw = db_pass()
    # BUG FIX: the old escape, pw.replace("'", "\'"), was a no-op because
    # "\'" == "'" in Python; a single quote inside a SQL string constant
    # must be doubled instead.
    user_sql_args = (env.proj_name, pw.replace("'", "''"))
    user_sql = "CREATE USER %s WITH ENCRYPTED PASSWORD '%s';" % user_sql_args
    psql(user_sql, show=False)
    # echo the command with the password masked
    shadowed = "*" * len(pw)
    print_command(user_sql.replace("'%s'" % pw, "'%s'" % shadowed))
    psql("CREATE DATABASE %s WITH OWNER %s ENCODING = 'UTF8' "
         "LC_CTYPE = '%s' LC_COLLATE = '%s' TEMPLATE template0;" %
         (env.proj_name, env.proj_name, env.locale, env.locale))
    # Set up SSL certificate
    if not env.ssl_disabled:
        conf_path = "/etc/nginx/conf"
        if not exists(conf_path):
            sudo("mkdir %s" % conf_path)
        with cd(conf_path):
            crt_file = env.proj_name + ".crt"
            key_file = env.proj_name + ".key"
            if not exists(crt_file) and not exists(key_file):
                try:
                    # exactly one local cert/key pair expected in deploy/
                    crt_local, = glob(join("deploy", "*.crt"))
                    key_local, = glob(join("deploy", "*.key"))
                except ValueError:
                    # none found: fall back to a self-signed certificate
                    parts = (crt_file, key_file, env.domains[0])
                    sudo("openssl req -new -x509 -nodes -out %s -keyout %s "
                         "-subj '/CN=%s' -days 3650" % parts)
                else:
                    upload_template(crt_local, crt_file, use_sudo=True)
                    upload_template(key_local, key_file, use_sudo=True)
    # Install project-specific requirements
    upload_template_and_reload("settings")
    with project():
        if env.reqs_path:
            pip("-r %s/%s" % (env.proj_path, env.reqs_path))
        pip("gunicorn setproctitle psycopg2 "
            "django-compressor python-memcached")
        # Bootstrap the DB
        manage("createdb --noinput --nodata")
        # point the Django Site at the primary domain
        python("from django.conf import settings;"
               "from django.contrib.sites.models import Site;"
               "Site.objects.filter(id=settings.SITE_ID).update(domain='%s');"
               % env.domains[0])
        for domain in env.domains:
            python("from django.contrib.sites.models import Site;"
                   "Site.objects.get_or_create(domain='%s');" % domain)
        if env.admin_pass:
            pw = env.admin_pass
            user_py = ("from django.contrib.auth import get_user_model;"
                       "User = get_user_model();"
                       "u, _ = User.objects.get_or_create(username='******');"
                       "u.is_staff = u.is_superuser = True;"
                       "u.set_password('%s');"
                       "u.save();" % pw)
            python(user_py, show=False)
            # echo the command with the password masked
            shadowed = "*" * len(pw)
            print_command(user_py.replace("'%s'" % pw, "'%s'" % shadowed))
    return True
def install_configure0(*args, **kwargs): apt_install = False if apt_install: apt_depends("webhook") elif not cmd_avail("webhook"): go.install0() run("go get github.com/adnanh/webhook") if (kwargs.get("HOOK_PORT") == 443 or kwargs.get("HOOK_KEY") or kwargs.get("HOOK_CERT")): kwargs["HOOK_SECURE"] = True if not kwargs.get("HOOK_IP") and kwargs.get("SERVER_NAME"): kwargs["HOOK_IP"] = kwargs["SERVER_NAME"] if not kwargs.get("HOOK_HOOKS"): kwargs["HOOK_HOOKS"] = "/etc/webhook.json" else: sudo('mkdir -p "${' + kwargs["HOOK_HOOKS"] + '##*/}"', shell_escape=False) sio = StringIO() dump(kwargs["HOOK_HOOKS_JSON"], sio) tmp = "{}.tmp".format(kwargs["HOOK_HOOKS"]) put(sio, tmp, use_sudo=True) sudo("cat {tmp} | envsubst > {hooks} && rm {tmp}".format( tmp=tmp, hooks=kwargs["HOOK_HOOKS"])) if "HOOK_NOPANIC" not in kwargs: kwargs["HOOK_NOPANIC"] = "" # true elif not kwargs["HOOK_NOPANIC"]: del kwargs["HOOK_NOPANIC"] upload_template( hook_dir("webhook.service"), "/lib/systemd/system/", context={ "CMD": "/usr/bin/webhook" if apt_install else run( 'echo "$GOPATH/bin/webhook"', quiet=True, shell_escape=False), "ARGS": " ".join("-{cli_arg} '{cli_val}'".format( cli_arg=cli_arg, cli_val=kwargs["HOOK_{}".format(cli_arg.upper())]) for cli_arg in ( "cert", "header", "hooks", "hotreload", "ip", "key", "nopanic", "port", "secure", "template", "verbose", ) if "HOOK_{}".format(cli_arg.upper()) in kwargs).replace( " ''", "").replace(" 'True'", ""), }, use_sudo=True, ) return restart_systemd("webhook")
def create(): """ Create a virtual environment, pull the project's repo from version control, add system-level configs for the project, and initialise the database with the live host. """ # Create virtualenv with cd(env.venv_home): if exists(env.proj_name): prompt = raw_input("\nVirtualenv exists: %s\nWould you like " "to replace it? (yes/no) " % env.proj_name) if prompt.lower() != "yes": print "\nAborting!" return False remove() run("virtualenv %s --distribute" % env.proj_name) vcs = "git" if env.repo_url.startswith("git") else "hg" run("%s clone %s %s" % (vcs, env.repo_url, env.proj_path)) # Create DB and DB user. pw = db_pass() user_sql_args = (env.proj_name, pw.replace("'", "\'")) user_sql = "CREATE USER %s WITH ENCRYPTED PASSWORD '%s';" % user_sql_args psql(user_sql, show=False) shadowed = "*" * len(pw) print_command(user_sql.replace("'%s'" % pw, "'%s'" % shadowed)) psql("CREATE DATABASE %s WITH OWNER %s ENCODING = 'UTF8' " "LC_CTYPE = '%s' LC_COLLATE = '%s' TEMPLATE template0;" % (env.proj_name, env.proj_name, env.locale, env.locale)) # Set up SSL certificate. conf_path = "/etc/nginx/conf" if not exists(conf_path): sudo("mkdir %s" % conf_path) with cd(conf_path): crt_file = env.proj_name + ".crt" key_file = env.proj_name + ".key" if not exists(crt_file) and not exists(key_file): try: crt_local, = glob(os.path.join("deploy", "*.crt")) key_local, = glob(os.path.join("deploy", "*.key")) except ValueError: parts = (crt_file, key_file, env.live_host) sudo("openssl req -new -x509 -nodes -out %s -keyout %s " "-subj '/CN=%s' -days 3650" % parts) else: upload_template(crt_file, crt_local, use_sudo=True) upload_template(key_file, key_local, use_sudo=True) # Set up project. 
upload_template_and_reload("settings") with project(): if env.reqs_path: pip("-r %s/%s" % (env.proj_path, env.reqs_path)) pip("gunicorn setproctitle south psycopg2 " "django-compressor python-memcached") manage("createdb --noinput") python("from django.conf import settings;" "from django.contrib.sites.models import Site;" "site, _ = Site.objects.get_or_create(id=settings.SITE_ID);" "site.domain = '" + env.live_host + "';" "site.save();") if env.admin_pass: pw = env.admin_pass user_py = ("from django.contrib.auth.models import User;" "u, _ = User.objects.get_or_create(username='******');" "u.is_staff = u.is_superuser = True;" "u.set_password('%s');" "u.save();" % pw) python(user_py, show=False) shadowed = "*" * len(pw) print_command(user_py.replace("'%s'" % pw, "'%s'" % shadowed)) return True
def configure_ssh(): upload_template('files/gestion/config', '/home/www-data/.ssh/config') sudo("chown www-data:www-data /home/www-data/.ssh/config")
def setup_git_pull_upstart2(*args, **kwargs): if exists("/run/systemd/system"): raise NotImplementedError("SystemD not implemented yet") apt_depends("inotify-tools") default_conf = { "AUTHOR": __author__, "DESCRIPTION": "git pull (force update to whatever is on git repo)", "GIT_BRANCH": "master", "GIT_REMOTE": "origin", "HOOKSERVE_LOGFILE": "/var/log/upstart/{service}.log".format( service=kwargs.get("hookserve-init-name", "hookserve.conf").partition( path.extsep )[0] ), } if "SERVER_LOCATION" in kwargs.get("git_pull-init-context", {}): kwargs["git_pull-init-context"]["GIT_DIR"] = kwargs["git_pull-init-context"][ "SERVER_LOCATION" ] required = ( ("GIT_DIR", "/var/www/somegitrepo"), ("GIT_REPO", "https://github.com/foo/bar.git"), ) validate_conf( kwargs.get("git_pull-init-context", {}), required, logger=logger, name="git_pull-init-context", ) if not exists(kwargs["git_pull-init-context"]["GIT_DIR"]): sudo( "git clone {git_repo} {to_dir}".format( git_repo=kwargs["git_pull-init-context"]["GIT_REPO"], to_dir=kwargs["git_pull-init-context"]["GIT_DIR"], ) ) init_name = kwargs.get("git_pull-init-name", "git_pull.conf") init_dir = kwargs.get("git_pull-init-dir", "/etc/init") init_local_filename = kwargs.get( "git_pull-upstart-filename", resource_filename( "offregister_githook", path.join("conf", "git_pull.upstart.job.conf") ), ) service = init_name.partition(".")[0] upload_template( init_local_filename, "{init_dir}/{init_name}".format(init_dir=init_dir, init_name=init_name), context=update_d(default_conf, kwargs.get("git_pull-init-context", {})), use_sudo=True, ) status_cmd = "status {service}".format(service=service) if "start/running" in run(status_cmd): sudo("stop {service}".format(service=service)) logger.info(sudo("start {service}".format(service=service))) return run(status_cmd)
def upload_template_and_reload(name, templates=None): """ Uploads a template only if it has changed, and if so, reload a related service. """ template = get_templates(templates)[name] local_path = os.path.abspath(template["local_path"]) if not os.path.exists(local_path): project_root = os.path.dirname(os.path.abspath(__file__)) local_path = os.path.join(project_root, local_path) remote_path = template["remote_path"] reload_command = template.get("reload_command") owner = template.get("owner") mode = template.get("mode") # populate the values for the template substitution with the current # fabric environment values = copy(env) # if the template config has an 'extras' dict, step through it and # assign the key to the values dict; the value of the new element # can either be a literal or a callable. If it's a callable, call it. for (k, v) in template.get("extras", {}).items(): # the value may be a full dotted module path (eg. cotton.fabfile.utils.get_hostname). # If so, check to see if everything up to the last dot is listed in sys.modules. If it is, # it's a module, so check to see if it has a an attribute matching the last portion of the # value. If it does, and it's callable, call it and use the return value for the template # variable, otherwise use the original string. 
parts = v.split('.') module = sys.modules['.'.join(parts[:-1])] meth = getattr(module, parts[-1:][0], None) setattr(values, k, meth() if meth and callable(meth) else v) remote_data = "" if exists(remote_path): with hide("stdout"): remote_data = sudo("cat %s" % remote_path) with open(local_path, "r") as f: local_data = f.read() # Escape all non-string-formatting-placeholder occurrences of '%': local_data = re.sub(r"%(?!\(\w+\)s)", "%%", local_data) local_data %= values clean = lambda s: s.replace("\n", "").replace("\r", "").strip() # no changes, so no need to rewrite the file if clean(remote_data) == clean(local_data): return # upload the updated file and set its owner/perms, if necessary print "Uploading %s => %s" % (local_path, remote_path) upload_template(local_path, remote_path, values, use_sudo=not env.user == "root", backup=False) if owner: sudo("chown %s %s" % (owner, remote_path)) if mode: sudo("chmod %s %s" % (mode, remote_path)) # if there's a reload command, execute it now if reload_command: sudo(reload_command)
def install_circus2(circus_env=None, circus_cmd=None, circus_args=None,
                    circus_name=None, circus_home=None,
                    circus_venv="/opt/venvs/circus", remote_user="******",
                    virtual_env=None, use_sudo=False, *args, **kwargs):
    """Install circus into its own virtualenv, drop a watcher config for
    the given command, and register circusd with systemd or upstart.

    Returns the circus virtualenv path, or a skip message when the
    required circus_* arguments are missing.
    """
    if (circus_cmd is None or circus_args is None or circus_name is None
            or circus_home is None):
        return "insufficient args, skipping circus"
    virtual_env = virtual_env or "{home}/venvs/tflow".format(
        home=run("echo $HOME", quiet=True))
    conf_dir = "/etc/circus/conf.d"  # '/'.join((taiga_root, 'config'))
    sudo("mkdir -p {conf_dir}".format(conf_dir=conf_dir))
    if not use_sudo:
        # hand ownership of the venvs and conf dir to the remote user so
        # the rest of the steps can run unprivileged
        user, group = run("echo $(id -un; id -gn)").split(" ")
        sudo("mkdir -p {circus_venv} {virtual_env}".format(
            circus_venv=circus_venv, virtual_env=virtual_env))
        sudo("chown -R {user}:{group} {circus_venv} {virtual_env} {conf_dir}".
             format(
                 user=user,
                 group=group,
                 circus_venv=circus_venv,
                 virtual_env=virtual_env,
                 conf_dir=conf_dir,
             ))
    install_venv0(python3=False, virtual_env=circus_venv, use_sudo=use_sudo)
    run_cmd = partial(_run_command, sudo=use_sudo)
    run_cmd("mkdir -p {circus_home}/logs".format(circus_home=circus_home))
    with shell_env(VIRTUAL_ENV=circus_venv,
                   PATH="{}/bin:$PATH".format(circus_venv)):
        run_cmd("pip install circus")
        # e.g. "Python 2.7.12" -> "2.7" for the watcher config
        py_ver = run("python --version").partition(" ")[2][:3]
    upload_template(
        offpy_dir("circus.ini"),
        "{conf_dir}/".format(conf_dir=conf_dir),
        context={
            "ENDPOINT_PORT": 5555,
            "WORKING_DIR": kwargs.get("circus_working_dir", circus_home),
            "CMD": circus_cmd,
            "ARGS": circus_args,
            "NAME": circus_name,
            "USER": remote_user,
            "HOME": circus_home,
            "VENV": virtual_env,
            "CIRCUS_ENV": "" if circus_env is None else "\n".join(
                "{}={}".format(k, v) for k, v in iteritems(circus_env)),
            "PYTHON_VERSION": py_ver,
        },
        use_sudo=use_sudo,
    )
    # register circusd with whichever init system the host runs
    circusd_context = {"CONF_DIR": conf_dir, "CIRCUS_VENV": circus_venv}
    if exists("/etc/systemd/system"):
        upload_template(
            offpy_dir("circusd.service"),
            "/etc/systemd/system/",
            context=circusd_context,
            use_sudo=True,
            backup=False,
        )
        sudo("systemctl daemon-reload")
    else:
        upload_template(
            offpy_dir("circusd.conf"),
            "/etc/init/",
            context=circusd_context,
            use_sudo=True,
            backup=False,
        )
    return circus_venv
def setup_hookserve1(*args, **kwargs):
    """Deploy the hookserve git-hook server: as an upstart job on Ubuntu
    < 15.04, otherwise as a systemd unit. Returns the status output."""
    os_version = ubuntu_version()
    # NOTE(review): float comparison of version numbers -- works for
    # standard Ubuntu versions but is fragile in general; confirm.
    default_conf = {
        "AUTHOR": __author__,
        "DESCRIPTION": "hookserve git hook server and git pull task",
        "DAEMON": "/usr/local/bin/hookserve",
        "DAEMON_ARGS": "--port={DAEMON_PORT:d} echo",
        "DAEMON_PORT": 8888,
        "PID": "/var/run/hookserve.pid",
    }
    context = update_d(default_conf, kwargs.get("hookserve-init-context", {}))
    # late-bind the (possibly overridden) port into DAEMON_ARGS
    context["DAEMON_ARGS"] = context["DAEMON_ARGS"].format(
        DAEMON_PORT=context["DAEMON_PORT"]
    )
    if os_version < 15.04:
        # --- upstart branch ---
        init_dir = kwargs.get("hookserve-init-dir", "/etc/init")
        init_name = (
            # NOTE(review): '.service' is an odd extension for an upstart
            # job (cf. the 'hookserve.conf' default) -- confirm intended.
            "{}.service".format(kwargs["hookserve-init-name"])
            if "hookserve-init-name" in kwargs
            else "hookserve.conf"
        )
        init_local_filename = kwargs.get(
            "hookserve-upstart-filename",
            resource_filename(
                "offregister_githook", path.join("conf", "hookserve.upstart.conf")
            ),
        )
        service = init_name.partition(".")[0]
        upload_template(
            init_local_filename,
            "{init_dir}/{init_name}".format(init_dir=init_dir, init_name=init_name),
            context=context,
            use_sudo=True,
        )
        # stop+start so the updated job definition is loaded
        status_cmd = "status {service}".format(service=service)
        if "start/running" in run(status_cmd):
            sudo("stop {service}".format(service=service))
        sudo("start {service}".format(service=service))
        return run(status_cmd)
    else:
        # --- systemd branch ---
        unit_name = (
            # NOTE(review): systemd only loads files with a recognised
            # suffix (e.g. '.service'); a '.unit' file will not be
            # loadable by `systemctl start hookserve` -- confirm.
            "{}.unit".format(kwargs["hookserve-init-name"])
            if "hookserve-init-name" in kwargs
            else "hookserve.unit"
        )
        unit_local_filename = kwargs.get(
            "hookserve-upstart-filename",
            resource_filename(
                "offregister_githook", path.join("conf", "hookserve.systemd.unit")
            ),
        )
        service = unit_name.partition(".")[0]
        unit_file = "/etc/systemd/system/{init_name}".format(init_name=unit_name)
        upload_template(unit_local_filename, unit_file, context=context,
                        use_sudo=True)
        sudo("chmod 664 {unit_file}".format(unit_file=unit_file))
        sudo("systemctl daemon-reload")
        status_cmd = "systemctl status {service}".format(service=service)
        # NOTE(review): 'start/running' is upstart phrasing; systemctl
        # reports 'active (running)', so this stop branch never fires.
        if "start/running" in run(status_cmd):
            sudo("systemctl stop {service}".format(service=service))
        sudo("systemctl start {service}".format(service=service))
        return run(status_cmd)
def postinstall_rootfs(build_date):
    """
    Post install rootfs GNU/Linux Debian

    :param build_date: date suffix identifying the build directory under
        TMP_DIR (the chroot lives in <build_dir>/rootfs-<build_date>).
    """
    build_dir = TMP_DIR + "/" + BUILD_PREFIX + build_date
    rootfs_dir = build_dir + "/rootfs-" + build_date
    # Set hostname (symlinked so the NFS-mounted config provides it)
    with cd(rootfs_dir):
        run("rm etc/hostname")
        run("ln -s /var/local/config/hostname etc/hostname")
    # Add RPI firmware libraries to the cache
    upload_template('template/rootfs/etc/ld.so.conf.d/vc.conf',
                    "%s/etc/ld.so.conf.d/vc.conf" % rootfs_dir)
    # Create tmpfs mount points
    if not exists("%s/var/tmp" % rootfs_dir):
        run("mkdir -p %s/var/tmp " % rootfs_dir)
    if not exists("%s/var/lib/dhcp" % rootfs_dir):
        run("mkdir -p %s/var/lib/dhcp " % rootfs_dir)
    if not exists("%s/var/volatile" % rootfs_dir):
        run("mkdir -p %s/var/volatile " % rootfs_dir)
    # Set source list
    run("echo \"deb http://mirrordirector.raspbian.org/raspbian/ jessie \
main contrib non-free rpi\" > %s/etc/apt/sources.list" % rootfs_dir)
    run("echo \"deb http://archive.raspberrypi.org/debian/ jessie main\" \
>> %s/etc/apt/sources.list" % rootfs_dir)
    # trust the raspberrypi.org archive key inside the chroot
    run("chroot %s wget http://archive.raspberrypi.org/debian/raspberrypi.gpg.key \
-O - | apt-key add - " % rootfs_dir)
    run("chroot %s apt-get update" % rootfs_dir)
    # Copy configuration template
    upload_template('template/rootfs/etc/fstab', "%s/etc/fstab" % rootfs_dir)
    upload_template('template/rootfs/etc/hosts', "%s/etc/hosts" % rootfs_dir)
    upload_template('template/rootfs/etc/network/interfaces',
                    "%s/etc/network/interfaces" % rootfs_dir)
    # Copy NFS mount script
    upload_template('template/rootfs/etc/init.d/iotlab_nfs_mount',
                    "%s/etc/init.d/iotlab_nfs_mount" % rootfs_dir)
    run("chmod +x %s/etc/init.d/iotlab_nfs_mount" % rootfs_dir)
    run("chroot %s update-rc.d iotlab_nfs_mount defaults" % rootfs_dir)
    # Create NFS mounts directory
    if not exists("%s/var/local/config" % rootfs_dir):
        run("mkdir -p %s/var/local/config " % rootfs_dir)
    if not exists("%s/iotlab/users" % rootfs_dir):
        run("mkdir -p %s/iotlab/users " % rootfs_dir)
    # Configure timezone
    run("echo \"Europe/Paris\" > %s/etc/timezone" % rootfs_dir)
    run("chroot %s dpkg-reconfigure -f noninteractive tzdata" % rootfs_dir)
    # Disable udev net rule generation
    with cd(rootfs_dir):
        run('ln -s /dev/null etc/udev/rules.d/75-persistent-net-generator.rules'
            )
    # Install needed packages
    install_packages(rootfs_dir)
    # Install SSH
    install_ssh(rootfs_dir)
    copy_ssh_keys(rootfs_dir)
    # Install OML
    install_oml2(rootfs_dir)
    # Install IoT-LAB Gateway
    install_iotlab_gateway(rootfs_dir)
    # Install LLDPD daemon
    install_lldp(rootfs_dir)
    # configure NTP
    upload_template('template/rootfs/etc/ntp.conf',
                    "%s/etc/ntp.conf" % rootfs_dir)
def install_app(rackspace=False): run("[ -d app ] || git clone [email protected]:davidkhess/whoop.git app") run("[ -d var/run ] || mkdir -p var/run") run("[ -d var/log ] || mkdir -p var/log") with cd("app"): with prefix(VIRTUAL_ENV): run("./manage.py generate_secret_key") run("./manage.py drop --yes") run("./manage.py create") run("./manage.py load") run("bower install") with cd("app/static/vendor/superagent"): run("npm install") run("make superagent.js") upload_template("etc/nginx.conf", "/etc/nginx/sites-enabled/app.conf", use_sudo=True, mode=0644, context={"username": env.user}) sudo("chown root:root /etc/nginx/sites-enabled/app.conf") sudo("rm /etc/nginx/sites-enabled/default") sudo("[ -d /etc/nginx/keys ] || mkdir /etc/nginx/keys") put("etc/app.crt", "/etc/nginx/keys", use_sudo=True, mode=0644) sudo("chown root:root /etc/nginx/keys/app.crt") put("etc/app.key", "/etc/nginx/keys", use_sudo=True, mode=0644) sudo("chown root:root /etc/nginx/keys/app.key") upload_template("etc/supervisord.conf", "/etc/supervisor/conf.d/app.conf", use_sudo=True, mode=0644, context={"username": env.user}) sudo("chown root:root /etc/supervisor/conf.d/app.conf") sudo("invoke-rc.d nginx restart") sudo("supervisorctl update") sudo("supervisorctl restart 'app:*'") # Set up lets encrypt. if rackspace: sudo( "git clone https://github.com/letsencrypt/letsencrypt /opt/letsencrypt" ) sudo("mkdir /opt/letsencrypt/.well-known") sudo( "/opt/letsencrypt/letsencrypt-auto certonly -a webroot --webroot-path=/opt/letsencrypt -d app.justwhoop.com" ) sudo("rm /etc/nginx/keys/app.key") sudo("rm /etc/nginx/keys/app.crt") sudo( "ln -s /etc/letsencrypt/live/app.justwhoop.com/privkey.pem app.key" ) sudo( "ln -s /etc/letsencrypt/live/app.justwhoop.com/fullchain.pem app.crt" ) sudo("invoke-rc.d nginx restart") put("etc/crontab", "bootstrap") sudo("crontab bootstrap/crontab")
def step0(domain, *args, **kwargs):
    """Install and configure dokku on first run; on later runs, refresh
    the static-sites nginx setup and optional static git checkout.

    :param domain: hostname handed to dokku's debconf and, when serving
        static pages, symlinked under /home/static/sites.
    Returns a short status string.
    """
    key_file = "/root/.ssh/id_rsa.pub"
    # template var -> (dokku debconf option name, value)
    config = {
        "DOKKU_HOSTNAME": ("hostname", domain),
        "DOKKU_KEY_FILE": ("key_file", key_file),
        "DOKKU_SKIP_KEY_FILE": ("skip_key_file", False),
        "DOKKU_VHOST_ENABLE": ("vhost_enable", False),
        "DOKKU_WEB_CONFIG": ("web_config", False),
    }
    create_static = kwargs.get("create_static_page", True)
    static_git_url = kwargs.get(
        "static_git",
        environ.get("DOKKU_STATIC_GIT", environ.get("STATIC_GIT")))
    local_pubkey = kwargs.get("PUBLIC_KEY_PATH") or environ.get(
        "DOKKU_PUBLIC_KEY_PATH", environ["PUBLIC_KEY_PATH"])
    if not cmd_avail("docker"):
        docker.install_0()
        # docker.dockeruser_1()
        docker.serve_2()
    # SECURITY(review): hard-coded password written to /tmp/passwd --
    # this belongs in a secret store, not in the source tree.
    put(StringIO("pZPlHOkV649DCepEwf9G"), "/tmp/passwd")
    if not cmd_avail("dokku"):  # is_installed('dokku'):
        run("wget -qN https://packagecloud.io/gpg.key")
        sudo("apt-key add gpg.key")
        append(
            "/etc/apt/sources.list.d/dokku.list",
            "deb https://packagecloud.io/dokku/dokku/ubuntu/ trusty main",
            use_sudo=True,
        )
        # pre-seed debconf answers so `apt-get install dokku` is
        # non-interactive
        put(
            StringIO("\n".join("{com} {com}/{var} {type} {val}".format(
                com="dokku",
                var=v[0],
                # BUG FIX: the old test `type(v[1]) is type(bool)` compared
                # against the metaclass `type` and was never true for bool
                # values, so debconf received "True"/"False"; debconf
                # boolean values must be lowercase "true"/"false".
                val=str(v[1]).lower() if isinstance(v[1], bool) else v[1],
                type=(lambda t: {
                    type(True): "boolean",
                    type(""): "string",
                    type(str): "string",
                }.get(t, t))(type(v[1])),
            ) for k, v in iteritems(config) if v[1] is not None)),
            "/tmp/dokku-debconf",
        )
        sudo("debconf-set-selections /tmp/dokku-debconf")
        if not exists(key_file):
            sudo('ssh-keygen -t rsa -b 4096 -f {key_file} -N ""'.format(
                key_file=key_file))
        apt_depends("dokku")
        sudo("dokku plugin:install-dependencies --core")
        put(local_pubkey, key_file)
        sudo("sshcommand acl-add dokku domain {key_file}".format(
            key_file=key_file))
        return "installed dokku"
    if create_static:
        # create the unprivileged 'static' user on first run
        if run("getent passwd static", quiet=True, warn_only=True).failed:
            sudo("adduser static --disabled-password")
            sudo("mkdir /home/static/sites/", user="******")
        upload_template(
            path.join(
                path.dirname(
                    resource_filename("offregister_dokku", "__init__.py")),
                "data",
                "static_sites.conf",
            ),
            "/etc/nginx/conf.d/static_sites.conf",
            use_sudo=True,
        )
        if sudo("service nginx status").endswith("stop/waiting"):
            sudo("service nginx start")
        else:
            sudo("service nginx reload")
        # TODO: Abstract this out into a different module, and allow for
        # multiple domains
        if static_git_url:
            ipv4 = "/home/static/sites/{public_ipv4}".format(
                public_ipv4=kwargs["public_ipv4"])
            if exists(ipv4):
                sudo("rm -rf {ipv4}".format(ipv4=ipv4))
            sudo("mkdir -p {ipv4}".format(ipv4=ipv4), user="******")
            if domain:
                domain = "/home/static/sites/{domain}".format(domain=domain)
                if not exists(domain):
                    sudo(
                        "ln -s {ipv4} {domain}".format(ipv4=ipv4,
                                                       domain=domain),
                        user="******",
                    )
            xip = "{ipv4}.xip.io".format(ipv4=ipv4)
            if not exists(xip):
                sudo("ln -s {ipv4} {xip}".format(ipv4=ipv4, xip=xip),
                     user="******")
    if static_git_url:
        apt_depends("git")
        if isinstance(static_git_url, str):
            clone_or_update(**url_to_git_dict(static_git_url))
        else:
            clone_or_update(to_dir=ipv4, **static_git_url)
    return "installed dokku [already]"
def install_ssh(rootfs_dir): """ Install and configure SSH server """ # Install needed packages run("chroot %s apt-get -y --force-yes install ssh" % rootfs_dir) # Copy SSH configuration upload_template('template/rootfs/etc/ssh/ssh_config', "%s/etc/ssh/ssh_config" % rootfs_dir) upload_template('template/rootfs/etc/ssh/sshd_config', "%s/etc/ssh/sshd_config" % rootfs_dir) # Copy SSH Keys upload_template('%s/template/rootfs/etc/ssh/ssh_host_dsa_key' % IBAT_KEYS_DIR, "%s/etc/ssh/ssh_host_dsa_key" % rootfs_dir, backup=False) upload_template('%s/template/rootfs/etc/ssh/ssh_host_dsa_key.pub' % IBAT_KEYS_DIR, "%s/etc/ssh/ssh_host_dsa_key.pub" % rootfs_dir, backup=False) upload_template('%s/template/rootfs/etc/ssh/ssh_host_ecdsa_key' % IBAT_KEYS_DIR, "%s/etc/ssh/ssh_host_ecdsa_key" % rootfs_dir, backup=False) upload_template('%s/template/rootfs/etc/ssh/ssh_host_ecdsa_key.pub' % IBAT_KEYS_DIR, "%s/etc/ssh/ssh_host_ecdsa_key.pub" % rootfs_dir, backup=False) upload_template('%s/template/rootfs/etc/ssh/ssh_host_ed25519_key' % IBAT_KEYS_DIR, "%s/etc/ssh/ssh_host_ed25519_key" % rootfs_dir, backup=False) upload_template('%s/template/rootfs/etc/ssh/ssh_host_ed25519_key.pub' % IBAT_KEYS_DIR, "%s/etc/ssh/ssh_host_ed25519_key.pub" % rootfs_dir, backup=False) upload_template('%s/template/rootfs/etc/ssh/ssh_host_rsa_key' % IBAT_KEYS_DIR, "%s/etc/ssh/ssh_host_rsa_key" % rootfs_dir, backup=False) upload_template('%s/template/rootfs/etc/ssh/ssh_host_rsa_key.pub' % IBAT_KEYS_DIR, "%s/etc/ssh/ssh_host_rsa_key.pub" % rootfs_dir, backup=False)
def setup_apache(site_name,
                 code_path,
                 domain,
                 template_dir=None,
                 media_dir=None,
                 wsgi_user='******',
                 **kwargs):
    """Set up the apache server for this site.

    :param site_name: Name of the site e.g. changelogger. Should be a
        single word with only alpha characters in it.
    :type site_name: str

    :param code_path: Directory where the code lives. Will be used to set
        media etc permissions.
    :type code_path: str

    :param domain: Domain name. If none will be set to hostname.
    :type domain: str

    :param template_dir: Directory where the template files live. If none
        will default to ``resources/server_config/apache``. Must be a
        relative path to the fabfile you are running.
    :type template_dir: str

    :param media_dir: Optional dir under code_path if media does not live
        in ``<code_path>/django_project/media``. No trailing slash.
    :type media_dir: str

    :param wsgi_user: Name of user wsgi process should run as. The user
        will be created as needed.
    :type wsgi_user: str

    :param kwargs: Any extra keyword arguments that should be appended to
        the token list that will be used when rendering the apache config
        template. Use this to pass in sensitive data such as passwords.
    :type kwargs: dict

    :returns: Path to the apache conf file.
    :rtype: str
    """
    setup_env()
    # Ensure we have a mailserver setup for our domain
    # Note that you may have problems if you intend to run more than one
    # site from the same server
    require.postfix.server(site_name)
    require.deb.package('libapache2-mod-wsgi')
    # Find out if the wsgi user exists and create it if needed e.g.
    require.user(wsgi_user,
                 create_group=wsgi_user,
                 system=True,
                 comment='System user for running the wsgi process under')
    # Clone and replace tokens in apache conf
    if template_dir is None:
        template_dir = 'resources/server_config/apache/'
    filename = '%s.apache.conf.templ' % site_name
    template_path = os.path.join(template_dir, filename)
    fastprint(green('Using %s for template' % template_path))
    context = {
        # Raw string: '\.' is an invalid escape sequence in a plain
        # literal (SyntaxWarning / error on modern Pythons); r'\.' keeps
        # the identical backslash-dot replacement for the nginx/apache
        # regex-escaped server name.
        'escaped_server_name': domain.replace('.', r'\.'),
        'server_name': domain,
        'site_user': wsgi_user,
        'code_path': code_path,
        'site_name': site_name
    }
    context.update(kwargs)  # merge in any params passed in to this function
    destination = '/etc/apache2/sites-available/%s.apache.conf' % site_name
    fastprint(context)
    upload_template(template_path,
                    destination,
                    context=context,
                    use_sudo=True)
    set_media_permissions(code_path, wsgi_user, media_dir=media_dir)
    sudo('a2ensite %s.apache.conf' % site_name)
    sudo('a2dissite default')
    sudo('a2enmod rewrite')
    # Check if apache configs are ok - script will abort if not ok
    sudo('/usr/sbin/apache2ctl configtest')
    require.service.restarted('apache2')
    return destination
def site_config(self, site):
    """Render and install the nginx site config for *site*.

    Builds a template context from ``self.settings`` and the site
    definition, uploads the rendered nginx config to sites-available,
    and (for ``SiteType.NGINX`` sites only) enables it immediately.

    :param site: site definition dict; at least ``name`` and ``type``
        keys are read here.
    """
    with hook('site config %s' % self.name, self, site):
        context = {
            'site': site['name'],
            'default_str': ' default_server' if self.settings.get('default') else '',
            'app_location': '/',
            'webapp_location': '',
            'error_page_str': '',
        }
        # 'key in dict' replaces dict.has_key(), which was removed in
        # Python 3; behavior is identical.
        if 'log_level' in self.settings:
            context['log_level'] = ' ' + self.settings['log_level']
        else:
            context['log_level'] = ''
        if self.settings.get('ssl_cert') and self.settings.get(
                'ssl_cert_key'):
            context['ssl_str'] = _NGINX_SSL % (
                self.settings['ssl_cert'], self.settings['ssl_cert_key'])
        else:
            context['ssl_str'] = ''
        if 'location_settings' in self.settings and isinstance(
                self.settings['location_settings'], (list, tuple)):
            context['location_settings_str'] = '\n\t\t'.join([
                '%s %s;' % setting
                for setting in self.settings['location_settings']
            ])
        else:
            context['location_settings_str'] = ''
        if 'server_settings' in self.settings and isinstance(
                self.settings['server_settings'], (list, tuple)):
            context['server_settings_str'] = '\n\t\t'.join([
                '%s %s;' % setting
                for setting in self.settings['server_settings']
            ])
        else:
            context['server_settings_str'] = ''
        if site['type'] == SiteType.DJANGO:
            # Django site setup
            context['location_settings_str'] = '\n\t\t'.join(
                (context['location_settings_str'],
                 'uwsgi_pass unix:///var/run/uwsgi/app/%s/socket;' %
                 site['name'], 'include uwsgi_params;'))
            context['allowed_hosts'] = ' '.join(
                get_django_setting(site, 'ALLOWED_HOSTS') or [site['name']])
            # Setup the static and media locations
            locations = []
            static_root = get_static_root(site)
            static_url = get_static_url(site)
            media_root = get_media_root(site)
            media_url = get_media_url(site)
            domain = _get_domain(static_url)
            if domain:
                locations.append(
                    _NGINX_LOCATION_DOMAIN %
                    (_slash_wrap(_get_path(static_url)), domain))
            else:
                locations.append(
                    _NGINX_LOCATION %
                    (_slash_wrap(static_url), _slash_append(static_root)))
            domain = _get_domain(media_url)
            if domain:
                locations.append(
                    _NGINX_LOCATION_DOMAIN %
                    (_slash_wrap(_get_path(media_url)), domain))
            else:
                locations.append(
                    _NGINX_LOCATION %
                    (_slash_wrap(media_url), _slash_append(media_root)))
            # Add any custom locations
            if 'custom_locations' in self.settings:
                if isinstance(self.settings['custom_locations'],
                              (tuple, list)):
                    for location in self.settings['custom_locations']:
                        locations.append(location)
                else:
                    locations.append(self.settings['custom_locations'])
            context['static_locations'] = '\n\n\t'.join(locations)
            # Configure the webapp if necessary
            webapp_root = get_webapp_root(site)
            if webapp_root:
                webapp_url = _slash_wrap(get_webapp_url(site))
                webapp_index = get_django_setting(
                    site, 'WEBAPP_INDEX') or 'index.html'
                if webapp_url == '/':
                    context[
                        'app_location'] = '@%s-app' % site['name'].replace(
                            '.', '-')
                    context['webapp_location'] = _NGINX_WEBAPP_LOCATION % (
                        webapp_url, _slash_append(webapp_root),
                        webapp_index, context['app_location'])
                else:
                    context['app_location'] = '/'
                    context['webapp_location'] = _NGINX_WEBAPP_LOCATION % (
                        webapp_url, _slash_append(webapp_root),
                        webapp_index, '=404')
            # Configure the error pages if present
            error_pages = []
            if not env.get('vagrant'):
                for status_code in (400, 401, 403, 404, 500):
                    static_name = '%d.html' % status_code
                    result = find_static(site, static_name)
                    if result:
                        error_pages.append(
                            'error_page %d %s;' %
                            (status_code,
                             os.path.join(static_url, static_name)))
            context['error_page_str'] = '\n\t'.join(error_pages)
        else:
            # Not a django site
            context['allowed_hosts'] = site['name']
            context['static_locations'] = ''
        if self.settings.get('ssl_only'):
            nginx_template = get_template('nginx-site-https.conf')
        else:
            nginx_template = get_template('nginx-site.conf')
        # 0o644 is the portable octal spelling (0644 is a syntax error on
        # Python 3); same permission bits.
        upload_template(nginx_template,
                        '/etc/nginx/sites-available/%s.conf' % site['name'],
                        context=context,
                        use_sudo=True,
                        mode=0o644)
        sudo('chown root:root /etc/nginx/sites-available/%s.conf' %
             site['name'])
        # If site type is NGINX enable it right away because there is no
        # deployment process for it
        if site['type'] == SiteType.NGINX:
            with hide('warnings'), settings(warn_only=True):
                sudo(
                    'ln -sf /etc/nginx/sites-available/%s.conf /etc/nginx/sites-enabled/%s.conf'
                    % (site['name'], site['name']))
            # If the site is the default, then remove the default that comes with nginx
            if self.settings.get('default'):
                sudo('rm -f /etc/nginx/sites-enabled/default')
            self.restart()
def configure():
    """Upload configuration files"""
    # Local alias keeps the call sites short; identical call sequence.
    upload = files.upload_template
    upload(_('conf/settings/{env}.py'),
           _('{remote_env_path}/etc/environment_settings.py'), env)
    upload(_('conf/lighttpd.conf'),
           _('/etc/lighttpd/conf-enabled/90-{project}.{env}.conf'), env)
    upload('conf/manage.py.tmpl', _('{remote_env_path}/bin/manage.py'), env)
    run(_('chmod +x {remote_env_path}/bin/manage.py'))
    upload('conf/upstart.tmpl', _('/etc/init/{project}-{env}.conf'), env)
    fix_owner_and_permissions(_('/etc/init/{project}-{env}.conf'))
    upload('conf/logrotate.tmpl', _('/etc/logrotate.d/{project}-{env}'), env)
    fix_owner_and_permissions(_('/etc/logrotate.d/{project}-{env}'))
    upload('conf/crontab.tmpl', _('/etc/cron.d/{project}-{env}'), env)
    fix_owner_and_permissions(_('/etc/cron.d/{project}-{env}'))
    # Hand ownership of the web tree and the env over to the www user.
    run(_('chown {www_user}:{www_user} /var/lib/www/{env}/ -R'))
    run(_('chown {www_user}:{www_user} {remote_env_path} -R'))
def add_site(filename, context):
    """ Deploys a new nginx configuration site. """
    # Target name combines deployment prefix and branch for uniqueness.
    target = '/etc/nginx/sites-available/{}-{}'.format(
        env.prefix, env.branch)
    upload_template(filename, target, context=context, use_sudo=True)
def deploy_app():
    """Deploy the application as a Docker stack on the configured host.

    Creates the deployment folder layout, uploads the environment file
    (from CI variables or a local ``.environment``), optionally logs into
    a Docker registry, then deploys ``docker-compose.yml`` as a stack.
    Exits via ``sys.exit`` when a required input is missing.
    """
    click.echo("---------------------------------")
    click.echo(" Starting Deployment ")
    click.echo("---------------------------------")
    click.echo("MASTER: %s" % env.master)
    click.echo("LABEL: %s" % env.label)
    click.echo("---------------------------------")
    folder = "%s/%s" % (env.path, env.label)
    run("mkdir -p %s" % folder)
    run("mkdir -p %s/data" % folder)
    run("mkdir -p %s/backups" % folder)
    if env.is_ci:
        if env.variables:
            click.echo("Loading [PROJECT_ENVIRONMENT] variables ...")
            # Context manager guarantees the temp file is flushed and
            # closed before it is uploaded (the original leaked the
            # handle via open/write/close without error handling).
            with open('/tmp/.tempenv', 'w') as f:
                f.write(env.variables)
            click.echo("[.environment] created...!!!")
            with cd(folder):
                upload_template(
                    filename="/tmp/.tempenv",
                    destination='%s/.environment' % folder,
                    template_dir="./",
                )
                click.echo("[.environment] uploaded and configured...")
    else:
        if isfile(".environment"):
            with cd(folder):
                upload_template(
                    filename=".environment",
                    destination='%s/.environment' % folder,
                    template_dir="./",
                )
                click.echo("---> [.environment] uploaded...!!!")
    if env.registry_host and env.registry_user:
        # Fixed garbled prompt ("Put for password por ...").
        passwd = getpass(
            "\nEnter the password for [%(user)s] in [%(host)s]: " % {
                "host": env.registry_host,
                "user": env.registry_user
            })
        if passwd != '':
            run(
                "docker login %(host)s -u %(user)s -p '%(passwd)s'" % {
                    "host": env.registry_host,
                    "user": env.registry_user,
                    "passwd": passwd
                })
        else:
            sys.exit("Password is required...!")
    if isfile("docker-compose.yml"):
        with cd(folder):
            # An existing .environment means a previous deployment:
            # tear the stack down first so the new env is picked up.
            needs_update = isfile(".environment")
            upload_template(
                filename="./docker-compose.yml",
                destination='%s/docker-compose.yml' % folder,
                template_dir="./",
            )
            if needs_update:
                run("docker stack rm %s" % env.label)
                from time import sleep
                sleep(5)  # give swarm time to remove the old stack
            run("docker stack deploy --compose-file docker-compose.yml %s --with-registry-auth"
                % env.label)
    else:
        sys.exit("[docker-compose.yml] is required for deployment")
def deploy():
    """Upload each kubernetes manifest as a k3s auto-deploy manifest."""
    for entry in os.listdir("kubernetes"):
        source = os.path.join("kubernetes", entry)
        target = "/var/lib/rancher/k3s/server/manifests/homeassistant-{}".format(entry)
        # Template context injects the most recently built image version.
        upload_template(source, target,
                        {"version": _get_last_built_version()},
                        use_sudo=True, backup=False)
def install_action(config_file, **kwargs):
    """Upload the crontab template into the user's home directory.

    ``kwargs`` supplies both the template context and the ``USERDIR``
    used to build the destination path.
    """
    destination = "%(USERDIR)s/crontab.current" % kwargs
    files.upload_template(config_file, destination, kwargs,
                          use_jinja=settings.use_jinja)
def provision(common='master'):
    """Provision server with masterless Salt minion."""
    # Requires env.environment to be set (e.g. staging/production).
    require('environment')
    # Install salt minion
    with settings(warn_only=True):
        with hide('running', 'stdout', 'stderr'):
            # 'which' exits non-zero when salt-call is absent; warn_only
            # keeps Fabric from aborting so we can test the result.
            installed = run('which salt-call')
    if not installed:
        # Bootstrap salt from the local script shipped in CONF_ROOT.
        bootstrap_file = os.path.join(CONF_ROOT, 'bootstrap-salt.sh')
        put(bootstrap_file, '/tmp/bootstrap-salt.sh')
        sudo('sh /tmp/bootstrap-salt.sh daily')
    # Rsync local states and pillars
    minion_file = os.path.join(CONF_ROOT, 'minion.conf')
    files.upload_template(minion_file, '/etc/salt/minion', use_sudo=True,
                          context=env)
    salt_root = CONF_ROOT if CONF_ROOT.endswith('/') else CONF_ROOT + '/'
    environments = ['staging', 'production']
    # Only include current environment's pillar tree
    exclude = [
        os.path.join('pillar', e) for e in environments
        if e != env.environment
    ]
    # Stage into /tmp first, then move into /srv with sudo (rsync runs
    # as the unprivileged SSH user).
    project.rsync_project(local_dir=salt_root,
                          remote_dir='/tmp/salt',
                          delete=True,
                          exclude=exclude)
    sudo('rm -rf /srv/*')
    sudo('mv /tmp/salt/* /srv/')
    sudo('rm -rf /tmp/salt/')
    # Pull common states
    sudo('rm -rf /tmp/common/')
    with settings(warn_only=True):
        with hide('running', 'stdout', 'stderr'):
            # git may not be present on a fresh box; install on demand.
            installed = run('which git')
    if not installed:
        sudo('apt-get install git-core -q -y')
    # Shared salt states; 'common' selects the branch/ref to check out.
    run('git clone git://github.com/caktus/margarita.git /tmp/common/')
    with cd('/tmp/common/'):
        run('git checkout %s' % common)
    sudo('mv /tmp/common/ /srv/common/')
    sudo('rm -rf /tmp/common/')
    sudo('chown root:root -R /srv/')
    # Update to highstate
    with settings(warn_only=True):
        # warn_only: a failed state should not abort the fabfile -- we
        # parse the JSON output below and report failures ourselves.
        sudo(
            'salt-call --local state.highstate -l info --out json > /tmp/output.json'
        )
    # Fetch the JSON results locally and report any failed states.
    get('/tmp/output.json', 'output.json')
    with open('output.json', 'r') as f:
        try:
            results = json.load(f)
        except (TypeError, ValueError) as e:
            error(u'Non-JSON output from salt-call', exception=e)
        else:
            for state, result in results['local'].items():
                if not result["result"]:
                    if 'name' in result:
                        print red(
                            u'Error with %(name)s state: %(comment)s'
                            % result)
                    else:
                        print red(u'Error with {0} state: {1}'.format(
                            state, result['comment']))
def set_secrets():
    """Upload the local ``secrets.py`` to the remote project path.

    NOTE(review): an earlier (removed) variant uploaded into
    ``REMOTE_PROJECT_PATH/PROJECT_NAME``; current behavior targets the
    project root -- confirm the remote destination is the intended one.
    """
    upload_template('secrets.py', env.REMOTE_PROJECT_PATH)
def nginx_configs():
    """Push the nginx configs and the perl helper module to each web host.

    The three host stanzas were byte-identical apart from host string and
    context host name, so they are driven from a table; rollout order is
    preserved from the original sequential version.
    """
    targets = (
        ('root@lb', 'Asgard'),
        ('root@newasgard', 'Asgard'),
        ('root@newChimera', 'Chimera'),
    )
    for host_string, host_name in targets:
        with settings(host_string=host_string):
            context = {'host': host_name}
            upload_template('../nginx/app.nginx.conf',
                            '/etc/nginx/sites-enabled/readthedocs',
                            context=context,
                            backup=False)
            upload_template('../nginx/lb.nginx.conf',
                            '/etc/nginx/sites-enabled/lb',
                            context=context,
                            backup=False)
            upload_template('../nginx/main.nginx.conf',
                            '/etc/nginx/nginx.conf',
                            context=context,
                            backup=False)
            # Perl config
            sudo('mkdir -p /usr/share/nginx/perl/')
            put('../salt/nginx/perl/lib/ReadTheDocs.pm',
                '/usr/share/nginx/perl/ReadTheDocs.pm')