def delete(state=None, host=None):
    """Disable the DHCP service by rendering a teardown script and running it."""
    validate_schema_version(host.data.dhcp, [v1beta3.DhcpData])

    script_path = './dhcp-disable.sh'

    # Render the disable script with execute permission for the owner only.
    files.template(
        name='Render configuration script',
        src=deploy_dir / 'templates' / 'dhcp-disable.sh.j2',
        dest=script_path,
        mode='700',
        state=state,
        host=host,
    )

    # Run the freshly uploaded script on the remote host.
    server.shell(
        name="Execute configuration script",
        commands=[script_path],
        state=state,
        host=host,
    )
def configure_node_exporter(state, host, enable_service=True, extra_args=None):
    # Deploy the node_exporter service definition and ensure it is running.
    # Ubuntu 16+ gets a systemd unit; Ubuntu 14 gets an init.d script.
    # NOTE(review): any other major version falls through with no service
    # management at all - confirm that is intentional.
    op_name = 'Ensure node_exporter service is running'
    if enable_service:
        op_name = '{0} and enabled'.format(op_name)

    if host.fact.linux_distribution['major'] >= 16:
        # Setup node_exporter init (systemd unit)
        generate_service = files.template(
            name='Upload the node_exporter systemd unit file',
            src=get_template_path('node_exporter.service.j2'),
            dest='/etc/systemd/system/node_exporter.service',
            extra_args=extra_args,
            state=state,
            host=host,
        )
        # Restart and daemon-reload only when the unit file actually changed.
        init.systemd(
            name=op_name,
            service='node_exporter',
            running=True,
            restarted=generate_service.changed,
            daemon_reload=generate_service.changed,
            enabled=enable_service,
            state=state,
            host=host,
        )

    elif host.fact.linux_distribution['major'] == 14:
        generate_service = files.template(
            name='Upload the node_exporter init.d file',
            src=get_template_path('init.d.j2'),
            dest='/etc/init.d/node_exporter',
            mode=755,
            ex_name='node_exporter',
            ex_bin_dir=host.data.node_exporter_bin_dir,
            ex_user=host.data.node_exporter_user,
            extra_args=extra_args,
            state=state,
            host=host,
        )
        # Start (/enable) the node_exporter service
        init.d(
            name=op_name,
            service='node_exporter',
            running=True,
            restarted=generate_service.changed,
            reloaded=generate_service.changed,
            enabled=enable_service,
            state=state,
            host=host,
        )
def register_concourse_service(
    concourse_config: Union[ConcourseWebConfig, ConcourseWorkerConfig],
    state=None,
    host=None,
    restart=False,
):
    """Install the Concourse systemd unit and make sure the service is up."""
    unit_template = Path(__file__).parent.joinpath("templates/concourse.service.j2")

    # Create Systemd unit to manage the Concourse service.
    systemd_unit = files.template(
        name="Create concourse Systemd unit definition",
        src=unit_template,
        dest="/etc/systemd/system/concourse.service",
        concourse_config=concourse_config,
        state=state,
        host=host,
    )

    # Enable the service; reload systemd only if the unit file changed.
    systemd.service(
        name="Ensure Concourse service is enabled and running.",
        service="concourse",
        enabled=True,
        running=True,
        restarted=restart,
        daemon_reload=systemd_unit.changed,
        state=state,
        host=host,
    )
def configure(state=None, host=None):
    """Install Apache and serve host.data.http.root_dir via a rendered conf."""
    validate_schema_version(host.data.http, [v1beta3.HttpData])

    apt.packages(
        name='Install package',
        packages=['apache2'],
        sudo=True,
        state=state,
        host=host,
    )

    files.directory(
        name=f'Ensure HTTP root dir {host.data.http.root_dir}',
        path=str(host.data.http.root_dir),
        present=True,
        recursive=True,
        sudo=True,
        state=state,
        host=host,
    )

    # Render the site configuration into conf-available.
    rendered_conf = files.template(
        name='Render config file',
        src=str(deploy_dir / 'templates' / 'apache2-directory.conf.j2'),
        dest=str(Path('/etc') / 'apache2' / 'conf-available' / 'root.conf'),
        mode='744',
        user='******',
        group='root',
        sudo=True,
        http=host.data.http,
        state=state,
        host=host,
    )

    server.shell(
        name='Enable root.conf',
        commands=['a2enconf root'],
        sudo=True,
        state=state,
        host=host,
    )

    # Bounce Apache only when the rendered configuration changed.
    systemd.service(
        name='Restart apache2',
        service='apache2',
        running=True,
        restarted=rendered_conf.changed,
        sudo=True,
        state=state,
        host=host,
    )
def configure(state=None, host=None):
    """Install dnsmasq, render its config, and restart it on change."""
    validate_schema_version(host.data.dnsmasq, [v1beta3.DnsmasqData])

    apt.packages(
        name='Install dnsmasq',
        packages=['dnsmasq'],
        sudo=True,
        state=state,
        host=host,
    )

    # TFTP support is optional; only create the root dir when configured.
    if host.data.dnsmasq.tftp is not None:
        files.directory(
            name=f'Ensure TFTP root dir {host.data.dnsmasq.tftp.root_dir}',
            path=str(host.data.dnsmasq.tftp.root_dir),
            present=True,
            recursive=True,
            sudo=True,
            state=state,
            host=host,
        )

    rendered_conf = files.template(
        name='Render the dnsmasq config',
        src=str(deploy_dir / 'templates' / 'dnsmasq.conf.j2'),
        dest=str(Path('/etc') / 'dnsmasq.conf'),
        mode='744',
        user='******',
        group='root',
        sudo=True,
        dnsmasq=host.data.dnsmasq,
        state=state,
        host=host,
    )

    # Restart only when the rendered configuration changed.
    systemd.service(
        name='Restart dnsmasq',
        service='dnsmasq',
        running=True,
        restarted=rendered_conf.changed,
        sudo=True,
        state=state,
        host=host,
    )
def configure_caddy(caddy_config: CaddyConfig, state=None, host=None):
    """Place the Caddyfile, rendering it when the source is a Jinja2 template.

    Returns True when the deployed file changed.
    """
    if caddy_config.caddyfile.suffix != ".j2":
        # Plain file: upload verbatim.
        deployed = files.put(
            name="Upload Caddyfile",
            src=caddy_config.caddyfile,
            dest="/etc/caddy/Caddyfile",
            state=state,
            host=host,
        )
    else:
        # Jinja2 source: render with the configured template context.
        deployed = files.template(
            name="Create Caddyfile",
            src=caddy_config.caddyfile,
            dest="/etc/caddy/Caddyfile",
            context=caddy_config.template_context,
            state=state,
            host=host,
        )
    return deployed.changed
def configure_concourse(
    concourse_config: Union[ConcourseWebConfig, ConcourseWorkerConfig],
    sudo=True,
    state=None,
    host=None,
):
    """Write the Concourse env file and install the node-type-specific keys.

    Returns True when the environment file changed.
    """
    env_template = Path(__file__).parent.joinpath("templates/env_file.j2")
    concourse_env_file = files.template(
        name="Create Concourse environment file",
        src=env_template,
        dest=concourse_config.env_file_path,
        concourse_config=concourse_config,
        user=concourse_config.user,
        state=state,
        host=host,
        sudo=sudo,
    )

    # Key management differs between web and worker nodes.
    node_type = concourse_config._node_type  # noqa: WPS437
    if node_type == "web":
        _manage_web_node_keys(concourse_config, state=state, host=host)
    elif node_type == "worker":
        _manage_worker_node_keys(concourse_config, state=state, host=host)

    return concourse_env_file.changed
def register_services(hashicorp_products: List[HashicorpProduct], state=None, host=None):
    """Install a systemd unit for every Hashicorp product and start it."""
    for product in hashicorp_products:
        unit_src = Path(__file__).parent.joinpath(
            "templates", f"{product.name}.service.j2",
        )
        systemd_unit = files.template(
            name=f"Create service definition for {product.name}",
            dest=f"/usr/lib/systemd/system/{product.name}.service",
            src=unit_src,
            context=product.systemd_template_context,
            state=state,
            host=host,
        )
        systemd.service(
            name=f"Register service for {product.name}",
            service=product.name,
            running=True,
            enabled=True,
            # Reload systemd only when the unit file was (re)written.
            daemon_reload=systemd_unit.changed,
            state=state,
            host=host,
        )
)

# TODO: should reboot after SELINUX is disabled (how to check/easy way to reboot)
# TODO: how to determine when reboot is complete
# TODO: run sestatus

# Master-only provisioning: install and configure the puppet server.
if host in masters:
    install = yum.packages(
        name='Install puppet server',
        packages=['puppetserver'],
    )
    config = files.template(
        name='Manage the puppet master configuration',
        src='templates/master_puppet.conf.j2',
        dest='/etc/puppetlabs/puppet/puppet.conf',
    )
    # TODO: tune always shows as changed
    # I think it should only show as changed if there really was a difference.
    # Might have to add a suffix to the sed -i option, then move file only if
    # there is a diff. Maybe?
    tune = files.line(
        name='Tune the puppet server jvm to only use 1gb',
        path='/etc/sysconfig/puppetserver',
        line=r'^JAVA_ARGS=.*$',
        replace='JAVA_ARGS=\\"-Xms1g -Xmx1g -Djruby.logger.class=com.puppetlabs.'
                'jruby_utils.jruby.Slf4jLogger\\"',
    )
from pyinfra import host
from pyinfra.facts.server import *
from pyinfra.operations import files

# Gather facts once up front.
home = host.get_fact(Home)
hostname = host.get_fact(Hostname)

# Render the feh wallpaper script into the home directory.
files.template(
    name="Deploy feh configs",
    src="dotfiles/.fehbg.j2",
    dest=f"{home}/.fehbg",
    home=home,
)
from config.colors import colors
from pyinfra import host
from pyinfra.facts.server import *
from pyinfra.operations import files

home = host.get_fact(Home)
hostname = host.get_fact(Hostname)

bspwm_dir = f"{home}/.config/bspwm"

# Step 1: copy everything except the Jinja2 templates.
files.rsync(
    name="Deploy bspwm configs [1/2]",
    src="dotfiles/.config/bspwm/",
    dest=f"{bspwm_dir}/",
    flags=["-a", "--exclude '*.j2'"],
)

# Step 2: render the templated entry point with the executable bit set.
files.template(
    name="Deploy bspwm configs [2/2]",
    src="dotfiles/.config/bspwm/bspwmrc.j2",
    dest=f"{bspwm_dir}/bspwmrc",
    mode="755",
    hostname=hostname,
    colors=colors,
)
from config.colors import colors
from pyinfra import host
from pyinfra.facts.server import *
from pyinfra.operations import files

home = host.get_fact(Home)
hostname = host.get_fact(Hostname)

# TODO template screen geometry
files.template(
    name="Deploy Dunst config [1/2]",
    src="dotfiles/.config/dunst/dunstrc.j2",
    dest=f"{home}/.config/dunst/dunstrc",
    colors=colors,
    home=home,
    hostname=hostname,
)

# TODO template this
files.template(
    name="Deploy Dunst config [2/2]",
    src="dotfiles/.config/dunst/notify-sound.sh.j2",
    dest=f"{home}/.config/dunst/notify-sound.sh",
    # Mode quoted as a string for consistency with the sibling deploys
    # (bspwm/polybar pass "755"); pyinfra normalizes both forms identically.
    mode="755",
)
def install_caddy(caddy_config: CaddyConfig, state=None, host=None):
    """Install Caddy, either as a custom plugin build or from the APT repo.

    Returns True when the install operation changed the host.
    """
    # Hoisted out of the plugins branch: the log-directory block at the
    # bottom reads caddy_user on BOTH install paths; previously an APT
    # install with log_file set raised NameError here.
    caddy_user = "******"

    if caddy_config.plugins:
        server.user(
            name="Create system user for Caddy",
            user=caddy_user,
            system=True,
            ensure_home=False,
            state=state,
            host=host,
        )
        caddy_install = files.download(
            name="Download custom build of Caddy",
            dest="/usr/local/bin/caddy",
            src=caddy_config.custom_download_url(),
            mode=DEFAULT_DIRECTORY_MODE,
            state=state,
            host=host,
        )
        files.directory(
            name="Create Caddy configuration directory",
            path="/etc/caddy/",
            user=caddy_user,
            group=caddy_user,
            present=True,
            recursive=True,
            state=state,
            host=host,
        )
        # Renamed from the copy-pasted "configuration directory" label so
        # the two directory operations are distinguishable in deploy output.
        files.directory(
            name="Create Caddy data directory",
            path=caddy_config.data_directory,
            user=caddy_user,
            group=caddy_user,
            present=True,
            recursive=True,
            state=state,
            host=host,
        )
        files.template(
            name="Create SystemD service definition for Caddy",
            dest="/usr/lib/systemd/system/caddy.service",
            src=Path(__file__).parent.joinpath("templates/caddy.service.j2"),
            state=state,
            host=host,
        )
    else:
        apt.key(
            name="Add Caddy repository GPG key",
            src="https://dl.cloudsmith.io/public/caddy/stable/gpg.key",
            state=state,
            host=host,
        )
        apt.repo(
            name="Set up Caddy APT repository",
            src="deb https://dl.cloudsmith.io/public/caddy/stable/deb/debian any-version main",  # noqa: E501
            present=True,
            filename="caddy.list",
            state=state,
            host=host,
        )
        caddy_install = apt.packages(
            name="Install Caddy from APT",
            packages=["caddy"],
            present=True,
            latest=True,
            update=True,
            state=state,
            host=host,
        )

    if caddy_config.log_file:
        files.directory(
            # Typo fix: was "Crate Caddy log directory".
            name="Create Caddy log directory",
            path=caddy_config.log_file.parent,
            user=caddy_user,
            present=True,
            state=state,
            host=host,
        )

    return caddy_install.changed
def configure_installer_type_legacy_netboot(state=None, host=None):
    """Provision a PXE environment for the legacy debian-installer netboot
    flow: fetch the netboot archive and OS ISO, extract the bootstrap
    kernel/initrd, then render the GRUB and preseed configuration.
    """
    #
    # Download the netboot archive
    #
    archive_path = host.data.pxe.tftp.root_dir.joinpath(
        host.data.pxe.installer.netboot_source_url.path.lstrip('/'))
    files.directory(
        name=f"Ensure {archive_path.parent}",
        path=str(archive_path.parent),
        present=True,
        sudo=True,
        host=host,
        state=state,
    )
    download_netboot_archive = files.download(
        name=f'Download netboot archive to {archive_path}',
        src=str(host.data.pxe.installer.netboot_source_url),
        dest=str(archive_path),
        sha256sum=host.data.pxe.installer.netboot_sha256sum,
        sudo=True,
        host=host,
        state=state,
    )

    #
    # Download the OS iso
    #
    iso_path = host.data.pxe.http.root_dir.joinpath(
        host.data.pxe.installer.image_source_url.path.lstrip('/'))
    files.directory(
        name=f"Ensure {iso_path.parent}",
        path=str(iso_path.parent),
        present=True,
        sudo=True,
        host=host,
        state=state,
    )
    server.shell(
        name=f'Ensure nothing is mounted on {iso_path.parent}/mnt',
        commands=[
            f'(mount | grep " on {iso_path.parent}/mnt" && '
            f'umount {iso_path.parent}/mnt) || :',
        ],
        sudo=True,
        host=host,
        state=state)
    download_iso = files.download(
        name=f'Download OS iso to {iso_path}',
        src=str(host.data.pxe.installer.image_source_url),
        dest=str(iso_path),
        sudo=True,
        sha256sum=host.data.pxe.installer.image_sha256sum,
        host=host,
        state=state,
    )
    iso_mount_path = iso_path.parent / 'mnt'
    server.shell(
        name=f'Mount the ISO to {iso_path.parent}/mnt',
        commands=[
            f'mkdir -p {iso_path.parent}/mnt',
            f'mount {iso_path} {iso_path.parent}/mnt',
        ],
        sudo=True,
        host=host,
        state=state)

    # This deploy only supports serving one OS version for now and
    # to ensure that the extracted bootstrap kernel and ramdisk come
    # from the correct ISO, we use this template as one of the signals
    # in the extraction logic further down. Without this, the OS version
    # being served might change but the bootstrap kernel and ramdisk
    # might not.
    # files.template uses SFTP to transfer files so we have to use
    # a different base path in the case of Synology which presents a
    # different filesystem hierarchy depending on which protocol you're on.
    # Related bug: https://github.com/Fizzadar/pyinfra/issues/499
    current_installer_flag = host.data.pxe.tftp.sftp_root_dir / 'current-installer'
    current_installer = files.template(
        name=f'Write {current_installer_flag}',
        src=deploy_dir / 'templates' / 'current-installer.j2',
        dest=current_installer_flag,
        pxe=host.data.pxe,
        host=host,
        state=state,
    )

    #
    # Extract the kernel and ram disk image for use by the bootloader
    #
    kernel_path = host.data.pxe.tftp.root_dir / 'linux'
    initrd_path = host.data.pxe.tftp.root_dir / 'initrd.gz'
    # Re-extract when either artifact is missing or any upstream input changed.
    if host.fact.file(kernel_path) is None or \
            host.fact.file(initrd_path) is None or \
            download_netboot_archive.changed or \
            download_iso.changed or \
            current_installer.changed:
        # We make use of the kernel and initrd in the netboot archive
        # since the ones in the 18.04 iso, specifically under the
        # {iso_mount_path}/install directory, fail to work properly.
        kernel_path_in_archive = './ubuntu-installer/amd64/linux'
        initrd_path_in_archive = './ubuntu-installer/amd64/initrd.gz'
        server.shell(
            name='Extract the files from the netboot installer',
            commands=[
                f'tar -zxvf {archive_path} -C {kernel_path.parent} '
                f'--strip-components={kernel_path_in_archive.count("/")} '
                f'{kernel_path_in_archive}',
                f'tar -zxvf {archive_path} -C {initrd_path.parent} '
                f'--strip-components={initrd_path_in_archive.count("/")} '
                f'{initrd_path_in_archive}',
            ],
            sudo=True,
            host=host,
            state=state)

    #
    # Render Legacy Preseed Config
    #
    # files.template uses SFTP to transfer files so we have to use
    # a different base path in the case of Synology which presents a
    # different filesystem hierarchy depending on which protocol you're on.
    legacy_preseed_manual_path = host.data.pxe.http.sftp_root_dir.joinpath(
        'legacy-preseed-manual.seed')
    legacy_preseed_auto_dir = host.data.pxe.http.sftp_root_dir.joinpath(
        'legacy-preseed-auto')
    net_image_disk_path = iso_mount_path / 'install' / 'filesystem.squashfs'
    net_image_http_path = str(
        Path(host.data.pxe.installer.image_source_url.path.lstrip(
            '/')).parent.joinpath('mnt', 'install', 'filesystem.squashfs'))
    server.shell(
        name='Check that squashfs file exists',
        commands=[
            f'test -f {net_image_disk_path}',
        ],
        sudo=True,
        host=host,
        state=state)
    # NOTE(review): legacy_preseed_manual_path already lives under
    # sftp_root_dir; joining it onto sftp_root_dir again relies on pathlib
    # discarding the left side when the right side is absolute - confirm.
    files.template(
        name='Render legacy preseed config',
        src=deploy_dir / 'templates' / 'legacy-preseed-manual.seed.j2',
        dest=host.data.pxe.http.sftp_root_dir / legacy_preseed_manual_path,
        create_remote_dir=False,
        pxe=host.data.pxe,
        net_image_http_path=net_image_http_path,
        host=host,
        state=state,
    )

    #
    # Render GRUB2 config
    #
    installer_path = Path(
        host.data.pxe.installer.netboot_source_url.path.lstrip('/'))
    # Synology's SFTP permissions are unusual in that they don't allow
    # you to create directories (which we want to do in the files.template
    # operation after this one). As a workaround to that, we're going to
    # ensure the directory via the files.directory operation since it uses
    # just SSH.
    grub_dir = host.data.pxe.tftp.root_dir / 'grub'
    files.directory(
        name=f'Ensure {grub_dir} exists',
        path=grub_dir,
        present=True,
        host=host,
        state=state,
    )
    files.template(
        name='Render GRUB config',
        src=deploy_dir / 'templates' / 'grub2.legacy-netboot.cfg.j2',
        # files.template uses SFTP to transfer files so we have to use
        # a different base path in the case of Synology (see above).
        dest=host.data.pxe.tftp.sftp_root_dir / 'grub' / 'grub.cfg',
        create_remote_dir=False,
        initrd_filename=Path(initrd_path).name,
        installer_path=installer_path,
        kernel_filename=kernel_path.name,
        legacy_preseed_auto_dir=legacy_preseed_auto_dir.stem,
        legacy_preseed_manual_path=legacy_preseed_manual_path.name,
        net_image_http_path=net_image_http_path,
        os_name=Path(host.data.pxe.installer.image_source_url.path).stem,
        pxe=host.data.pxe,
        host=host,
        state=state,
    )

    #
    # Render the machine-specific preseed files
    #
    # Synology's SFTP permissions don't allow directory creation, so ensure
    # the directory over plain SSH via files.directory first.
    legacy_preseed_auto_dir_ssh_path = \
        host.data.pxe.http.root_dir / legacy_preseed_auto_dir.stem
    files.directory(
        # Fixed copy-pasted op name: this op ensures the preseed auto dir,
        # not the grub dir (the old name also collided with the grub op).
        name=f'Ensure {legacy_preseed_auto_dir_ssh_path} exists',
        path=legacy_preseed_auto_dir_ssh_path,
        present=True,
        host=host,
        state=state,
    )
    for machine in host.data.pxe.machines:
        machine_legacy_preseed_path = legacy_preseed_auto_dir / machine.hostname
        files.template(
            name=f'Render {machine_legacy_preseed_path}',
            src=deploy_dir / 'templates' / 'legacy-preseed-auto.seed.j2',
            dest=machine_legacy_preseed_path,
            create_remote_dir=False,
            machine=machine,
            pxe=host.data.pxe,
            host=host,
            state=state,
        )
# Execute the @deploy function my_deploy() # Do a loop which will generate duplicate op hashes for i in range(2): server.shell( name="Loop-{0} main operation".format(i), commands="echo loop_{0}_main_operation".format(i), ) call_file_op() with state.preserve_loop_order([1, 2]) as loop_items: for item in loop_items(): server.shell( name="Order loop {0}".format(item), commands="echo loop_{0}".format(item), ) server.shell( name="2nd Order loop {0}".format(item), commands="echo loop_{0}".format(item), ) if host.name == "somehost": files.template( name="Final limited operation", src="templates/a_template.j2", dest="/a_template", is_template=True, )
name='Ensure myweb user exists',
    user='******',
    shell='/bin/bash',
)

files.directory(
    name='Ensure /web exists',
    path='/web',
    user='******',
    group='myweb',
)

# Script executed by the systemd service defined below.
files.template(
    name='Create script to run inside the service',
    src='templates/myweb.sh.j2',
    dest='/usr/local/bin/myweb.sh',
    mode='755',
    user='******',
    group='myweb',
)

# NOTE(review): systemd unit files are conventionally 644; mode='755'
# works but is unusual here - confirm it is intended.
files.template(
    name='Create service file',
    src='templates/myweb.service.j2',
    dest='/etc/systemd/system/myweb.service',
    mode='755',
    user='******',
    group='root',
)

files.template(
    name='Create index.html',
from pyinfra import host, inventory
from pyinfra.operations import files, puppet

# Run everything with sudo via a login shell so ~/.bash_profile is sourced.
SUDO = True
USE_SUDO_LOGIN = True

if host in inventory.get_group('master_servers'):
    # Op name passed via the `name=` keyword; the positional set-literal
    # form ({'...'}) is the deprecated v0-style API (see the sibling
    # deploys, which already use name=).
    files.template(
        name='Create a puppet manifest',
        src='templates/environments/production/manifests/httpd.pp.j2',
        dest='/etc/puppetlabs/code/environments/production/manifests/httpd.pp',
    )

if host in inventory.get_group('agent_servers'):
    # Either 'USE_SUDO_LOGIN=True' or 'USE_SU_LOGIN=True' for
    # puppet.agent() as `puppet` is added to the path in
    # the .bash_profile.
    # We also expect a return code of:
    # 0=no changes or 2=changes applied
    puppet.agent(
        name='Run the puppet agent',
        success_exit_codes=[0, 2],
    )
from pyinfra import host
from pyinfra.facts.server import *
from pyinfra.operations import files
from pyinfra.operations import server

home = host.get_fact(Home)
hostname = host.get_fact(Hostname)

runcoms = f"{home}/.zprezto/runcoms"

# Copy the static runcoms, leaving the templated ones out.
files.rsync(
    name="Sync Prezto configs",
    src="dotfiles/.zprezto/runcoms/",
    dest=f"{runcoms}/",
    flags=["-a", "--exclude '*.j2'"],
)

# Render the per-host zprofile.
files.template(
    name="Deploy .zprofile",
    src="dotfiles/.zprezto/runcoms/zprofile.j2",
    dest=f"{runcoms}/zprofile",
    hostname=hostname,
)
def configure_prometheus(state, host, enable_service=True, extra_args=None):
    # Deploy the prometheus config and service definition and ensure the
    # service runs. Ubuntu 16+: systemd unit; Ubuntu 14: init.d script.
    # NOTE(review): other major versions get the config file but no service
    # management - confirm that is intentional.
    # Configure prometheus
    generate_config = files.template(
        name='Upload the prometheus config file',
        src=get_template_path('prometheus.yml.j2'),
        dest='/etc/default/prometheus.yml',
        state=state,
        host=host,
    )
    op_name = 'Ensure prometheus service is running'
    if enable_service:
        op_name = '{0} and enabled'.format(op_name)

    # With --web.enable-lifecycle the config can be hot-reloaded over HTTP
    # instead of restarting the service.
    restart = generate_config.changed
    if extra_args and ('--web.enable-lifecycle' in extra_args):
        restart = False
        hit_reload_endpoint = True
    else:
        hit_reload_endpoint = False

    # Setup prometheus init
    if host.fact.linux_distribution['major'] >= 16:
        generate_service = files.template(
            name='Upload the prometheus systemd unit file',
            src=get_template_path('prometheus.service.j2'),
            dest='/etc/systemd/system/prometheus.service',
            extra_args=extra_args,
            state=state,
            host=host,
        )
        # Start (/enable) the prometheus service
        init.systemd(
            name=op_name,
            service='prometheus',
            running=True,
            restarted=restart,
            enabled=enable_service,
            daemon_reload=generate_service.changed,
            state=state,
            host=host,
        )
        # This has to happen after the service reload
        if hit_reload_endpoint:
            server.shell(
                commands='curl -X POST http://localhost:9090/-/reload',
                state=state,
                host=host,
            )
    elif host.fact.linux_distribution['major'] == 14:
        generate_service = files.template(
            name='Upload the prometheus init.d file',
            src=get_template_path('init.d.j2'),
            dest='/etc/init.d/prometheus',
            extra_args=extra_args,
            state=state,
            host=host,
        )
        # Start (/enable) the prometheus service
        init.d(
            name=op_name,
            service='prometheus',
            running=True,
            restarted=restart,
            reloaded=generate_service.changed,
            enabled=enable_service,
            state=state,
            host=host,
        )
# Fetch the signed GRUB EFI binary used for UEFI network boot.
files.download(
    name='Download `{}`'.format(uefi_file),
    src='http://archive.ubuntu.com/ubuntu/dists/trusty/main/'
        'uefi/grub2-amd64/current/grubnetx64.efi.signed',
    dest=uefi_full_path,
)

grub_dir = '{}/grub'.format(tftp_dir)

files.directory(
    name='Ensure the `{}` exists'.format(grub_dir),
    path=grub_dir,
)

files.template(
    name='Create a templated file',
    src='templates/grub.cfg.j2',
    dest='{}/grub.cfg'.format(grub_dir),
)

# configure dnsmasq
files.template(
    name='Create dnsmasq configuration file',
    src='templates/dnsmasq.conf.j2',
    dest='/etc/dnsmasq.conf',
    pxe_server=pxe_server,
    dns_server=dns_server,
    interface=interface,
    dhcp_start=dhcp_start,
    dhcp_end=dhcp_end,
    tftp_dir=tftp_dir,
)
from config.colors import colors
from pyinfra import host
from pyinfra.facts.server import *
from pyinfra.operations import files

home = host.get_fact(Home)
hostname = host.get_fact(Hostname)

# Rofi theme is rendered per host from the shared color palette.
files.template(
    name="Deploy Rofi configs",
    src="dotfiles/.config/rofi/config.rasi.j2",
    dest=f"{home}/.config/rofi/config.rasi",
    colors=colors,
    hostname=hostname,
)
def generate_vault_token():
    # Mint an orphan, periodic (168h) Vault token limited to the
    # consul-template TLS policy; returns just the client token string.
    hvac_client = hvac.Client()
    return hvac_client.create_token(
        policies=["vault-consul-tls-policy"],
        period="168h",
        orphan=True,
    )["auth"]["client_token"]


# Core Vault/Consul servers get a real token baked into the config;
# everyone else renders the template with vault_token=None.
is_vault_core = host.data.get("consul_server") or host.data.get("vault_server")

files.template(
    name="Create Consul Template base config.",
    src="templates/consul_template/base.j2",
    dest="/etc/consul-template/config/00-base.hcl",
    mode="644",
    vault_url=host.data.vault_url,
    vault_token=generate_vault_token() if is_vault_core else None,
)

if host.get_fact(LinuxName) == "Alpine":
    server.packages(
        name="Ensure Consul Template is installed.",
        packages=["consul-template"],
        present=True,
    )
    # NOTE(review): target is ".../configs" while the config written above
    # lives under ".../config" - confirm the symlink target is correct.
    files.link(
        name="Symlink consul-template.hcl to config.hcl",
        path="/etc/consul-template/consul-template.hcl",
        target="/etc/consul-template/configs",
from pyinfra import config, host, inventory
from pyinfra.operations import files, puppet

# Run everything with sudo, via a login shell so ~/.bash_profile is read.
config.SUDO = True
config.USE_SUDO_LOGIN = True

if host in inventory.get_group("master_servers"):
    files.template(
        name="Create a puppet manifest",
        src="templates/environments/production/manifests/httpd.pp.j2",
        dest="/etc/puppetlabs/code/environments/production/manifests/httpd.pp",
    )

if host in inventory.get_group("agent_servers"):
    # Either 'USE_SUDO_LOGIN=True' or 'USE_SU_LOGIN=True' for puppet.agent()
    # as `puppet` is added to the path in the .bash_profile.
    # Exit code 0 = no changes, 2 = changes applied; both count as success.
    puppet.agent(
        name="Run the puppet agent",
        success_exit_codes=[0, 2],
    )
replace='net.inet.ip.forwarding=1', ) if IPV6NETWORK: server.shell( name='Enable IPv6 packet forwarding', commands=['sysctl net.inet6.ip6.forwarding=1'], ) files.line( name='Persist IPv4 packet forwarding', path='/etc/sysctl.conf', line=r'^net.inet6.ip6.forwarding=', replace='net.inet6.ip6.forwarding=1', ) files.template( name='Generate PF config', src='templates/pf.conf.j2', dest='/etc/pf.conf', mode='600', DEFAULT_IF=DEFAULT_IF, WG_IF=WG_IF, IPV6NETWORK=IPV6NETWORK, ) server.shell( name='Enable PF', commands=['pfctl -f /etc/pf.conf; pfctl -e || true'], )
from config.colors import colors
from pyinfra import host
from pyinfra.facts.server import *
from pyinfra.operations import files

home = host.get_fact(Home)
hostname = host.get_fact(Hostname)

# Plain copy - .xinitrc is not templated.
files.rsync(
    name="Deploy xinit configs",
    src="dotfiles/.xinitrc",
    dest=f"{home}/.xinitrc",
)

# Xresources gets the shared color palette rendered in.
files.template(
    name="Deploy Xresources",
    src="dotfiles/.Xresources.j2",
    dest=f"{home}/.Xresources",
    colors=colors,
)
from pyinfra import host, inventory
from pyinfra.operations import files, puppet

# Global auth settings: sudo through a login shell (needed so the PATH
# from .bash_profile is available to ops).
SUDO = True
USE_SUDO_LOGIN = True

master_hosts = inventory.get_group('master_servers')
agent_hosts = inventory.get_group('agent_servers')

if host in master_hosts:
    files.template(
        name='Create a puppet manifest',
        src='templates/environments/production/manifests/httpd.pp.j2',
        dest='/etc/puppetlabs/code/environments/production/manifests/httpd.pp',
    )

if host in agent_hosts:
    # Either 'USE_SUDO_LOGIN=True' or 'USE_SU_LOGIN=True' for puppet.agent()
    # as `puppet` is added to the path in the .bash_profile.
    # Expected exit codes: 0 = no changes, 2 = changes applied.
    puppet.agent(
        name='Run the puppet agent',
        success_exit_codes=[0, 2],
    )
from config.colors import colors
from pyinfra import host
from pyinfra.facts.server import *
from pyinfra.operations import files

home = host.get_fact(Home)
hostname = host.get_fact(Hostname)

sxhkd_dir = f"{home}/.config/sxhkd"

# Render the hotkey configuration per host.
files.template(
    name="Deploy sxhkd config",
    src="dotfiles/.config/sxhkd/sxhkdrc.j2",
    dest=f"{sxhkd_dir}/sxhkdrc",
    hostname=hostname,
)

# Copy the helper scripts verbatim.
files.rsync(
    name="Deploy sxhkd scripts",
    src="dotfiles/.config/sxhkd/scripts/",
    dest=f"{sxhkd_dir}/scripts/",
    flags=["-a"],
)
# Fetch the signed GRUB EFI binary used for UEFI network boot.
files.download(
    name="Download `{}`".format(uefi_file),
    src="http://archive.ubuntu.com/ubuntu/dists/trusty/main/"
        "uefi/grub2-amd64/current/grubnetx64.efi.signed",
    dest=uefi_full_path,
)

grub_dir = "{}/grub".format(tftp_dir)

files.directory(
    name="Ensure the `{}` exists".format(grub_dir),
    path=grub_dir,
)

files.template(
    name="Create a templated file",
    src="templates/grub.cfg.j2",
    dest="{}/grub.cfg".format(grub_dir),
)

# configure dnsmasq
files.template(
    name="Create dnsmasq configuration file",
    src="templates/dnsmasq.conf.j2",
    dest="/etc/dnsmasq.conf",
    pxe_server=pxe_server,
    dns_server=dns_server,
    interface=interface,
    dhcp_start=dhcp_start,
    dhcp_end=dhcp_end,
    tftp_dir=tftp_dir,
)
def configure_installer_type_autoinstall_v1(state=None, host=None):
    # Provision a PXE environment for Ubuntu's autoinstall (v1) flow:
    # download the live ISO, extract its kernel/initrd for TFTP, render the
    # GRUB config, and write per-machine cloud-init user-data/meta-data.
    #
    # Download the OS installer image
    #
    iso_path = host.data.pxe.http.root_dir.joinpath(
        host.data.pxe.installer.image_source_url.path.lstrip('/'))
    files.directory(
        name=f"Ensure {iso_path.parent}",
        path=str(iso_path.parent),
        present=True,
        sudo=True,
        host=host,
        state=state,
    )
    download_installer = files.download(
        name=f'Download Installer Image to {iso_path}',
        src=str(host.data.pxe.installer.image_source_url),
        dest=str(iso_path),
        sha256sum=host.data.pxe.installer.image_sha256sum,
        sudo=True,
        host=host,
        state=state,
    )

    # This deploy only supports serving one OS version for now and
    # to ensure that the extracted bootstrap kernel and ramdisk come
    # from the correct ISO, we use this template as one of the signals
    # in the extraction logic further down. Without this, the OS version
    # being served might change but the bootstrap kernel and ramdisk
    # might not.
    # files.template uses SFTP to transfer files so we have to use
    # a different base path in the case of Synology which presents a
    # different filesystem hierarchy depending on which protocol you're on.
    # Related bug: https://github.com/Fizzadar/pyinfra/issues/499
    current_installer_flag = host.data.pxe.tftp.sftp_root_dir / 'current-installer'
    current_installer = files.template(
        name=f'Write {current_installer_flag}',
        src=deploy_dir / 'templates' / 'current-installer.j2',
        dest=current_installer_flag,
        pxe=host.data.pxe,
        host=host,
        state=state,
    )

    #
    # Extract the kernel and ram disk image for use by the bootloader
    #
    kernel_path = str(host.data.pxe.tftp.root_dir / 'vmlinuz')
    initrd_path = str(host.data.pxe.tftp.root_dir / 'initrd')
    # Re-extract when either artifact is missing or any upstream input changed.
    if host.fact.file(kernel_path) is None or \
            host.fact.file(initrd_path) is None or \
            download_installer.changed or \
            current_installer.changed:
        server.shell(
            name='Mount the ISO to /mnt',
            commands=[
                f'mount | grep "{iso_path} on /mnt" || mount {iso_path} /mnt',
            ],
            sudo=True,
            host=host,
            state=state)
        server.shell(
            name="Extract kernel and initrd from ISO",
            commands=[
                f'cp /mnt/casper/vmlinuz {kernel_path}',
                f'cp /mnt/casper/initrd {initrd_path}',
            ],
            sudo=True,
            host=host,
            state=state,
        )
        server.shell(
            name=f'Ensure {iso_path} is unmounted',
            commands=[
                f'(mount | grep "{iso_path} on /mnt" && umount /mnt) || :',
            ],
            sudo=True,
            host=host,
            state=state)

    #
    # Render GRUB2 config
    #
    # Synology's SFTP permissions are unusual in that they don't allow
    # you to create directories (which we want to do in the files.template
    # operation after this one). As a workaround to that, we're going to
    # ensure the directory via the files.directory operation since it uses
    # just SSH.
    files.directory(
        name='Ensure grub/ directory exists',
        path=str(host.data.pxe.tftp.root_dir / 'grub'),
        present=True,
        sudo=True,
        host=host,
        state=state,
    )
    files.template(
        name='Render GRUB config',
        src=str(deploy_dir / 'templates' / 'grub2.autoinstall-v1.cfg.j2'),
        # files.template uses SFTP to transfer files so we have to use
        # a different base path in the case of Synology which presents a
        # different filesystem hierarchy depending on which protocol you're on.
        dest=str(host.data.pxe.tftp.sftp_root_dir / 'grub' / 'grub.cfg'),
        create_remote_dir=False,
        pxe=host.data.pxe,
        os_name=Path(host.data.pxe.installer.image_source_url.path).stem,
        kernel_filename=Path(kernel_path).name,
        initrd_filename=Path(initrd_path).name,
        host=host,
        state=state,
    )

    #
    # Render the machine-specific user-data and meta-data files
    #
    for machine in host.data.pxe.machines:
        # Synology's SFTP permissions don't allow directory creation, so
        # ensure each machine directory over plain SSH first.
        machine_dir = host.data.pxe.http.root_dir / machine.hostname
        files.directory(
            name=f'Ensure {machine_dir} exists',
            path=machine_dir,
            present=True,
            host=host,
            state=state,
        )
        # SFTP base-path workaround (see the Synology note above).
        meta_data_path = \
            host.data.pxe.http.sftp_root_dir / machine.hostname / 'meta-data'
        files.template(
            name=f'Render {meta_data_path}',
            src=str(deploy_dir / 'templates' / 'meta-data.j2'),
            dest=str(meta_data_path),
            create_remote_dir=False,
            machine=machine,
            host=host,
            state=state,
        )
        # SFTP base-path workaround (see the Synology note above).
        user_data_path = \
            host.data.pxe.http.sftp_root_dir / machine.hostname / 'user-data'
        files.template(
            name=f'Render {user_data_path}',
            src=str(deploy_dir / 'templates' / 'user-data.j2'),
            dest=str(user_data_path),
            create_remote_dir=False,
            machine=machine,
            host=host,
            state=state,
        )
from config.colors import colors
from pyinfra import host
from pyinfra.facts.server import *
from pyinfra.operations import files

home = host.get_fact(Home)
hostname = host.get_fact(Hostname)

polybar_dir = f"{home}/.config/polybar"

# Main bar configuration, rendered per host.
files.template(
    name="Deploy polybar configs [1/2]",
    src="dotfiles/.config/polybar/config.j2",
    dest=f"{polybar_dir}/config",
    hostname=hostname,
)

# Launch script needs the executable bit.
files.template(
    name="Deploy polybar configs [2/2]",
    src="dotfiles/.config/polybar/launch.sh.j2",
    dest=f"{polybar_dir}/launch.sh",
    mode="755",
    hostname=hostname,
)