def configure(state=None, host=None):
    """Deploy the PXE service assets for this host.

    Validates the `host.data.pxe` schema, downloads the configured
    bootfiles into the TFTP root, dispatches to the installer-specific
    configure function, and finally opens read permissions on the HTTP
    root for the web server.

    :param state: pyinfra state object passed through to every operation.
    :param host: pyinfra host; all configuration is read from host.data.pxe.
    :raises UnsupportedInstallerTypeError: if the installer type is unknown.
    """
    # Only these pydantic schema versions are accepted for host.data.pxe.
    supported_schema_versions = [
        v1beta3.PxeData,
    ]
    validate_schema_version(host.data.pxe, supported_schema_versions)
    #
    # Download bootfile(s)
    #
    for bootfile in host.data.pxe.bootfiles:
        # Parent directory of the bootfile's final location under the TFTP root.
        bootfile_dir = (host.data.pxe.tftp.root_dir / bootfile.get_path()).parent
        files.directory(
            name='Ensure bootfile directory',
            path=str(bootfile_dir),
            present=True,
            sudo=True,
            host=host,
            state=state,
        )
        files.download(
            name=f'Download bootfile {bootfile.get_path()}',
            src=str(bootfile.image_source_url),
            dest=str(host.data.pxe.tftp.root_dir / bootfile.get_path()),
            sha256sum=bootfile.image_sha256sum,
            sudo=True,
            host=host,
            state=state,
        )
    # Dispatch to the configure routine for the configured installer type.
    if host.data.pxe.installer.type == InstallerType.autoinstall_v1:
        configure_installer_type_autoinstall_v1(state=state, host=host)
    elif host.data.pxe.installer.type == InstallerType.legacy_netboot:
        configure_installer_type_legacy_netboot(state=state, host=host)
    else:
        raise UnsupportedInstallerTypeError(host.data.pxe.installer)
    # TODO: Check if this should be moved to apache2.py and if doing that requires
    # that pxe.configure be executed prior to apache2.configure
    files.directory(
        name=f"Ensure www-data has read permissions in {host.data.pxe.http.root_dir}",
        path=str(host.data.pxe.http.root_dir),
        user='******',
        group='www-data',
        mode='0755',
        recursive=True,
        sudo=True,
        state=state,
        host=host,
    )
def install_node_exporter(state, host):
    """Install Prometheus node_exporter from the upstream release tarball.

    Creates a service user, downloads the versioned release archive,
    extracts it into the install directory and symlinks the binary.

    :param state: pyinfra state object.
    :param host: pyinfra host; reads node_exporter_* values from host.data.
    :raises DeployError: if node_exporter_version is not set for this host.
    """
    if not host.data.node_exporter_version:
        raise DeployError(
            'No node_exporter_version set for this host, refusing to install node_exporter!',
        )
    server.user(
        name='Create the node_exporter user (Called prometheus by default)',
        user='******',
        shell='/sbin/nologin',
        state=state,
        host=host,
    )
    files.directory(
        name='Ensure the node_exporter install directory exists',
        path='{{ host.data.node_exporter_install_dir }}',
        user=host.data.node_exporter_user,
        group=host.data.node_exporter_user,
        state=state,
        host=host,
    )
    # Work out the release filename.
    # BUG FIX: the original expression
    #   ('node_exporter-{0}.linux-' 'amd64' if arch == 'x86_64' else arch).format(...)
    # concatenated the two literals *before* evaluating the conditional, so on
    # any non-x86_64 host the whole 'node_exporter-{0}.linux-' prefix was lost
    # and .format() was a no-op. Build the arch suffix separately instead.
    # NOTE(review): host.fact.arch values (e.g. 'aarch64') may still need
    # mapping to upstream asset names (e.g. 'arm64') — confirm per platform.
    arch = 'amd64' if host.fact.arch == 'x86_64' else host.fact.arch
    host.data.node_exporter_version_name = (
        'node_exporter-{0}.linux-{1}'.format(
            host.data.node_exporter_version, arch))
    host.data.node_exporter_temp_filename = state.get_temp_filename(
        'node_exporter-{0}'.format(host.data.node_exporter_version),
    )
    download_node_exporter = files.download(
        name='Download node_exporter',
        src=('{{ host.data.node_exporter_download_base_url }}/'
             'v{{ host.data.node_exporter_version }}/'
             '{{ host.data.node_exporter_version_name }}.tar.gz'),
        dest='{{ host.data.node_exporter_temp_filename }}',
        state=state,
        host=host,
    )
    # If we downloaded node_exporter, extract it!
    if download_node_exporter.changed:
        server.shell(
            name='Extract node_exporter',
            commands='tar -xzf {{ host.data.node_exporter_temp_filename }}'
                     ' -C {{ host.data.node_exporter_install_dir }}',
            state=state,
            host=host,
        )
    files.link(
        name='Symlink node_exporter to /usr/bin',
        path='{{ host.data.node_exporter_bin_dir }}/node_exporter',  # link
        target='{{ host.data.node_exporter_install_dir }}/'
               '{{ host.data.node_exporter_version_name }}/node_exporter',
        state=state,
        host=host,
    )
def install_exporter(
    state,
    host,
    ex_url,
    ex_install_dir=None,
    ex_user='******',
    ex_bin_dir='/usr/local/bin',
):
    """Install a generic Prometheus-style exporter from a release tarball URL.

    :param state: pyinfra state object.
    :param host: pyinfra host object.
    :param ex_url: URL of the exporter release tarball.
    :param ex_install_dir: install prefix (defaults to /usr/local).
    :param ex_user: system user that owns the exporter files.
    :param ex_bin_dir: directory receiving the binary symlink.
    """
    if ex_install_dir is None:
        ex_install_dir = '/usr/local'
    # Derive the exporter directory name and binary name from the URL.
    ex_name, ex_bin_name = _get_names(ex_url)
    server.user(
        # Fixed copy-pasted label from install_node_exporter: this op creates
        # whichever user the caller passed, not specifically node_exporter.
        name='Create the {} user'.format(ex_user),
        user=ex_user,
        shell='/sbin/nologin',
        state=state,
        host=host,
    )
    files.directory(
        name='Ensure the {} install directory exists'.format(ex_name),
        path='{}/{}'.format(ex_install_dir, ex_name),
        # BUG FIX: ownership previously used host.data.node_exporter_user
        # (copy-paste from install_node_exporter); it must follow the
        # ex_user parameter so arbitrary exporters get the right owner.
        user=ex_user,
        group=ex_user,
        state=state,
        host=host,
    )
    ex_temp_filename = state.get_temp_filename(ex_url)
    download_exporter = files.download(
        name='Download exporter',
        src=ex_url,
        dest=ex_temp_filename,
        state=state,
        host=host,
    )
    # If we downloaded exporter, extract it!
    if download_exporter.changed:
        server.shell(
            name='Extract exporter',
            commands='tar -xzf {} -C {}/'.format(ex_temp_filename, ex_install_dir),
            state=state,
            host=host,
        )
    files.link(
        name='Symlink exporter to /usr/local/bin',
        path='{}/{}'.format(ex_bin_dir, ex_name),  # link
        target='{}/{}/{}'.format(ex_install_dir, ex_name, ex_bin_name),
        state=state,
        host=host,
    )
def configure(state=None, host=None):
    """Deploy PXE bootfiles and hand off to the installer-specific setup.

    Validates host.data.pxe against the supported schema, fetches each
    configured bootfile into the TFTP root, then runs the configure
    routine matching the installer type.

    :raises UnsupportedInstallerTypeError: for unrecognised installer types.
    """
    pxe = host.data.pxe
    validate_schema_version(pxe, [v1beta3.PxeData])

    #
    # Download bootfile(s)
    #
    for bootfile in pxe.bootfiles:
        destination = pxe.tftp.root_dir / bootfile.get_path()
        files.directory(
            name='Ensure bootfile directory',
            path=str(destination.parent),
            present=True,
            sudo=True,
            host=host,
            state=state,
        )
        files.download(
            name=f'Download bootfile {bootfile.get_path()}',
            src=str(bootfile.image_source_url),
            dest=str(destination),
            sha256sum=bootfile.image_sha256sum,
            sudo=True,
            host=host,
            state=state,
        )

    # Dispatch table keyed by installer type; unknown types are an error.
    handlers = {
        InstallerType.autoinstall_v1: configure_installer_type_autoinstall_v1,
        InstallerType.legacy_netboot: configure_installer_type_legacy_netboot,
    }
    handler = handlers.get(pxe.installer.type)
    if handler is None:
        raise UnsupportedInstallerTypeError(pxe.installer)
    handler(state=state, host=host)
def configure_installer_type_autoinstall_v1(state=None, host=None):
    """Configure PXE assets for an Ubuntu autoinstall (subiquity) setup.

    Downloads the installer ISO into the HTTP root, extracts the kernel and
    initrd into the TFTP root, renders the GRUB config, and renders
    per-machine cloud-init user-data/meta-data files.

    :param state: pyinfra state object.
    :param host: pyinfra host; reads everything from host.data.pxe.
    """
    #
    # Download the OS installer image
    #
    iso_path = host.data.pxe.http.root_dir.joinpath(
        host.data.pxe.installer.image_source_url.path.lstrip('/'))
    files.directory(
        name=f"Ensure {iso_path.parent}",
        path=str(iso_path.parent),
        present=True,
        sudo=True,
        host=host,
        state=state,
    )
    download_installer = files.download(
        name=f'Download Installer Image to {iso_path}',
        src=str(host.data.pxe.installer.image_source_url),
        dest=str(iso_path),
        sha256sum=host.data.pxe.installer.image_sha256sum,
        sudo=True,
        host=host,
        state=state,
    )
    # This deploy only supports serving one OS version for now and
    # to ensure that the extracted bootstrap kernel and ramdisk come
    # from the correct ISO, we use this template as one of the signals
    # in the extraction logic further down. Without this, the OS version
    # being served might change but the bootstrap kernel and ramdisk
    # might not.
    # files.template uses SFTP to transfer files so we have to use
    # a different base path in the case of Synology which presents a
    # different filesystem hierarchy depending on which protocol you're on.
    # Related bug: https://github.com/Fizzadar/pyinfra/issues/499
    current_installer_flag = host.data.pxe.tftp.sftp_root_dir / 'current-installer'
    current_installer = files.template(
        name=f'Write {current_installer_flag}',
        src=deploy_dir / 'templates' / 'current-installer.j2',
        dest=current_installer_flag,
        pxe=host.data.pxe,
        host=host,
        state=state,
    )
    #
    # Extract the kernel and ram disk image for use by the bootloader
    #
    kernel_path = str(host.data.pxe.tftp.root_dir / 'vmlinuz')
    initrd_path = str(host.data.pxe.tftp.root_dir / 'initrd')
    # Re-extract when either artifact is missing or the ISO / installer
    # signal changed in this run.
    if host.fact.file(kernel_path) is None or \
            host.fact.file(initrd_path) is None or \
            download_installer.changed or \
            current_installer.changed:
        server.shell(
            name='Mount the ISO to /mnt',
            commands=[
                f'mount | grep "{iso_path} on /mnt" || mount {iso_path} /mnt',
            ],
            sudo=True,
            host=host,
            state=state)
        server.shell(
            name="Extract kernel and initrd from ISO",
            commands=[
                f'cp /mnt/casper/vmlinuz {kernel_path}',
                f'cp /mnt/casper/initrd {initrd_path}',
            ],
            sudo=True,
            host=host,
            state=state,
        )
        server.shell(
            name=f'Ensure {iso_path} is unmounted',
            commands=[
                f'(mount | grep "{iso_path} on /mnt" && umount /mnt) || :',
            ],
            sudo=True,
            host=host,
            state=state)
    #
    # Render GRUB2 config
    #
    # Synology's SFTP permissions are unusual in that they don't allow
    # you to create directories (which we want to do in the files.template
    # operation after this one). As a workaround to that, we're going to
    # ensure the directory via the files.directory operation since it uses
    # just SSH.
    files.directory(
        name='Ensure grub/ directory exists',
        path=str(host.data.pxe.tftp.root_dir / 'grub'),
        present=True,
        sudo=True,
        host=host,
        state=state,
    )
    files.template(
        name='Render GRUB config',
        src=str(deploy_dir / 'templates' / 'grub2.autoinstall-v1.cfg.j2'),
        # files.template uses SFTP to transfer files so we have to use
        # a different base path in the case of Synology which presents a
        # different filesystem hierarchy depending on which protocol you're on.
        dest=str(host.data.pxe.tftp.sftp_root_dir / 'grub' / 'grub.cfg'),
        create_remote_dir=False,
        pxe=host.data.pxe,
        os_name=Path(host.data.pxe.installer.image_source_url.path).stem,
        kernel_filename=Path(kernel_path).name,
        initrd_filename=Path(initrd_path).name,
        host=host,
        state=state,
    )
    #
    # Render the machine-specific user-data and meta-data files
    #
    for machine in host.data.pxe.machines:
        # Synology's SFTP permissions are unusual in that they don't allow
        # you to create directories (which we want to do in the files.template
        # operation after this one). As a workaround to that, we're going to
        # ensure the directory via the files.directory operation since it uses
        # just SSH.
        machine_dir = host.data.pxe.http.root_dir / machine.hostname
        files.directory(
            name=f'Ensure {machine_dir} exists',
            # Consistency fix: every other path kwarg in this function is
            # stringified; pass str here too rather than a Path object.
            path=str(machine_dir),
            present=True,
            host=host,
            state=state,
        )
        # files.template uses SFTP to transfer files so we have to use
        # a different base path in the case of Synology which presents a
        # different filesystem hierarchy depending on which protocol you're on.
        meta_data_path = \
            host.data.pxe.http.sftp_root_dir / machine.hostname / 'meta-data'
        files.template(
            name=f'Render {meta_data_path}',
            src=str(deploy_dir / 'templates' / 'meta-data.j2'),
            dest=str(meta_data_path),
            create_remote_dir=False,
            machine=machine,
            host=host,
            state=state,
        )
        # files.template uses SFTP to transfer files so we have to use
        # a different base path in the case of Synology which presents a
        # different filesystem hierarchy depending on which protocol you're on.
        user_data_path = \
            host.data.pxe.http.sftp_root_dir / machine.hostname / 'user-data'
        files.template(
            name=f'Render {user_data_path}',
            src=str(deploy_dir / 'templates' / 'user-data.j2'),
            dest=str(user_data_path),
            create_remote_dir=False,
            machine=machine,
            host=host,
            state=state,
        )
def configure_installer_type_legacy_netboot(state=None, host=None):
    """Configure PXE assets for the legacy debian-installer netboot flow.

    Downloads the netboot archive and the OS ISO, extracts the bootstrap
    kernel/initrd from the archive, renders preseed files (manual plus one
    per machine) and the GRUB config.

    :param state: pyinfra state object.
    :param host: pyinfra host; reads everything from host.data.pxe.
    """
    #
    # Download the netboot archive
    #
    archive_path = host.data.pxe.tftp.root_dir.joinpath(
        host.data.pxe.installer.netboot_source_url.path.lstrip('/'))
    files.directory(
        name=f"Ensure {archive_path.parent}",
        path=str(archive_path.parent),
        present=True,
        sudo=True,
        host=host,
        state=state,
    )
    download_netboot_archive = files.download(
        name=f'Download netboot archive to {archive_path}',
        src=str(host.data.pxe.installer.netboot_source_url),
        dest=str(archive_path),
        sha256sum=host.data.pxe.installer.netboot_sha256sum,
        sudo=True,
        host=host,
        state=state,
    )
    #
    # Download the OS iso
    #
    iso_path = host.data.pxe.http.root_dir.joinpath(
        host.data.pxe.installer.image_source_url.path.lstrip('/'))
    files.directory(
        name=f"Ensure {iso_path.parent}",
        path=str(iso_path.parent),
        present=True,
        sudo=True,
        host=host,
        state=state,
    )
    # Unmount first so a stale mount can't shadow a fresh download.
    server.shell(name=f'Ensure nothing is mounted on {iso_path.parent}/mnt',
                 commands=[
                     f'(mount | grep " on {iso_path.parent}/mnt" && '
                     f'umount {iso_path.parent}/mnt) || :',
                 ],
                 sudo=True,
                 host=host,
                 state=state)
    download_iso = files.download(
        name=f'Download OS iso to {iso_path}',
        src=str(host.data.pxe.installer.image_source_url),
        dest=str(iso_path),
        sudo=True,
        sha256sum=host.data.pxe.installer.image_sha256sum,
        host=host,
        state=state,
    )
    iso_mount_path = iso_path.parent / 'mnt'
    server.shell(name=f'Mount the ISO to {iso_path.parent}/mnt',
                 commands=[
                     f'mkdir -p {iso_path.parent}/mnt',
                     f'mount {iso_path} {iso_path.parent}/mnt',
                 ],
                 sudo=True,
                 host=host,
                 state=state)
    # This deploy only supports serving one OS version for now and
    # to ensure that the extracted bootstrap kernel and ramdisk come
    # from the correct ISO, we use this template as one of the signals
    # in the extraction logic further down. Without this, the OS version
    # being served might change but the bootstrap kernel and ramdisk
    # might not.
    # files.template uses SFTP to transfer files so we have to use
    # a different base path in the case of Synology which presents a
    # different filesystem hierarchy depending on which protocol you're on.
    # Related bug: https://github.com/Fizzadar/pyinfra/issues/499
    current_installer_flag = host.data.pxe.tftp.sftp_root_dir / 'current-installer'
    current_installer = files.template(
        name=f'Write {current_installer_flag}',
        src=deploy_dir / 'templates' / 'current-installer.j2',
        dest=current_installer_flag,
        pxe=host.data.pxe,
        host=host,
        state=state,
    )
    #
    # Extract the kernel and ram disk image for use by the bootloader
    #
    kernel_path = host.data.pxe.tftp.root_dir / 'linux'
    initrd_path = host.data.pxe.tftp.root_dir / 'initrd.gz'
    if host.fact.file(kernel_path) is None or \
            host.fact.file(initrd_path) is None or \
            download_netboot_archive.changed or \
            download_iso.changed or \
            current_installer.changed:
        # We make use of the kernel and initrd in the netboot archive
        # since the the ones in the 18.04 iso, specifically under the
        # {iso_mount_path}/install directory, fail to work properly.
        kernel_path_in_archive = './ubuntu-installer/amd64/linux'
        initrd_path_in_archive = './ubuntu-installer/amd64/initrd.gz'
        server.shell(
            name='Extract the files from the netboot installer',
            commands=[
                f'tar -zxvf {archive_path} -C {kernel_path.parent} '
                f'--strip-components={kernel_path_in_archive.count("/")} '
                f'{kernel_path_in_archive}',
                f'tar -zxvf {archive_path} -C {initrd_path.parent} '
                f'--strip-components={initrd_path_in_archive.count("/")} '
                f'{initrd_path_in_archive}',
            ],
            sudo=True,
            host=host,
            state=state)
    #
    # Render Legacy Preseed Config
    #
    # files.template uses SFTP to transfer files so we have to use
    # a different base path in the case of Synology which presents a
    # different filesystem hierarchy depending on which protocol you're on.
    legacy_preseed_manual_path = host.data.pxe.http.sftp_root_dir.joinpath(
        'legacy-preseed-manual.seed')
    # files.template uses SFTP to transfer files so we have to use
    # a different base path in the case of Synology which presents a
    # different filesystem hierarchy depending on which protocol you're on.
    legacy_preseed_auto_dir = host.data.pxe.http.sftp_root_dir.joinpath(
        'legacy-preseed-auto')
    net_image_disk_path = iso_mount_path / 'install' / 'filesystem.squashfs'
    net_image_http_path = str(
        Path(host.data.pxe.installer.image_source_url.path.lstrip(
            '/')).parent.joinpath('mnt', 'install', 'filesystem.squashfs'))
    server.shell(name='Check that squashfs file exists',
                 commands=[
                     f'test -f {net_image_disk_path}',
                 ],
                 sudo=True,
                 host=host,
                 state=state)
    files.template(
        name='Render legacy preseed config',
        src=deploy_dir / 'templates' / 'legacy-preseed-manual.seed.j2',
        # files.template uses SFTP to transfer files so we have to use
        # a different base path in the case of Synology which presents a
        # different filesystem hierarchy depending on which protocol you're on.
        # NOTE(review): legacy_preseed_manual_path is already rooted at
        # sftp_root_dir; joining it onto sftp_root_dir again is redundant
        # (pathlib keeps the absolute right-hand side) — confirm and simplify.
        dest=host.data.pxe.http.sftp_root_dir / legacy_preseed_manual_path,
        create_remote_dir=False,
        pxe=host.data.pxe,
        net_image_http_path=net_image_http_path,
        host=host,
        state=state,
    )
    #
    # Render GRUB2 config
    #
    installer_path = Path(
        host.data.pxe.installer.netboot_source_url.path.lstrip('/'))
    # Synology's SFTP permissions are unusual in that they don't allow
    # you to create directories (which we want to do in the files.template
    # operation after this one). As a workaround to that, we're going to
    # ensure the directory via the files.directory operation since it uses
    # just SSH.
    grub_dir = host.data.pxe.tftp.root_dir / 'grub'
    files.directory(
        name=f'Ensure {grub_dir} exists',
        path=grub_dir,
        present=True,
        host=host,
        state=state,
    )
    files.template(
        name='Render GRUB config',
        src=deploy_dir / 'templates' / 'grub2.legacy-netboot.cfg.j2',
        # files.template uses SFTP to transfer files so we have to use
        # a different base path in the case of Synology which presents a
        # different filesystem hierarchy depending on which protocol you're on.
        dest=host.data.pxe.tftp.sftp_root_dir / 'grub' / 'grub.cfg',
        create_remote_dir=False,
        initrd_filename=Path(initrd_path).name,
        installer_path=installer_path,
        kernel_filename=kernel_path.name,
        legacy_preseed_auto_dir=legacy_preseed_auto_dir.stem,
        legacy_preseed_manual_path=legacy_preseed_manual_path.name,
        net_image_http_path=net_image_http_path,
        os_name=Path(host.data.pxe.installer.image_source_url.path).stem,
        pxe=host.data.pxe,
        host=host,
        state=state,
    )
    #
    # Render the machine-specific preseed files
    #
    # Synology's SFTP permissions are unusual in that they don't allow
    # you to create directories (which we want to do in the files.template
    # operation after this one). As a workaround to that, we're going to
    # ensure the directory via the files.directory operation since it uses
    # just SSH.
    legacy_preseed_auto_dir_ssh_path = \
        host.data.pxe.http.root_dir / legacy_preseed_auto_dir.stem
    files.directory(
        # BUG FIX: this operation was named f'Ensure {grub_dir} exists'
        # (copy-pasted from the grub directory op above) although it ensures
        # the legacy-preseed auto directory.
        name=f'Ensure {legacy_preseed_auto_dir_ssh_path} exists',
        path=legacy_preseed_auto_dir_ssh_path,
        present=True,
        host=host,
        state=state,
    )
    for machine in host.data.pxe.machines:
        machine_legacy_preseed_path = legacy_preseed_auto_dir / machine.hostname
        files.template(
            name=f'Render {machine_legacy_preseed_path}',
            src=deploy_dir / 'templates' / 'legacy-preseed-auto.seed.j2',
            dest=machine_legacy_preseed_path,
            create_remote_dir=False,
            machine=machine,
            pxe=host.data.pxe,
            host=host,
            state=state,
        )
def install_prometheus(state, host):
    """Install the Prometheus server from the upstream release tarball.

    Creates the prometheus user, ensures data/install directories,
    downloads the versioned release archive, extracts it and symlinks
    the binary.

    :param state: pyinfra state object.
    :param host: pyinfra host; reads prometheus_* values from host.data.
    :raises DeployError: if prometheus_version is not set for this host.
    """
    if not host.data.prometheus_version:
        raise DeployError(
            'No prometheus_version set for this host, refusing to install prometheus!',
        )
    server.user(
        name='Create the prometheus user',
        user='******',
        shell='/sbin/nologin',
        state=state,
        host=host,
    )
    files.directory(
        name='Ensure the prometheus data directory exists',
        path='{{ host.data.prometheus_data_dir }}',
        user=host.data.prometheus_user,
        group=host.data.prometheus_user,
        state=state,
        host=host,
    )
    files.directory(
        name='Ensure the prometheus install directory exists',
        path='{{ host.data.prometheus_install_dir }}',
        user=host.data.prometheus_user,
        group=host.data.prometheus_user,
        state=state,
        host=host,
    )
    # Work out the release filename.
    # BUG FIX: the original expression
    #   ('prometheus-{0}.linux-' 'amd64' if arch == 'x86_64' else arch).format(...)
    # concatenated the literals before the conditional, so on non-x86_64 hosts
    # the 'prometheus-{0}.linux-' prefix was dropped entirely. Build the arch
    # suffix separately instead.
    # NOTE(review): host.fact.arch values may still need mapping to upstream
    # asset names (e.g. 'aarch64' vs 'arm64') — confirm per platform.
    arch = 'amd64' if host.fact.arch == 'x86_64' else host.fact.arch
    host.data.prometheus_version_name = (
        'prometheus-{0}.linux-{1}'.format(host.data.prometheus_version, arch))
    host.data.prometheus_temp_filename = state.get_temp_filename(
        'prometheus-{0}'.format(host.data.prometheus_version),
    )
    download_prometheus = files.download(
        name='Download prometheus',
        src=('{{ host.data.prometheus_download_base_url }}/'
             'v{{ host.data.prometheus_version }}/'
             '{{ host.data.prometheus_version_name }}.tar.gz'),
        dest='{{ host.data.prometheus_temp_filename }}',
        state=state,
        host=host,
    )
    # If we downloaded prometheus, extract it!
    if download_prometheus.changed:
        server.shell(
            name='Extract prometheus',
            commands='tar -xzf {{ host.data.prometheus_temp_filename }}'
                     ' -C {{ host.data.prometheus_install_dir }}',
            state=state,
            host=host,
        )
    files.link(
        name='Symlink prometheus to /usr/bin',
        path='{{ host.data.prometheus_bin_dir }}/prometheus',  # link
        target='{{ host.data.prometheus_install_dir }}/{{ host.data.prometheus_version_name }}/prometheus',
        state=state,
        host=host,
    )
# Fetch the Alpine netboot archive plus its published sha256 file.
if host.fact.linux_name == 'Ubuntu':
    apt.packages(
        name='Install wget',
        packages=['wget'],
        update=True,
    )

# Full URL:
# http://dl-cdn.alpinelinux.org/alpine/v3.11/releases/x86_64/alpine-netboot-3.11.2-x86_64.tar.gz
# sha256 is here
# http://dl-cdn.alpinelinux.org/alpine/v3.11/releases/x86_64/alpine-netboot-3.11.2-x86_64.tar.gz.sha256
tarfile = 'alpine-netboot-3.11.2-x86_64.tar.gz'
tarfile_full_path = f'/tmp/{tarfile}'
sha256file = f'{tarfile}.sha256'
sha256file_full_path = f'/tmp/{sha256file}'

# TODO: Check if download was successful
for artefact, destination in (
    (tarfile, tarfile_full_path),
    (sha256file, sha256file_full_path),
):
    files.download(
        name=f'Download `{artefact}`',
        src=f'http://dl-cdn.alpinelinux.org/alpine/v3.11/releases/x86_64/{artefact}',
        dest=destination,
    )
def configure_installer_type_autoinstall_v1(state=None, host=None):
    """Configure PXE assets for an Ubuntu autoinstall (subiquity) setup.

    Downloads the installer ISO, extracts the bootstrap kernel/initrd via a
    temporary loop mount, renders the GRUB config, and renders per-machine
    cloud-init user-data/meta-data files.

    :param state: pyinfra state object passed through to every operation.
    :param host: pyinfra host; all configuration is read from host.data.pxe.
    """
    #
    # Download the OS installer image
    #
    iso_path = host.data.pxe.http.root_dir.joinpath(
        host.data.pxe.installer.image_source_url.path.lstrip('/'))
    files.directory(
        name=f"Ensure {iso_path.parent}",
        path=str(iso_path.parent),
        present=True,
        sudo=True,
        host=host,
        state=state,
    )
    download_installer = files.download(
        name=f'Download Installer Image to {iso_path}',
        src=str(host.data.pxe.installer.image_source_url),
        dest=str(iso_path),
        sha256sum=host.data.pxe.installer.image_sha256sum,
        sudo=True,
        host=host,
        state=state,
    )
    # This deploy only supports serving one OS version for now and
    # to ensure that the extracted bootstrap kernel and ramdisk come
    # from the correct ISO, we use this template as one of the signals
    # in the extraction logic further down. Without this, the OS version
    # being served might change but the bootstrap kernel and ramdisk
    # might not.
    current_installer = files.template(
        name='Signal Current Installer',
        src=str(deploy_dir / 'templates' / 'current-installer.j2'),
        dest=str(host.data.pxe.tftp.root_dir / 'current-installer'),
        pxe=host.data.pxe,
        sudo=True,
        host=host,
        state=state,
    )
    #
    # Extract the kernel and ram disk image for use by the bootloader
    #
    kernel_path = str(host.data.pxe.tftp.root_dir / 'vmlinuz')
    initrd_path = str(host.data.pxe.tftp.root_dir / 'initrd')
    # Re-extract when either artifact is missing, or when the ISO or the
    # current-installer signal changed during this run.
    if host.fact.file(kernel_path) is None or \
            host.fact.file(initrd_path) is None or \
            download_installer.changed or \
            current_installer.changed:
        # Mount is idempotent: grep for an existing mount before mounting.
        server.shell(
            name='Mount the ISO to /mnt',
            commands=[
                f'mount | grep "{iso_path} on /mnt" || mount {iso_path} /mnt',
            ],
            sudo=True,
            host=host,
            state=state)
        server.shell(
            name="Extract kernel and initrd from ISO",
            commands=[
                f'cp /mnt/casper/vmlinuz {kernel_path}',
                f'cp /mnt/casper/initrd {initrd_path}',
            ],
            sudo=True,
            host=host,
            state=state,
        )
        # Always release the loop mount, even when it was pre-existing.
        server.shell(
            name=f'Ensure {iso_path} is unmounted',
            commands=[
                f'(mount | grep "{iso_path} on /mnt" && umount /mnt) || :',
            ],
            sudo=True,
            host=host,
            state=state)
    #
    # Render GRUB2 config
    #
    files.template(
        name='Render GRUB config',
        src=str(deploy_dir / 'templates' / 'grub2.autoinstall-v1.cfg.j2'),
        dest=str(host.data.pxe.tftp.root_dir / 'grub' / 'grub.cfg'),
        pxe=host.data.pxe,
        os_name=Path(host.data.pxe.installer.image_source_url.path).stem,
        kernel_filename=Path(kernel_path).name,
        initrd_filename=Path(initrd_path).name,
        sudo=True,
        host=host,
        state=state,
    )
    #
    # Render the machine-specific user-data and meta-data files
    #
    for machine in host.data.pxe.machines:
        meta_data_path = host.data.pxe.http.root_dir / machine.hostname / 'meta-data'
        files.template(
            name=f'Render {meta_data_path}',
            src=str(deploy_dir / 'templates' / 'meta-data.j2'),
            dest=str(meta_data_path),
            create_remote_dir=True,
            sudo=True,
            machine=machine,
            host=host,
            state=state,
        )
        user_data_path = host.data.pxe.http.root_dir / machine.hostname / 'user-data'
        files.template(
            name=f'Render {user_data_path}',
            src=str(deploy_dir / 'templates' / 'user-data.j2'),
            dest=str(user_data_path),
            create_remote_dir=True,
            sudo=True,
            machine=machine,
            host=host,
            state=state,
        )
# Download each configured bootloader into a per-client-type directory
# under the TFTP root, then render the dnsmasq configuration.
tftp_settings = host.data.dnsmasq["tftp"]
tftp_root = Path(tftp_settings["root_dir"])
for bootloader in tftp_settings["bootloaders"]:
    bootloader_dir = tftp_root / bootloader["client_type"]
    source_url = bootloader["source_url"]
    files.directory(
        name=f'Ensure bootloader directory {bootloader_dir}',
        path=str(bootloader_dir),
        present=True,
        sudo=True,
    )
    files.download(
        name=f'Download bootloader {source_url}',
        src=str(source_url),
        # Keep the upstream filename (everything after the final slash).
        dest=str(bootloader_dir / source_url.rsplit('/', 1)[-1]),
        sha256sum=bootloader["sha256sum"],
        sudo=True,
    )

dnsmasq_conf = files.template(
    name='Render the dnsmasq config',
    src='templates/pxe/dnsmasq.conf.j2',
    dest='/etc/dnsmasq.conf',
    mode='744',
    user='******',
    group='root',
    sudo=True,
    dnsmasq=host.data.dnsmasq,
    machines=host.data.machines,
)
# Verify the previously-executed command succeeded and actually produced
# Vagrant version output before installing anything.
assert status is True  # ensure the command executed OK
if 'Vagrant ' not in str(stdout):
    # BUG FIX: the original message read '...expected.stdout:{} stderr:{}'
    # with no separators, producing an unreadable error string.
    raise Exception(
        '`{}` did not work as expected. stdout: {} stderr: {}'.format(
            command, stdout, stderr))
if host.fact.linux_name == 'Ubuntu':
    apt.packages(
        {'Install required packages'},
        ['wget', 'unzip', 'python3'],
        update=True,
    )
    files.download(
        {'Download the Vagrantup Downloads page'},
        'https://www.vagrantup.com/downloads.html',
        '/tmp/downloads.html',
    )
    server.script_template(
        {'Use wget to download and unzip to /usr/local/bin'},
        'templates/download_vagrant.bash.j2',
    )
    python.call(
        {'Verify vagrant is installed by running version command'},
        verify_vagrant,
    )
def install_concourse(concourse_config: ConcourseBaseConfig, state=None, host=None):
    """Install a versioned Concourse release and link it into place.

    Creates the Concourse system user, downloads and unpacks the release
    archive when the versioned directory is absent, and points the deploy
    directory symlink at the installed version.

    :returns: True when the active-installation symlink changed.
    """
    # Create a Concourse system user
    server.user(
        name="Create the Concourse system user",
        user=concourse_config.user,
        present=True,
        home=concourse_config.deploy_directory,
        ensure_home=False,
        shell="/bin/false",  # noqa: S604
        system=True,
        state=state,
        host=host,
    )
    version = concourse_config.version
    installation_directory = f"{concourse_config.deploy_directory}-{version}"
    if not host.fact.directory(installation_directory):
        # Download latest Concourse release from GitHub
        archive_url = (
            "https://github.com/concourse/concourse/releases/download/"
            f"v{version}/concourse-{version}-linux-amd64.tgz")
        archive_path = f"/tmp/concourse-{version}.tgz"  # noqa: S108
        # The published .sha1 file's first token is the expected digest.
        expected_sha1 = (
            httpx.get(archive_url + ".sha1").read().decode("utf8").split()[0])
        files.download(
            name="Download the Concourse release archive",
            src=archive_url,
            dest=archive_path,
            sha1sum=expected_sha1,
            state=state,
            host=host,
        )
        # Unpack Concourse to /opt/concourse
        server.shell(
            name="Extract the Concourse release archive.",
            commands=[
                f"tar -xvzf {archive_path}",
                f"mv concourse {installation_directory}",
            ],
            state=state,
            host=host,
        )
        # Verify ownership of Concourse directory
        files.directory(
            name="Set ownership of Concourse directory",
            path=installation_directory,
            user=concourse_config.user,
            state=state,
            host=host,
        )
    # Link Concourse installation to target directory
    active_installation_path = files.link(
        name="Link Concourse installation to target directory",
        path=concourse_config.deploy_directory,
        target=installation_directory,
        user=concourse_config.user,
        symbolic=True,
        present=True,
        state=state,
        host=host,
    )
    return active_installation_path.changed
# Render the pxelinux default config, then fetch and unpack the Ubuntu ISO
# into the NFS root.
# CONSISTENCY FIX: converted the deprecated set-as-first-argument op syntax
# ({'Name'}, positional args) to the name=/src=/dest= keyword style used by
# the equivalent snippet elsewhere in this repository.
files.template(
    name='Create a templated file',
    src='templates/default.j2',
    dest='/netboot/tftp/pxelinux.cfg/default',
    pxe_server=pxe_server,
)

# TODO: check sha
# TODO: check pgp?
# iso = 'ubuntu-18.04.3-desktop-amd64.iso'
iso = 'ubuntu-18.04.3-live-server-amd64.iso'
iso_full_path = '/tmp/{}'.format(iso)
files.download(
    name='Download `{}` iso'.format(iso),
    src='http://releases.ubuntu.com/18.04/{}'.format(iso),
    dest=iso_full_path,
)
server.shell(
    name='Mount iso',
    commands='mount -o loop {} /mnt'.format(iso_full_path),
)
server.shell(
    name='Copy contents of ISO to mount',
    commands='cp -Rfv /mnt/* /netboot/nfs/ubuntu1804/',
)

# copy vmlinuz and initrd files
init_files = ['vmlinuz', 'initrd']
name="Install packages", packages=["dnsmasq"], update=True, ) tftp_dir = "/srv/tftp" files.directory( name="Ensure the `{}` exists".format(tftp_dir), path=tftp_dir, ) tar_file = "netboot.tar.gz" tar_file_full_path = "/tmp/{}".format(tar_file) files.download( name="Download `{}`".format(tar_file), src="http://archive.ubuntu.com/ubuntu/dists/bionic-updates/main/" "installer-amd64/current/images/netboot/{}".format(tar_file), dest=tar_file_full_path, ) server.shell( name="Extract files from tar file", commands="tar -xvzf {} -C {}".format(tar_file_full_path, tftp_dir), ) server.shell( name="Change permissions", commands="chown -R nobody:nogroup {}".format(tftp_dir), ) uefi_file = "grubnetx64.efi.signed" uefi_full_path = "{}/{}".format(tftp_dir, uefi_file)
from pyinfra import host
from pyinfra.facts.files import File
from pyinfra.facts.server import LinuxName
from pyinfra.operations import files

# Note: This requires files in the files/ directory.

# Install the Docker CE yum repo only on RedHat-family hosts.
if host.get_fact(LinuxName) in ["CentOS", "RedHat"]:
    files.download(
        name="Download the Docker repo file",
        src="https://download.docker.com/linux/centos/docker-ce.repo",
        dest="/etc/yum.repos.d/docker-ce.repo",
    )

files.put(
    name="Update the message of the day file",
    src="files/motd",
    dest="/etc/motd",
    mode="644",
)

# prepare to do some maintenance
maintenance_line = "SYSTEM IS DOWN FOR MAINTENANCE"
# files.line(
#     name='Add the down-for-maintenance line in /etc/motd',
#     '/etc/motd',
#     maintenance_line,
# )

# do some maintenance...
# Then, after the maintenance is done, remove the maintenance line
{'Install packages'},
['dnsmasq'],
update=True,
)

# Netboot TFTP setup (old pyinfra set-as-name call style).
# NOTE(review): the {'...'} first-argument syntax is the deprecated pyinfra
# v0 form — consider migrating to name=/src=/dest= keywords.
tftp_dir = '/srv/tftp'
files.directory(
    {'Ensure the `{}` exists'.format(tftp_dir)},
    tftp_dir,
)

tar_file = 'netboot.tar.gz'
tar_file_full_path = '/tmp/{}'.format(tar_file)
files.download(
    {'Download `{}`'.format(tar_file)},
    'http://archive.ubuntu.com/ubuntu/dists/bionic-updates/main/'
    'installer-amd64/current/images/netboot/{}'.format(tar_file),
    tar_file_full_path,
)
server.shell(
    {'Extract files from tar file'},
    'tar -xvzf {} -C {}'.format(tar_file_full_path, tftp_dir),
)
# Hand the extracted tree to the unprivileged TFTP daemon user.
server.shell(
    {'Change permissions'},
    'chown -R nobody:nogroup {}'.format(tftp_dir),
)

# Signed UEFI GRUB image served alongside the BIOS loader.
uefi_file = 'grubnetx64.efi.signed'
uefi_full_path = '{}/{}'.format(tftp_dir, uefi_file)
# Render the pxelinux default config, then fetch the Ubuntu server ISO and
# copy its contents into the NFS root.
files.template(
    name='Create a templated file',
    src='templates/default.j2',
    dest='/netboot/tftp/pxelinux.cfg/default',
    pxe_server=pxe_server,
)

# TODO: check sha
# TODO: check pgp?
# iso = 'ubuntu-18.04.3-desktop-amd64.iso'
iso = 'ubuntu-18.04.3-live-server-amd64.iso'
iso_full_path = f'/tmp/{iso}'
files.download(
    name=f'Download `{iso}` iso',
    src=f'http://releases.ubuntu.com/18.04/{iso}',
    dest=iso_full_path,
)
server.shell(
    name='Mount iso',
    commands=f'mount -o loop {iso_full_path} /mnt',
)
server.shell(
    name='Copy contents of ISO to mount',
    commands='cp -Rfv /mnt/* /netboot/nfs/ubuntu1804/',
)

# copy vmlinuz and initrd files
init_files = ['vmlinuz', 'initrd']
from pyinfra import host
from pyinfra.operations import files

# Note: This requires files in the files/ directory.

SUDO = True

# CONSISTENCY FIX: converted the deprecated set-as-first-argument op syntax
# to the name=/src=/dest= keyword style used elsewhere in this repository.
# Install the Docker CE yum repo only on RedHat-family hosts.
if host.fact.linux_name in ['CentOS', 'RedHat']:
    files.download(
        name='Download the Docker repo file',
        src='https://download.docker.com/linux/centos/docker-ce.repo',
        dest='/etc/yum.repos.d/docker-ce.repo',
    )

files.put(
    name='Update the message of the day file',
    src='files/motd',
    dest='/etc/motd',
    mode='644',
)

# prepare to do some maintenance
maintenance_line = 'SYSTEM IS DOWN FOR MAINTENANCE'
# files.line(
#     name='Add the down-for-maintenance line in /etc/motd',
#     path='/etc/motd',
#     line=maintenance_line,
# )

# do some maintenance...
# Then, after the maintenance is done, remove the maintenance line
# Fetch the Alpine netboot archive plus its published sha256 file.
# CONSISTENCY FIX: converted the deprecated set-as-first-argument op syntax
# to the name=/src=/dest= keyword style used by the equivalent snippet
# elsewhere in this repository.
apt.packages(
    name='Install wget',
    packages=['wget'],
    update=True,
)

# Full URL:
# http://dl-cdn.alpinelinux.org/alpine/v3.11/releases/x86_64/alpine-netboot-3.11.2-x86_64.tar.gz
# sha256 is here
# http://dl-cdn.alpinelinux.org/alpine/v3.11/releases/x86_64/alpine-netboot-3.11.2-x86_64.tar.gz.sha256
tarfile = 'alpine-netboot-3.11.2-x86_64.tar.gz'
tarfile_full_path = '/tmp/{}'.format(tarfile)
sha256file = tarfile + '.sha256'
sha256file_full_path = '/tmp/{}'.format(sha256file)

# TODO: Check if download was successful
files.download(
    name='Download `{}`'.format(tarfile),
    src='http://dl-cdn.alpinelinux.org/alpine/v3.11/releases/x86_64/{}'.format(
        tarfile),
    dest=tarfile_full_path,
)
files.download(
    name='Download `{}`'.format(sha256file),
    src='http://dl-cdn.alpinelinux.org/alpine/v3.11/releases/x86_64/{}'.format(
        sha256file),
    dest=sha256file_full_path,
)
def install_hashicorp_products(hashicorp_products: List[HashicorpProduct],
                               state=None,
                               host=None):
    """Download, verify and install a list of HashiCorp products.

    For each product: create a dedicated system user, download the
    release zip from releases.hashicorp.com (checked against the
    published SHA256SUMS), unzip the binary into the install directory,
    make it executable, and ensure the configuration (and optional data)
    directories exist.

    :param hashicorp_products: products (name, version, directories) to install
    :param state: pyinfra state object
    :param host: pyinfra host object
    :raises KeyError: if the expected archive name is absent from the
        upstream SHA256SUMS file
    """
    apt.packages(
        name="Ensure unzip is installed",
        packages=["unzip"],
        update=True,
        state=state,
        host=host,
    )
    for product in hashicorp_products:
        server.user(
            name=f"Create system user for {product.name}",
            user=product.name,
            system=True,
            shell="/bin/false",  # noqa: S604
            state=state,
            host=host,
        )
        # BUG FIX: previously this compared the *bound method* object
        # (`.lower == "debian"`), which is never equal to a string, so
        # every host silently fell through to the "amd64" default.
        family = linux_family(host.fact.linux_name).lower()
        if family == "debian":
            cpu_arch = host.fact.debian_cpu_arch
        elif family == "redhat":
            cpu_arch = host.fact.redhat_cpu_arch
        else:
            cpu_arch = "amd64"
        file_download = f"{product.name}_{product.version}_linux_{cpu_arch}.zip"
        # Fetch the published checksum list; each line is
        # "<sha256>  <filename>".
        file_hashes = (
            httpx.get(
                "https://releases.hashicorp.com/{product_name}/{product_version}/{product_name}_{product_version}_SHA256SUMS"
                .format(  # noqa: E501
                    product_name=product.name,
                    product_version=product.version)).read().decode(
                        "utf8").strip("\n").split("\n"))
        # Map filename -> sha256 so the archive download can be verified.
        file_hash_map = {
            file_hash.split()[1]: file_hash.split()[0]
            for file_hash in file_hashes
        }
        download_destination = f"/tmp/{product.name}.zip"  # noqa: S108
        target_directory = product.install_directory or "/usr/local/bin/"
        download_binary = files.download(
            name=f"Download {product.name} archive",
            src=f"https://releases.hashicorp.com/{product.name}/{product.version}/{file_download}",  # noqa: WPS221,E501
            dest=download_destination,
            sha256sum=file_hash_map[file_download],
            state=state,
            host=host,
        )
        server.shell(
            name=f"Unzip {product.name}",
            commands=[
                f"unzip -o {download_destination} -d {target_directory}"
            ],
            state=state,
            host=host,
        )
        files.file(
            name=f"Ensure {product.name} binary is executable",
            path=Path(target_directory).joinpath(product.name),
            assume_present=download_binary.changed,
            user=product.name,
            group=product.name,
            mode="755",
            state=state,
            host=host,
        )
        files.directory(
            name=f"Ensure configuration directory for {product.name}",
            path=product.configuration_directory
            or product.configuration_file.parent,
            present=True,
            user=product.name,
            group=product.name,
            recursive=True,
            state=state,
            host=host,
        )
        # Products without a data directory (e.g. CLI-only tools) simply
        # skip this step.
        if hasattr(product, "data_directory"):  # noqa: WPS421
            files.directory(
                name=f"Create data directory for {product.name}",
                path=product.data_directory,
                present=True,
                user=product.name,
                group=product.name,
                recursive=True,
                state=state,
                host=host,
            )
def configure_installer_type_legacy_netboot(state=None, host=None):
    """Set up PXE serving for the "legacy netboot" installer.

    Downloads the netboot archive and OS ISO, mounts the ISO, extracts
    the bootstrap kernel/initrd into the TFTP root, and renders the
    preseed and GRUB2 configuration files.

    :param state: pyinfra state object
    :param host: pyinfra host object (reads host.data.pxe.*)
    """
    #
    # Download the netboot archive
    #
    # The archive is placed under the TFTP root, mirroring the URL path.
    archive_path = host.data.pxe.tftp.root_dir.joinpath(
        host.data.pxe.installer.netboot_source_url.path.lstrip('/'))
    files.directory(
        name=f"Ensure {archive_path.parent}",
        path=str(archive_path.parent),
        present=True,
        sudo=True,
        host=host,
        state=state,
    )
    download_netboot_archive = files.download(
        name=f'Download netboot archive to {archive_path}',
        src=str(host.data.pxe.installer.netboot_source_url),
        dest=str(archive_path),
        sha256sum=host.data.pxe.installer.netboot_sha256sum,
        sudo=True,
        host=host,
        state=state,
    )

    #
    # Download the OS iso
    #
    # The ISO is placed under the HTTP root, mirroring the URL path.
    iso_path = host.data.pxe.http.root_dir.joinpath(
        host.data.pxe.installer.image_source_url.path.lstrip('/'))
    files.directory(
        name=f"Ensure {iso_path.parent}",
        path=str(iso_path.parent),
        present=True,
        sudo=True,
        host=host,
        state=state,
    )
    # Unmount any previously-mounted ISO so a (possibly new) image can be
    # re-mounted below; the trailing `|| :` makes this a no-op when
    # nothing is mounted.
    server.shell(name=f'Ensure nothing is mounted on {iso_path.parent}/mnt',
                 commands=[
                     f'(mount | grep " on {iso_path.parent}/mnt" && '
                     f'umount {iso_path.parent}/mnt) || :',
                 ],
                 sudo=True,
                 host=host,
                 state=state)
    download_iso = files.download(
        name=f'Download OS iso to {iso_path}',
        src=str(host.data.pxe.installer.image_source_url),
        dest=str(iso_path),
        sudo=True,
        sha256sum=host.data.pxe.installer.image_sha256sum,
        host=host,
        state=state,
    )
    iso_mount_path = iso_path.parent / 'mnt'
    server.shell(name=f'Mount the ISO to {iso_path.parent}/mnt',
                 commands=[
                     f'mkdir -p {iso_path.parent}/mnt',
                     f'mount {iso_path} {iso_path.parent}/mnt',
                 ],
                 sudo=True,
                 host=host,
                 state=state)

    # This deploy only supports serving one OS version for now and
    # to ensure that the extracted bootstrap kernel and ramdisk come
    # from the correct ISO, we use this template as one of the signals
    # in the extraction logic further down. Without this, the OS version
    # being served might change but the bootstrap kernel and ramdisk
    # might not.
    current_installer = files.template(
        name='Signal Current Installer',
        src=str(deploy_dir / 'templates' / 'current-installer.j2'),
        dest=str(host.data.pxe.tftp.root_dir / 'current-installer'),
        pxe=host.data.pxe,
        sudo=True,
        host=host,
        state=state,
    )

    #
    # Extract the kernel and ram disk image for use by the bootloader
    #
    kernel_path = host.data.pxe.tftp.root_dir / 'linux'
    initrd_path = host.data.pxe.tftp.root_dir / 'initrd.gz'
    # Re-extract whenever either file is missing or any upstream input
    # (archive, ISO, or the current-installer signal) changed.
    if host.fact.file(kernel_path) is None or \
       host.fact.file(initrd_path) is None or \
       download_netboot_archive.changed or \
       download_iso.changed or \
       current_installer.changed:
        # We make use of the kernel and initrd in the netboot archive
        # since the the ones in the 18.04 iso, specifically under the
        # {iso_mount_path}/install directory, fail to work properly.
        kernel_path_in_archive = './ubuntu-installer/amd64/linux'
        initrd_path_in_archive = './ubuntu-installer/amd64/initrd.gz'
        # --strip-components is derived from the depth of each member
        # path so the files land directly in the target directory.
        server.shell(
            name='Extract the files from the netboot installer',
            commands=[
                f'tar -zxvf {archive_path} -C {kernel_path.parent} '
                f'--strip-components={kernel_path_in_archive.count("/")} '
                f'{kernel_path_in_archive}',
                f'tar -zxvf {archive_path} -C {initrd_path.parent} '
                f'--strip-components={initrd_path_in_archive.count("/")} '
                f'{initrd_path_in_archive}',
            ],
            sudo=True,
            host=host,
            state=state)

    #
    # Render Legacy Preseed Config
    #
    legacy_preseed_manual_path = host.data.pxe.http.root_dir.joinpath(
        'legacy-preseed-manual.seed')
    legacy_preseed_auto_dir = host.data.pxe.http.root_dir.joinpath(
        'legacy-preseed-auto')
    net_image_disk_path = iso_mount_path / 'install' / 'filesystem.squashfs'
    # HTTP-relative path to the squashfs inside the mounted ISO; consumed
    # by the preseed/GRUB templates below.
    net_image_http_path = str(
        Path(host.data.pxe.installer.image_source_url.path.lstrip(
            '/')).parent.joinpath('mnt', 'install', 'filesystem.squashfs'))
    server.shell(name='Check that squashfs file exists',
                 commands=[
                     f'test -f {net_image_disk_path}',
                 ],
                 sudo=True,
                 host=host,
                 state=state)
    # NOTE(review): legacy_preseed_manual_path already includes
    # http.root_dir; joining root_dir with it again only works because
    # pathlib discards the left operand when the right operand is
    # absolute — confirm root_dir is always absolute.
    files.template(
        name='Render legacy preseed config',
        src=str(deploy_dir / 'templates' /
                'legacy-preseed-manual.seed.j2'),
        dest=str(host.data.pxe.http.root_dir / legacy_preseed_manual_path),
        sudo=True,
        pxe=host.data.pxe,
        net_image_http_path=net_image_http_path,
        host=host,
        state=state,
    )

    #
    # Render GRUB2 config
    #
    installer_path = Path(
        host.data.pxe.installer.netboot_source_url.path.lstrip('/'))
    files.template(
        name='Render GRUB config',
        src=str(deploy_dir / 'templates' / 'grub2.legacy-netboot.cfg.j2'),
        dest=str(host.data.pxe.tftp.root_dir / 'grub' / 'grub.cfg'),
        sudo=True,
        initrd_filename=Path(initrd_path).name,
        installer_path=installer_path,
        kernel_filename=kernel_path.name,
        legacy_preseed_auto_dir=legacy_preseed_auto_dir.stem,
        legacy_preseed_manual_path=legacy_preseed_manual_path.name,
        net_image_http_path=net_image_http_path,
        os_name=Path(host.data.pxe.installer.image_source_url.path).stem,
        pxe=host.data.pxe,
        host=host,
        state=state,
    )

    #
    # Render the machine-specific preseed files
    #
    # One auto-preseed file per machine, keyed by hostname.
    for machine in host.data.pxe.machines:
        machine_legacy_preseed_path = legacy_preseed_auto_dir / machine.hostname
        files.template(
            name=f'Render {machine_legacy_preseed_path}',
            src=deploy_dir / 'templates' / 'legacy-preseed-auto.seed.j2',
            dest=machine_legacy_preseed_path,
            create_remote_dir=True,
            sudo=True,
            machine=machine,
            pxe=host.data.pxe,
            host=host,
            state=state,
        )
def install_caddy(caddy_config: CaddyConfig, state=None, host=None):
    """Install the Caddy web server.

    When plugins are requested a custom Caddy build is downloaded to
    /usr/local/bin and a systemd unit is templated in; otherwise Caddy
    is installed from the upstream APT repository.

    :param caddy_config: Caddy settings (plugins, data_directory, log_file)
    :param state: pyinfra state object
    :param host: pyinfra host object
    :returns: True if the Caddy binary/package was installed or changed
    """
    # BUG FIX: caddy_user was previously assigned only inside the
    # plugins branch, so a plugin-less install with log_file configured
    # raised NameError in the log-directory block below.
    caddy_user = "******"
    if caddy_config.plugins:
        server.user(
            name="Create system user for Caddy",
            user=caddy_user,
            system=True,
            ensure_home=False,
            state=state,
            host=host,
        )
        caddy_install = files.download(
            name="Download custom build of Caddy",
            dest="/usr/local/bin/caddy",
            src=caddy_config.custom_download_url(),
            mode=DEFAULT_DIRECTORY_MODE,
            state=state,
            host=host,
        )
        files.directory(
            name="Create Caddy configuration directory",
            path="/etc/caddy/",
            user=caddy_user,
            group=caddy_user,
            present=True,
            recursive=True,
            state=state,
            host=host,
        )
        # BUG FIX: this operation previously duplicated the name
        # "Create Caddy configuration directory" although it manages the
        # data directory.
        files.directory(
            name="Create Caddy data directory",
            path=caddy_config.data_directory,
            user=caddy_user,
            group=caddy_user,
            present=True,
            recursive=True,
            state=state,
            host=host,
        )
        files.template(
            name="Create SystemD service definition for Caddy",
            dest="/usr/lib/systemd/system/caddy.service",
            src=Path(__file__).parent.joinpath("templates/caddy.service.j2"),
            state=state,
            host=host,
        )
    else:
        apt.key(
            name="Add Caddy repository GPG key",
            src="https://dl.cloudsmith.io/public/caddy/stable/gpg.key",
            state=state,
            host=host,
        )
        apt.repo(
            name="Set up Caddy APT repository",
            src="deb https://dl.cloudsmith.io/public/caddy/stable/deb/debian any-version main",  # noqa: E501
            present=True,
            filename="caddy.list",
            state=state,
            host=host,
        )
        caddy_install = apt.packages(
            name="Install Caddy from APT",
            packages=["caddy"],
            present=True,
            latest=True,
            update=True,
            state=state,
            host=host,
        )
    if caddy_config.log_file:
        # Typo fixed: operation name previously read "Crate Caddy log
        # directory".
        files.directory(
            name="Create Caddy log directory",
            path=caddy_config.log_file.parent,
            user=caddy_user,
            present=True,
            state=state,
            host=host,
        )
    return caddy_install.changed
from pyinfra import host
from pyinfra.operations import files

# Note: This requires files in the files/ directory.

SUDO = True

# Only RPM-based distros use the yum repo file.
if host.fact.linux_name in ['CentOS', 'RedHat']:
    files.download(
        name='Download the Docker repo file',
        src='https://download.docker.com/linux/centos/docker-ce.repo',
        dest='/etc/yum.repos.d/docker-ce.repo',
    )

# Upload the message-of-the-day file from the local files/ directory.
files.put(
    name='Update the message of the day file',
    src='files/motd',
    dest='/etc/motd',
    mode='644',
)

# prepare to do some maintenance
maintenance_line = 'SYSTEM IS DOWN FOR MAINTENANCE'
# files.line(
#     name='Add the down-for-maintenance line in /etc/motd',
#     '/etc/motd',
#     maintenance_line,
# )

# do some maintenance...

# Then, after the maintenance is done, remove the maintenance line