def TestGcloudUpToDate(self):
    """Test for gcloud/gsutil (some distros won't have this) and validate
    that their APIs are up to date.

    https://github.com/GoogleCloudPlatform/compute-image-tools/issues/400
    """
    # Firstly check if gcloud and gsutil are available at all.
    # The return codes and output are intentionally ignored here; only an
    # OSError (binary missing) matters at this stage.
    try:
        utils.Execute(['gcloud'], raise_errors=False)
        utils.Execute(['gsutil'], raise_errors=False)
    except OSError as e:
        if e.errno == 2:  # ENOENT: No such file or directory
            # Command is not available, skip this test.
            return
        # Bare raise preserves the original traceback (unlike `raise e`).
        raise

    # Avoid log output overload on centos-6.
    time.sleep(1)

    # Now test if their APIs are still valid; Execute raises on failure.
    utils.Execute(['gcloud', 'compute', 'images', 'list'])
    # Avoid log output overload on centos-6.
    time.sleep(1)
    utils.Execute(['gsutil', 'ls'])
    time.sleep(1)
def main():
    """Upload kickstart config and build logs from the installer disk.

    Mounts the installer disk (partition depends on UEFI vs BIOS), uploads
    ks.cfg and every build log to the daisy logs path, uploads the build
    synopsis to the daisy outs path, then unmounts.
    """
    raise_on_not_found = True
    logs_path = utils.GetMetadataAttribute('daisy-logs-path',
                                           raise_on_not_found)
    outs_path = utils.GetMetadataAttribute('daisy-outs-path',
                                           raise_on_not_found)
    uefi = utils.GetMetadataAttribute('rhel_uefi') == 'true'

    # Mount the installer disk; UEFI layouts keep the root on partition 2.
    if uefi:
        utils.Execute(['mount', '/dev/sdb2', '/mnt'])
    else:
        utils.Execute(['mount', '/dev/sdb1', '/mnt'])

    logging.info('Installer root: %s', os.listdir('/mnt'))
    logging.info('Build logs: %s', os.listdir('/mnt/build-logs'))

    utils.UploadFile('/mnt/ks.cfg', '%s/ks.cfg' % logs_path)
    directory = '/mnt/build-logs'
    for log in os.listdir(directory):
        # BUG FIX: os.listdir() returns bare names, so isfile() must be
        # given the full path; the old relative check was evaluated against
        # the CWD and silently skipped every log file.
        path = os.path.join(directory, log)
        if os.path.isfile(path):
            utils.UploadFile(path, '%s/%s' % (logs_path, log))
    utils.UploadFile('/mnt/build-logs/synopsis.json',
                     '%s/synopsis.json' % outs_path)
    utils.Execute(['umount', '-l', '/mnt'])
def TestPackageInstallation(self):
    """Verify that the apt repository is usable.

    Network instabilities can lead to errors when fetching the apt
    repository, so installing a package is worth trying if that happens.
    """
    # Refresh the index first, then force a (re)install of a small package;
    # Execute raises if either command fails.
    for apt_args in (['update'], ['install', '--reinstall', '-y', 'tree']):
        utils.Execute(['apt-get'] + apt_args)
def TestPackageInstallation(self):
    """Verify that the yum repository is usable.

    Network instabilities can lead to errors when fetching the yum
    repository, so installing a package is worth trying if that happens.
    """
    # Install something to test repository sanity; in case it was already
    # installed, ask for a reinstall just to be sure.
    for action in ('install', 'reinstall'):
        utils.Execute(['yum', '-y', action, 'tree'])
def main():
    """Customize the image on /dev/sdb, then relabel SELinux contexts."""
    device = '/dev/sdb'
    guest = diskutils.MountDisk(device)
    DistroSpecific(guest)
    utils.CommonRoutines(guest)
    diskutils.UnmountDisk(guest)
    # Relabel after unmounting so virt-customize sees a quiesced disk.
    utils.Execute(['virt-customize', '-a', device, '--selinux-relabel'])
def CheckNtpRun(cmd):
    """Run @cmd and check, if successful, whether a google server appears.

    Args:
        cmd: list of strings. Command to be passed to utils.Execute.

    Returns:
        bool. True if the ntp client exists and the google server is found
        in its output; False otherwise.
    """
    try:
        rc, out = utils.Execute(cmd, raise_errors=False, capture_output=True)
    except OSError:
        # The client binary itself is missing: treat as a regular failure.
        return False
    # rc == 0 means the ntp client ran; then look for Google's server.
    return rc == 0 and 'metadata.google' in out
def CheckSecurityParameter(key, desired_value):
    """Assert that kernel parameter @key is set to @desired_value.

    Args:
        key: str. sysctl parameter name.
        desired_value: int. Expected value of the parameter.

    Raises:
        Exception: if the parameter is missing or has a different value.
    """
    rc, output = utils.Execute(['sysctl', '-e', key], capture_output=True)
    # With -e, sysctl prints nothing for an unknown key instead of failing;
    # the old split("=")[1] then raised an opaque IndexError. Detect that
    # case explicitly and report the key.
    _, sep, value_text = output.partition('=')
    if not sep:
        raise Exception('Security Parameter %s could not be read' % key)
    actual_value = int(value_text)
    if actual_value != desired_value:
        raise Exception('Security Parameter %s is %d but expected %d' %
                        (key, actual_value, desired_value))
def TestRootPasswordDisabled(self):
    """Ensure the root password is disabled (/etc/master.passwd).

    The field is actually empty, and that is fine according to:
    https://forums.freebsd.org/threads/jails-default-root-password-is-empty-not-starred-out.37701/
    """
    # grep exits non-zero (and Execute raises) when the pattern is absent.
    utils.Execute(['grep', '^root::', '/etc/master.passwd'])
def main():
    """Translate the image on /dev/sdb, clean up, and relabel SELinux."""
    device = '/dev/sdb'
    guest = diskutils.MountDisk(device)
    run_translate(guest)
    utils.CommonRoutines(guest)
    cleanup(guest)
    # Relabel after cleanup so virt-customize works on the final contents.
    utils.Execute(['virt-customize', '-a', device, '--selinux-relabel'])
def gen_ssh_key():
    """Generate a disposable RSA key pair for testing.

    Returns:
        tuple (str, str): the metadata-style public-key entry
        ('tester:<pubkey>') and the generated key file name.
    """
    key_name = 'daisy-test-key-%s' % uuid.uuid4()
    # -N '' creates the key without a passphrase; -C sets a comment.
    utils.Execute(
        ['ssh-keygen', '-t', 'rsa', '-N', '', '-f', key_name, '-C', key_name])
    with open('%s.pub' % key_name, 'r') as pub_file:
        pub_data = pub_file.read()
    return 'tester:' + pub_data, key_name
def GetCurrentUsername():
    """Return the current POSIX username from the os-login profile."""
    # TODO: replace gcloud usage by python CLI
    cmd = [
        'gcloud', 'compute', 'os-login', 'describe-profile',
        '--format', 'value(posixAccounts.username)',
    ]
    _, username = utils.Execute(cmd, capture_output=True)
    return username.strip()
def main():
    """Install libguestfs, customize the image on /dev/sdb, and relabel."""
    utils.AptGetInstall(['libguestfs-tools'])
    device = '/dev/sdb'
    guest = diskutils.MountDisk(device)
    DistroSpecific(guest)
    utils.CommonRoutines(guest)
    diskutils.UnmountDisk(guest)
    # Relabel after unmounting so virt-customize sees a quiesced disk.
    utils.Execute(['virt-customize', '-a', device, '--selinux-relabel'])
def TestGcloudUpToDate(self):
    """Test for gcloud/gsutil (some distros won't have this) and validate
    that their APIs are up to date.

    https://github.com/GoogleCloudPlatform/compute-image-tools/issues/400
    """
    # Firstly check if gcloud and gsutil are available at all.
    rc_gcloud, _ = utils.Execute(['gcloud', 'info'], raise_errors=False)
    rc_gsutil, _ = utils.Execute(['gsutil', 'version'], raise_errors=False)
    if rc_gcloud != 0 or rc_gsutil != 0:
        # If these commands are not available, skip this test.
        return

    # Now test if their APIs are still valid; Execute raises on failure.
    utils.Execute(['gcloud', 'compute', 'images', 'list'])
    utils.Execute(['gsutil', 'ls'])
def TestNTPConfig(self):
    """Ensure that the NTP server is set to metadata.google.internal."""
    # Not the most pythonic approach, but grep is the easiest check here:
    # it exits non-zero (and Execute raises) when the line is missing.
    utils.Execute(
        ['grep', '^server \+metadata.google.internal', '/etc/ntp.conf'])
def main():
    """Upload kickstart config and build logs from the installer disk.

    Mounts the installer disk, uploads ks.cfg and every build log to the
    daisy logs path, uploads the build synopsis to the daisy outs path,
    then unmounts.
    """
    raise_on_not_found = True
    logs_path = utils.GetMetadataAttribute('daisy-logs-path',
                                           raise_on_not_found)
    outs_path = utils.GetMetadataAttribute('daisy-outs-path',
                                           raise_on_not_found)

    # Mount the installer disk.
    utils.Execute(['mount', '/dev/sdb1', '/mnt'])
    logging.info('Installer root: %s', os.listdir('/mnt'))
    logging.info('Build logs: %s', os.listdir('/mnt/build-logs'))

    utils.UploadFile('/mnt/ks.cfg', '%s/' % logs_path)
    directory = '/mnt/build-logs'
    for f in os.listdir(directory):
        # BUG FIX: os.listdir() yields bare names, so isfile() must be
        # given the full path; the old relative check was evaluated against
        # the CWD and silently skipped every log file.
        path = '%s/%s' % (directory, f)
        if os.path.isfile(path):
            utils.UploadFile(path, '%s/' % logs_path)
    utils.UploadFile('/mnt/build-logs/synopsis.json',
                     '%s/synopsis.json' % outs_path)
    utils.Execute(['umount', '-l', '/mnt'])
def TestRootPasswordDisabled(self):
    """Ensure the root password is disabled (/etc/shadow).

    As 'man shadow' describes: if the password field contains some string
    that is not a valid result of crypt(3), for instance ! or *, the user
    will not be able to use a unix password to log in.
    """
    # Not the most pythonic approach, but grep is the easiest check here:
    # it exits non-zero (and Execute raises) when no such entry exists.
    utils.Execute(['grep', '^root:[\\!*]', '/etc/shadow'])
def DistroSpecific(g):
    """Apply Ubuntu-specific import steps inside the mounted guest @g.

    When metadata 'install_gce_packages' is 'true', installs cloud-init,
    strips Azure/AWS cloud-init configs and agents, adds the Ubuntu partner
    repo and a GCE cloud-init config, runs cloud-init through a local http
    proxy, and installs the GCE guest packages. In all cases, forces
    serial-console logging via grub.

    Args:
        g: guestfs-like handle exposing command()/sh()/write().
    """
    ubu_release = utils.GetMetadataParam('ubuntu_release')
    install_gce = utils.GetMetadataParam('install_gce_packages')

    if install_gce == 'true':
        g.command(['apt-get', 'update'])
        logging.info('Installing cloud-init.')
        g.sh('DEBIAN_FRONTEND=noninteractive apt-get install -y'
             ' --no-install-recommends cloud-init')

        # Try to remove azure or aws configs so cloud-init has a chance.
        g.sh('rm -f /etc/cloud/cloud.cfg.d/*azure*')
        g.sh('rm -f /etc/cloud/cloud.cfg.d/*waagent*')
        g.sh('rm -f /etc/cloud/cloud.cfg.d/*walinuxagent*')
        g.sh('rm -f /etc/cloud/cloud.cfg.d/*aws*')
        g.sh('rm -f /etc/cloud/cloud.cfg.d/*amazon*')

        # Remove Azure agent. Best-effort: failure is logged, not fatal.
        try:
            g.command(
                ['apt-get', 'remove', '-y', '-f', 'waagent', 'walinuxagent'])
        except Exception as e:
            logging.debug(str(e))
            # NOTE(review): logging.warn is a deprecated alias of
            # logging.warning; kept as-is here.
            logging.warn('Could not uninstall Azure agent. Continuing anyway.')

        g.write('/etc/apt/sources.list.d/partner.list',
                partner_list.format(ubu_release=ubu_release))

        g.write('/etc/cloud/cloud.cfg.d/91-gce-system.cfg', gce_system)

        # Use host machine as http proxy so cloud-init can access GCE API.
        with open('/etc/tinyproxy/tinyproxy.conf', 'w') as cfg:
            cfg.write(tinyproxy_cfg)
        utils.Execute(['/etc/init.d/tinyproxy', 'restart'])

        # The guest's default gateway is the host; route cloud-init's HTTP
        # traffic through the tinyproxy instance started above.
        default_gw = g.sh("ip route | awk '/default/ { printf $3 }'")
        logging.debug(
            g.sh('http_proxy="http://%s:8888" cloud-init -d init' %
                 default_gw))

        logging.info('Installing GCE packages.')
        g.command(['apt-get', 'update'])
        g.sh(
            'DEBIAN_FRONTEND=noninteractive apt-get install -y'
            ' --no-install-recommends gce-compute-image-packages'
            ' google-cloud-sdk'
        )

    # Update grub config to log to console.
    g.command([
        'sed', '-i',
        r's#^\(GRUB_CMDLINE_LINUX=".*\)"$#\1 console=ttyS0,38400n8"#',
        '/etc/default/grub'
    ])
    g.command(['update-grub2'])
def TestAutomaticSecurityUpdates(self):
    """Verify yum-cron is installed, running, and configured as expected."""
    # The following command returns zero if the package is installed.
    utils.Execute(['yum', '--assumeno', 'install', 'yum-cron'])
    # service returns zero if the service exists and is running.
    utils.Execute(['service', 'yum-cron', 'status'])

    # Check yum-cron configuration.
    # Now this part is, unfortunately, different between RedHat 6 and 7.
    yum_cron_file, configs = self.GetYumCronConfig()
    for key in configs:
        command = ['grep', key, yum_cron_file]
        rc, output = utils.Execute(command, capture_output=True)
        # Get clean text after the '=' token.
        # NOTE(review): if '=' is absent, find() returns -1 and the slice
        # starts at 0 (whole line) — presumably grep output always has '=';
        # confirm against the config file format.
        cur_value = generic_distro.RemoveCommentAndStrip(
            output[output.find('=') + 1:])
        if configs[key] != cur_value:
            raise Exception(
                'Yum-cron config "%s" is "%s" but expected "%s"' %
                (key, cur_value, configs[key]))
def main():
    """Copy kickstart config and build logs off the installer disk."""
    logs_path = utils.GetMetadataParam('daisy-logs-path',
                                       raise_on_not_found=True)
    outs_path = utils.GetMetadataParam('daisy-outs-path',
                                       raise_on_not_found=True)

    # Mount the installer disk.
    utils.Execute(['mount', '-t', 'ext4', '/dev/sdb1', '/mnt'])
    logging.info('Installer root: %s', os.listdir('/mnt'))
    logging.info('Build logs: %s', os.listdir('/mnt/build-logs'))

    # For some reason we need to remove the gsutil credentials.
    utils.Execute(['rm', '-Rf', '/root/.gsutil'])

    # Each copy is best-effort (raise_errors=False): a missing source file
    # should not abort the remaining uploads.
    uploads = [
        ('/mnt/ks.cfg', '%s/' % logs_path),
        ('/mnt/build-logs/*', '%s/' % logs_path),
        ('/mnt/build-logs/synopsis.json', '%s/synopsis.json' % outs_path),
    ]
    for src, dest in uploads:
        utils.Execute(['gsutil', 'cp', src, dest], raise_errors=False)

    utils.Execute(['umount', '-l', '/mnt'])
def TestRsyslogConfig(self):
    """Ensure rsyslog is installed/configured and console logging works.

    Tests whether kernel and daemon messages are being logged to console.
    The hostname output itself is checked by the step
    "rsyslog-hostname-test".
    """
    messages = {
        'kern.info': 'RsyslogKernelConsoleTest',
        'daemon.info': 'RsyslogDaemonConsoleTest',
    }
    for priority, text in messages.items():
        utils.Execute(['logger', '-p', priority, text])
def get_physical_drives():
    """Return device paths of physical drives, e.g. ['/dev/sda', '/dev/sdb'].

    Falls back to assuming exactly two disks when lsblk fails.
    """
    # -e7 excludes loop devices; --nodeps skips partitions.
    rc, output = utils.Execute(
        ['lsblk', '--noheadings', '--output=NAME', '--paths', '--list',
         '--nodeps', '-e7'],
        capture_output=True)
    if rc == 0:
        # splitlines() has no trailing empty element, so no fragile
        # list.remove('') is needed (the old code raised ValueError when
        # the empty string was absent). Filter out blanks defensively.
        return [line for line in output.splitlines() if line]
    logging.info('Warning: Failed to execute \'lsblk\' cmd, '
                 'Continuing anyway assuming that there are only two disks.')
    return ['/dev/sda', '/dev/sdb']
def HasFoundConfig(config_file, key, value):
    """Return True if @value is found on a @key line of @config_file."""
    rc, output = utils.Execute(['grep', key, config_file],
                               capture_output=True)
    # Skip lines that are empty or comment-only once comments are stripped,
    # then scan the remaining (original) lines for the desired value.
    for line in output.split('\n'):
        if not generic_distro.RemoveCommentAndStrip(line):
            continue
        if value in line:
            # Found desired value.
            return True
    return False
def test_login(key, expect_fail=False):
    """Attempt SSH login to TESTEE, retrying up to 3 times.

    Args:
        key: str. Path to the private key file to authenticate with.
        expect_fail: bool. If True, the login is expected to be rejected.

    Raises:
        ValueError: if the observed result never matches the expectation.
    """
    for _ in range(3):
        ret, _out = utils.Execute(
            ['ssh', '-i', key, '-o', 'StrictHostKeyChecking=no', '-o',
             'UserKnownHostsFile=/dev/null', 'tester@' + TESTEE, 'echo',
             'Logged'],
            raise_errors=False)
        # Typo fix: 'Loging' -> 'Login' in both error messages.
        if expect_fail and ret == 0:
            error = 'SSH Login succeeded when expected to fail'
        elif not expect_fail and ret != 0:
            error = 'SSH Login failed when expected to succeed'
        else:
            return
        time.sleep(5)
    raise ValueError(error)
def TestNetworkInterfaceMTU(self):
    """Ensure that the network interface MTU is set to 1460."""
    desired_mtu = 1460
    # Parse ifconfig output, as BSD has no sysfs.
    _, output = utils.Execute(['ifconfig'], capture_output=True)
    token = 'mtu '
    for line in output.split('\n'):
        pos = line.find(token)
        if pos < 0:
            continue
        cur_mtu = int(line[pos + len(token):])
        if cur_mtu != desired_mtu:
            raise Exception('Network MTU is %d but expected %d' %
                            (cur_mtu, desired_mtu))
def TestAutomaticSecurityUpdates(self):
    """Check that unattended-upgrades is installed and has origins set."""
    package_name = 'unattended-upgrades'
    if not self.IsPackageInstalled(package_name):
        raise Exception('%s package is not installed' % package_name)

    # Check unattended upgrade configuration.
    _, output = utils.Execute(['unattended-upgrade', '-v'],
                              capture_output=True)
    token = "Allowed origins are:"
    for line in output.split('\n'):
        if line.find(token) >= 0 and len(line) > len(token):
            # There is some repository used for unattended upgrades.
            return
    raise Exception('No origin repository used by unattended-upgrade')
def main():
    """Build a UEFI EL installer disk on /dev/sdb from an ISO + kickstart.

    Reads build parameters from instance metadata, generates a kickstart
    config, partitions /dev/sdb as GPT (ESP + ext2 installer partition),
    copies the ISO boot files and kickstart over, rewrites grub.cfg for an
    unattended serial-console install, and writes the UEFI boot path file.
    """
    # Get Parameters.
    repo = utils.GetMetadataAttribute('google_cloud_repo',
                                      raise_on_not_found=True)
    release = utils.GetMetadataAttribute('el_release', raise_on_not_found=True)
    savelogs = utils.GetMetadataAttribute('el_savelogs',
                                          raise_on_not_found=False)
    savelogs = savelogs == 'true'
    byol = utils.GetMetadataAttribute('rhel_byol', raise_on_not_found=False)
    byol = byol == 'true'
    sap_hana = utils.GetMetadataAttribute('rhel_sap_hana',
                                          raise_on_not_found=False)
    sap_hana = sap_hana == 'true'
    sap_apps = utils.GetMetadataAttribute('rhel_sap_apps',
                                          raise_on_not_found=False)
    sap_apps = sap_apps == 'true'
    sap = utils.GetMetadataAttribute('rhel_sap', raise_on_not_found=False)
    sap = sap == 'true'

    logging.info('EL Release: %s' % release)
    logging.info('Google Cloud repo: %s' % repo)
    logging.info('Build working directory: %s' % os.getcwd())

    iso_file = 'installer.iso'

    # Necessary libs and tools to build the installer disk.
    utils.AptGetInstall(['dosfstools', 'rsync'])

    # Build the kickstart file.
    ks_content = ks_helpers.BuildKsConfig(release, repo, byol, sap, sap_hana,
                                          sap_apps)
    ks_cfg = 'ks.cfg'
    utils.WriteFile(ks_cfg, ks_content)

    # Write the installer disk. Write GPT label, create partitions
    # (FAT32 ESP + ext2 installer), copy installer boot files over.
    # Each step is followed by a sync to flush writes to the device.
    logging.info('Writing installer disk.')
    utils.Execute(['parted', '/dev/sdb', 'mklabel', 'gpt'])
    utils.Execute(['sync'])
    utils.Execute(['parted', '/dev/sdb', 'mkpart', 'primary', 'fat32', '1MB',
                   '201MB'])
    utils.Execute(['sync'])
    utils.Execute(['parted', '/dev/sdb', 'mkpart', 'primary', 'ext2', '201MB',
                   '100%'])
    utils.Execute(['sync'])
    utils.Execute(['parted', '/dev/sdb', 'set', '1', 'boot', 'on'])
    utils.Execute(['sync'])
    utils.Execute(['parted', '/dev/sdb', 'set', '1', 'esp', 'on'])
    utils.Execute(['sync'])
    utils.Execute(['mkfs.vfat', '-F', '32', '/dev/sdb1'])
    utils.Execute(['sync'])
    utils.Execute(['fatlabel', '/dev/sdb1', 'ESP'])
    utils.Execute(['sync'])
    utils.Execute(['mkfs.ext2', '-L', 'INSTALLER', '/dev/sdb2'])
    utils.Execute(['sync'])
    utils.Execute(['mkdir', '-vp', 'iso', 'installer', 'boot'])
    utils.Execute(['mount', '-o', 'ro,loop', '-t', 'iso9660', iso_file,
                   'iso'])
    utils.Execute(['mount', '-t', 'vfat', '/dev/sdb1', 'boot'])
    utils.Execute(['mount', '-t', 'ext2', '/dev/sdb2', 'installer'])
    utils.Execute(['rsync', '-Pav', 'iso/EFI', 'iso/images', 'boot/'])
    utils.Execute(['cp', iso_file, 'installer/'])
    utils.Execute(['cp', ks_cfg, 'installer/'])

    # Modify boot config: point grub at the ESP label, shorten the timeout,
    # select the first entry, and switch output to the serial console.
    with open('boot/EFI/BOOT/grub.cfg', 'r+') as f:
        oldcfg = f.read()
        cfg = re.sub(r'-l .RHEL.*', r"""-l 'ESP'""", oldcfg)
        cfg = re.sub(r'timeout=60', 'timeout=1', cfg)
        cfg = re.sub(r'set default=.*', 'set default="0"', cfg)
        cfg = re.sub(r'load_video\n',
                     r'serial --speed=38400 --unit=0 --word=8 '
                     '--parity=no\nterminal_input serial\nterminal_output '
                     'serial\n', cfg)

        # Change boot args: unattended text install from the kickstart file.
        args = ' '.join([
            'text', 'ks=hd:LABEL=INSTALLER:/%s' % ks_cfg,
            'console=ttyS0,38400n8', 'inst.sshd=1', 'inst.gpt'
        ])
        # Tell Anaconda not to store its logs in the installed image,
        # unless requested to keep them for debugging.
        if not savelogs:
            args += ' inst.nosave=all'
        cfg = re.sub(r'inst\.stage2.*', r'\g<0> %s' % args, cfg)

        # EL7-family images need the stage2 label rewritten as well.
        if release in ['centos7', 'rhel7', 'oraclelinux7']:
            cfg = re.sub(r'LABEL=[^ :]+', 'LABEL=INSTALLER', cfg)

        # Print out the modifications.
        diff = difflib.Differ().compare(oldcfg.splitlines(1),
                                        cfg.splitlines(1))
        logging.info('Modified grub.cfg:\n%s' % '\n'.join(diff))

        # Rewrite the file in place with the modified config.
        f.seek(0)
        f.write(cfg)
        f.truncate()

    # Write the boot path file consumed by the UEFI boot flow.
    logging.info("Creating boot path file\n")
    utils.Execute(['mkdir', '-p', 'boot/EFI/Google/gsetup'])
    with open('boot/EFI/Google/gsetup/boot', 'w') as g:
        g.write("\\EFI\\BOOT\\BOOTX64.EFI\n")

    utils.Execute(['umount', 'installer'])
    utils.Execute(['umount', 'iso'])
    utils.Execute(['umount', 'boot'])
def main():
    """Build a Debian GCE image with FAI and upload it as root.tar.gz.

    Downloads debian-cloud-images at a pinned version, overlays our FAI
    config, runs fai-diskimage, tars the resulting disk.raw, and uploads
    the tarball to the daisy outs path.
    """
    # Get Parameters.
    build_date = utils.GetMetadataAttribute('build_date',
                                            raise_on_not_found=True)
    debian_cloud_images_version = utils.GetMetadataAttribute(
        'debian_cloud_images_version', raise_on_not_found=True)
    debian_version = utils.GetMetadataAttribute('debian_version',
                                                raise_on_not_found=True)
    outs_path = utils.GetMetadataAttribute('daisy-outs-path',
                                           raise_on_not_found=True)

    logging.info('debian-cloud-images version: %s',
                 debian_cloud_images_version)
    logging.info('debian version: %s', debian_version)

    # Force an apt-get update before the next install.
    utils.AptGetInstall.first_run = True
    utils.AptGetInstall(['fai-server', 'fai-setup-storage'])

    # Download and set up debian's debian-cloud-images scripts.
    url_params = {
        'project': 'debian-cloud-images',
        'version': debian_cloud_images_version,
    }
    url_params['filename'] = '%(project)s-%(version)s' % url_params
    url = ('https://salsa.debian.org/cloud-team/'
           '%(project)s/-/archive/%(version)s/%(filename)s.tar.gz'
           % url_params)
    logging.info('Downloading %(project)s at version %(version)s', url_params)
    urllib.request.urlretrieve(url, 'fci.tar.gz')
    with tarfile.open('fci.tar.gz') as tar:
        tar.extractall()
    logging.info('Downloaded and extracted %s.', url)

    # Copy our classes to the FAI config space.
    work_dir = url_params['filename']
    # BUG FIX: os.getcwd() has no trailing separator, so the previous plain
    # concatenation (os.getcwd() + work_dir) produced a malformed path like
    # '/workdirdebian-cloud-images-.../config_space/'.
    config_space = os.path.join(os.getcwd(), work_dir, 'config_space') + '/'
    mycopytree('/files/fai_config', config_space)

    # Remove failing test method for now.
    os.remove(config_space + 'hooks/tests.CLOUD')

    # Config fai-tool.
    fai_classes = [
        'DEBIAN', 'CLOUD', 'GCE', 'GCE_SDK', 'AMD64', 'GRUB_CLOUD_AMD64',
        'LINUX_IMAGE_CLOUD', 'GCE_SPECIFIC', 'GCE_CLEAN'
    ]
    if debian_version == 'buster':
        fai_classes += ['BUSTER', 'BACKPORTS']
    elif debian_version == 'bullseye':
        fai_classes += ['BULLSEYE']
    elif debian_version == 'sid':
        fai_classes += ['SID']
    image_size = '10G'
    disk_name = 'disk.raw'

    # Run fai-tool.
    cmd = [
        'fai-diskimage', '--verbose', '--hostname', 'debian', '--class',
        ','.join(fai_classes), '--size', image_size, '--cspace',
        config_space, disk_name
    ]
    logging.info('Starting build in %s with params: %s', work_dir,
                 ' '.join(cmd))
    utils.Execute(cmd, cwd=work_dir, capture_output=True)

    # Pack a gzipped tar file with disk.raw inside; the context manager
    # guarantees the archive is closed (and flushed) before upload.
    disk_tar_gz = 'debian-{}-{}.tar.gz'.format(debian_version, build_date)
    logging.info('Compressing it into tarball %s', disk_tar_gz)
    with tarfile.open(disk_tar_gz, 'w:gz') as tar:
        tar.add('%s/disk.raw' % work_dir, arcname='disk.raw')

    # Upload tar.
    image_dest = os.path.join(outs_path, 'root.tar.gz')
    logging.info('Saving %s to %s', disk_tar_gz, image_dest)
    utils.UploadFile(disk_tar_gz, image_dest)
def main():
    """Build a Debian image with bootstrap-vz and upload it plus a synopsis.

    Downloads bootstrap-vz at a pinned commit, optionally injects the
    Google Cloud test-repo plugin (for non-stable repos), runs the build,
    uploads the resulting disk tarball, and records the installed package
    list as synopsis.json.
    """
    # Get Parameters.
    bvz_manifest = utils.GetMetadataAttribute(
        'bootstrap_vz_manifest', raise_on_not_found=True)
    bvz_version = utils.GetMetadataAttribute(
        'bootstrap_vz_version', raise_on_not_found=True)
    repo = utils.GetMetadataAttribute('google_cloud_repo',
                                      raise_on_not_found=True).strip()
    image_dest = utils.GetMetadataAttribute('image_dest',
                                            raise_on_not_found=True)
    outs_path = utils.GetMetadataAttribute('daisy-outs-path',
                                           raise_on_not_found=True)
    if repo not in REPOS:
        raise ValueError(
            'Metadata "google_cloud_repo" must be one of %s.' % REPOS)

    logging.info('Bootstrap_vz manifest: %s' % bvz_manifest)
    logging.info('Bootstrap_vz version: %s' % bvz_version)
    logging.info('Google Cloud repo: %s' % repo)

    # Download and setup bootstrap_vz.
    bvz_url = 'https://github.com/andsens/bootstrap-vz/archive/%s.zip'
    bvz_url %= bvz_version
    bvz_zip_dir = 'bvz_zip'
    logging.info('Downloading bootstrap-vz at commit %s' % bvz_version)
    # NOTE(review): urllib.urlretrieve is the Python 2 API; this script
    # presumably runs under Python 2.
    urllib.urlretrieve(bvz_url, 'bvz.zip')
    with zipfile.ZipFile('bvz.zip', 'r') as z:
        z.extractall(bvz_zip_dir)
    logging.info('Downloaded and extracted %s to bvz.zip.' % bvz_url)

    # The archive contains a single top-level directory; move it to BVZ_DIR.
    bvz_zip_contents = [d for d in os.listdir(bvz_zip_dir)]
    bvz_zip_subdir = os.path.join(bvz_zip_dir, bvz_zip_contents[0])
    utils.Execute(['mv', bvz_zip_subdir, BVZ_DIR])
    logging.info('Moved bootstrap_vz from %s to %s.' %
                 (bvz_zip_subdir, BVZ_DIR))
    bvz_bin = os.path.join(BVZ_DIR, 'bootstrap-vz')
    utils.MakeExecutable(bvz_bin)
    logging.info('Made %s executable.' % bvz_bin)
    bvz_manifest_file = os.path.join(BVZ_DIR, 'manifests', bvz_manifest)

    # Inject Google Cloud test repo plugin if using staging or unstable
    # repos. This is used to test new package releases in images.
    if repo != 'stable':
        logging.info('Adding Google Cloud test repos plugin for bootstrapvz.')
        repo_plugin_dir = '/files/google_cloud_test_repos'
        bvz_plugins = os.path.join(BVZ_DIR, 'bootstrapvz', 'plugins')
        shutil.move(repo_plugin_dir, bvz_plugins)

        with open(bvz_manifest_file, 'r+') as manifest_file:
            # NOTE(review): yaml.load without an explicit Loader is unsafe
            # on untrusted input; this manifest is repo-controlled, but
            # yaml.safe_load would be preferable.
            manifest_data = yaml.load(manifest_file)
            manifest_plugins = manifest_data['plugins']
            manifest_plugins['google_cloud_test_repos'] = {repo: True}
            manifest_yaml = yaml.dump(manifest_data, default_flow_style=False)
            # NOTE(review): after the full read, the file position is at
            # EOF, so this write appends the regenerated YAML to the
            # original content — presumably the later document wins when
            # parsed; confirm against bootstrap-vz's manifest loader.
            manifest_file.write(manifest_yaml)

    # Run bootstrap_vz build.
    cmd = [bvz_bin, '--debug', bvz_manifest_file]
    logging.info('Starting build in %s with params: %s' % (BVZ_DIR, str(cmd)))
    utils.Execute(cmd, cwd=BVZ_DIR)

    # Upload tar.
    image_tar_gz = '/target/disk.tar.gz'
    if os.path.exists(image_tar_gz):
        logging.info('Saving %s to %s' % (image_tar_gz, image_dest))
        utils.UploadFile(image_tar_gz, image_dest)

    # Create and upload the synopsis of the image: the full list of
    # installed packages with their versions, as reported by dpkg-query.
    logging.info('Creating image synopsis.')
    synopsis = {}
    packages = collections.OrderedDict()
    _, output = utils.Execute(['dpkg-query', '-W'], capture_output=True)
    for line in output.split('\n')[:-1]:  # Last line is an empty line.
        parts = line.split()
        packages[parts[0]] = parts[1]
    synopsis['installed_packages'] = packages
    with open('/tmp/synopsis.json', 'w') as f:
        f.write(json.dumps(synopsis))
    logging.info('Uploading image synopsis.')
    synopsis_dest = os.path.join(outs_path, 'synopsis.json')
    utils.UploadFile('/tmp/synopsis.json', synopsis_dest)
def main():
    """Build a BIOS (extlinux) EL installer disk on /dev/sdb.

    Reads build parameters from instance metadata, generates a kickstart
    config, partitions /dev/sdb with an MBR + extlinux bootloader, copies
    the ISO boot files and kickstart over, and rewrites extlinux.conf for
    an unattended serial-console install.
    """
    # Get Parameters.
    repo = utils.GetMetadataAttribute('google_cloud_repo',
                                      raise_on_not_found=True)
    release = utils.GetMetadataAttribute('el_release', raise_on_not_found=True)
    savelogs = utils.GetMetadataAttribute('el_savelogs',
                                          raise_on_not_found=False) == 'true'
    byos = utils.GetMetadataAttribute('rhel_byos',
                                      raise_on_not_found=False) == 'true'
    sap = utils.GetMetadataAttribute('rhel_sap',
                                     raise_on_not_found=False) == 'true'
    nge = utils.GetMetadataAttribute('new_guest',
                                     raise_on_not_found=False) == 'true'

    logging.info('EL Release: %s' % release)
    logging.info('Google Cloud repo: %s' % repo)
    logging.info('Build working directory: %s' % os.getcwd())

    iso_file = '/files/installer.iso'

    # Necessary libs and tools to build the installer disk.
    utils.AptGetInstall(['extlinux', 'rsync'])

    # Build the kickstart file (BIOS build, so uefi=False).
    uefi = False
    ks_content = ks_helpers.BuildKsConfig(release, repo, byos, sap, uefi, nge)
    ks_cfg = 'ks.cfg'
    utils.WriteFile(ks_cfg, ks_content)

    # Write the installer disk. Write extlinux MBR, create partition,
    # copy installer ISO and ISO boot files over. Each step is followed by
    # a sync to flush writes to the device.
    logging.info('Writing installer disk.')
    utils.Execute(['parted', '/dev/sdb', 'mklabel', 'msdos'])
    utils.Execute(['sync'])
    utils.Execute(['parted', '/dev/sdb', 'mkpart', 'primary', '1MB', '100%'])
    utils.Execute(['sync'])
    utils.Execute(['parted', '/dev/sdb', 'set', '1', 'boot', 'on'])
    utils.Execute(['sync'])
    utils.Execute(['dd', 'if=/usr/lib/EXTLINUX/mbr.bin', 'of=/dev/sdb'])
    utils.Execute(['sync'])
    utils.Execute(['mkfs.ext2', '-L', 'INSTALLER', '/dev/sdb1'])
    utils.Execute(['sync'])
    utils.Execute(['mkdir', 'iso', 'installer'])
    utils.Execute(['mount', '-o', 'ro,loop', '-t', 'iso9660', iso_file,
                   'iso'])
    utils.Execute(['mount', '-t', 'ext2', '/dev/sdb1', 'installer'])
    utils.Execute(
        ['rsync', '-Pav', 'iso/images', 'iso/isolinux', 'installer/'])
    utils.Execute(['cp', iso_file, 'installer/'])
    utils.Execute(['cp', ks_cfg, 'installer/'])

    # Modify boot files on installer disk: rename isolinux to extlinux so
    # the extlinux bootloader picks up the config.
    utils.Execute(['mv', 'installer/isolinux', 'installer/extlinux'])
    utils.Execute([
        'mv', 'installer/extlinux/isolinux.cfg',
        'installer/extlinux/extlinux.conf'
    ])

    # Modify boot config.
    with open('installer/extlinux/extlinux.conf', 'r+') as f:
        oldcfg = f.read()
        cfg = re.sub(r'^default.*', r'default linux', oldcfg, count=1)

        # Change boot args: unattended text install from the kickstart file.
        args = ' '.join([
            'text', 'ks=hd:/dev/sda1:/%s' % ks_cfg, 'console=ttyS0,38400n8',
            'loglevel=debug'
        ])
        # Tell Anaconda not to store its logs in the installed image,
        # unless requested to keep them for debugging.
        if not savelogs:
            args += ' inst.nosave=all'
        cfg = re.sub(r'append initrd=initrd\.img.*', r'\g<0> %s' % args, cfg)

        # Change labels to explicit partitions.
        if release.startswith(('centos7', 'rhel7', 'rhel-7', 'oraclelinux7',
                               'centos8', 'rhel8')):
            cfg = re.sub(r'LABEL=[^ ]+', 'LABEL=INSTALLER', cfg)

        # Print out the modifications.
        diff = difflib.Differ().compare(oldcfg.splitlines(1),
                                        cfg.splitlines(1))
        logging.info('Modified extlinux.conf:\n%s' % '\n'.join(diff))

        # Rewrite the file in place with the modified config.
        f.seek(0)
        f.write(cfg)
        f.truncate()

    # Activate extlinux.
    utils.Execute(['extlinux', '--install', 'installer/extlinux'])
def DistroSpecific(g):
    """Apply Ubuntu-specific import steps inside the mounted guest @g.

    Resets DNS/network config per release, and — when metadata
    'install_gce_packages' is 'true' — installs cloud-init, strips
    Azure/AWS cloud-init configs and agents, adds the Ubuntu partner repo
    and a GCE cloud-init config, runs cloud-init through a local http
    proxy, and installs the GCE guest packages. In all cases, forces
    serial-console logging via grub.

    Args:
        g: guestfs-like handle exposing command()/sh()/write().
    """
    ubu_release = utils.GetMetadataAttribute('ubuntu_release')
    install_gce = utils.GetMetadataAttribute('install_gce_packages')

    # Remove any hard coded DNS settings in resolvconf.
    if ubu_release != 'bionic':
        logging.info('Resetting resolvconf base.')
        g.sh('echo "" > /etc/resolvconf/resolv.conf.d/base')

    # Try to reset the network to DHCP.
    if ubu_release == 'trusty':
        g.write('/etc/network/interfaces', trusty_network)
    elif ubu_release == 'xenial':
        g.write('/etc/network/interfaces', xenial_network)

    if install_gce == 'true':
        g.command(['apt-get', 'update'])
        logging.info('Installing cloud-init.')
        g.sh('DEBIAN_FRONTEND=noninteractive apt-get install -y'
             ' --no-install-recommends cloud-init')

        # Try to remove azure or aws configs so cloud-init has a chance.
        g.sh('rm -f /etc/cloud/cloud.cfg.d/*azure*')
        g.sh('rm -f /etc/cloud/cloud.cfg.d/*curtin*')
        g.sh('rm -f /etc/cloud/cloud.cfg.d/*waagent*')
        g.sh('rm -f /etc/cloud/cloud.cfg.d/*walinuxagent*')
        g.sh('rm -f /etc/cloud/cloud.cfg.d/*aws*')
        g.sh('rm -f /etc/cloud/cloud.cfg.d/*amazon*')

        # Bionic uses netplan; drop its configs and reset cloud-init state.
        if ubu_release == 'bionic':
            g.sh('rm -f /etc/netplan/*')
            logging.debug(g.sh('cloud-init clean'))

        remove_azure_agents(g)

        g.write('/etc/apt/sources.list.d/partner.list',
                partner_list.format(ubu_release=ubu_release))

        g.write('/etc/cloud/cloud.cfg.d/91-gce-system.cfg', gce_system)

        # Use host machine as http proxy so cloud-init can access GCE API.
        with open('/etc/tinyproxy/tinyproxy.conf', 'w') as cfg:
            cfg.write(tinyproxy_cfg)
        utils.Execute(['/etc/init.d/tinyproxy', 'restart'])

        # The guest's default gateway is the host; route cloud-init's HTTP
        # traffic through the tinyproxy instance started above.
        default_gw = g.sh("ip route | awk '/default/ { printf $3 }'")
        logging.debug(
            g.sh('http_proxy="http://%s:8888" cloud-init -d init' %
                 default_gw))

        logging.info('Installing GCE packages.')
        g.command(['apt-get', 'update'])
        g.sh(
            'DEBIAN_FRONTEND=noninteractive apt-get install -y'
            ' --no-install-recommends gce-compute-image-packages'
            ' google-cloud-sdk'
        )

    # Update grub config to log to console.
    g.command([
        'sed', '-i',
        r's#^\(GRUB_CMDLINE_LINUX=".*\)"$#\1 console=ttyS0,38400n8"#',
        '/etc/default/grub'
    ])
    g.command(['update-grub2'])