def DistroSpecific(g):
    """Translate an imported EL (RHEL/CentOS) guest so it boots on GCE.

    Args:
        g: guestfs handle with the guest's root filesystem mounted.

    Depending on instance metadata this swaps RHUI licensing over to GCE's,
    installs the GCE guest environment (with an SCL-based Cloud SDK on EL6),
    rebuilds the initramfs, rewrites the grub config for the serial console,
    and resets eth0 networking to DHCP.
    """
    el_release = utils.GetMetadataAttribute('el_release')
    install_gce = utils.GetMetadataAttribute('install_gce_packages')
    rhel_license = utils.GetMetadataAttribute('use_rhel_gce_license')

    # This must be performed prior to making network calls from the guest.
    # Otherwise, if /etc/resolv.conf is present, and has an immutable attribute,
    # guestfs will fail with:
    #
    #   rename: /sysroot/etc/resolv.conf to
    #     /sysroot/etc/i9r7obu6: Operation not permitted
    utils.common.ClearEtcResolv(g)

    # Replace any existing RHUI client with GCE's so the guest can reach
    # Google's RHEL package mirrors.
    if rhel_license == 'true':
        if 'Red Hat' in g.cat('/etc/redhat-release'):
            g.command(['yum', 'remove', '-y', '*rhui*'])
            logging.info('Adding in GCE RHUI package.')
            g.write('/etc/yum.repos.d/google-cloud.repo',
                    repo_compute % el_release)
            yum_install(g, 'google-rhui-client-rhel' + el_release)

    if install_gce == 'true':
        logging.info('Installing GCE packages.')
        g.write('/etc/yum.repos.d/google-cloud.repo',
                repo_compute % el_release)
        if el_release == '6':
            if 'CentOS' in g.cat('/etc/redhat-release'):
                logging.info('Installing CentOS SCL.')
                g.command(['rm', '-f', '/etc/yum.repos.d/CentOS-SCL.repo'])
                yum_install(g, 'centos-release-scl')
            # Install Google Cloud SDK from the upstream tar and create links for the
            # python27 SCL environment.
            logging.info('Installing python27 from SCL.')
            yum_install(g, 'python27')
            g.command(
                ['scl', 'enable', 'python27', 'pip2.7 install --upgrade pip'])
            g.command([
                'scl', 'enable', 'python27',
                'pip2.7 install --upgrade google_compute_engine'
            ])

            logging.info('Installing Google Cloud SDK from tar.')
            sdk_base_url = 'https://dl.google.com/dl/cloudsdk/channels/rapid'
            sdk_base_tar = '%s/google-cloud-sdk.tar.gz' % sdk_base_url
            # The unversioned tar is fetched only to read the current VERSION;
            # the versioned tar below is what actually gets installed.
            tar = utils.HttpGet(sdk_base_tar)
            g.write('/tmp/google-cloud-sdk.tar.gz', tar)
            g.command(
                ['tar', 'xzf', '/tmp/google-cloud-sdk.tar.gz', '-C', '/tmp'])
            sdk_version = g.cat('/tmp/google-cloud-sdk/VERSION').strip()

            logging.info('Getting Cloud SDK Version %s', sdk_version)
            sdk_version_tar = 'google-cloud-sdk-%s-linux-x86_64.tar.gz' % sdk_version
            sdk_version_tar_url = '%s/downloads/%s' % (sdk_base_url,
                                                       sdk_version_tar)
            logging.info('Getting versioned Cloud SDK tar file from %s',
                         sdk_version_tar_url)
            tar = utils.HttpGet(sdk_version_tar_url)
            sdk_version_tar_file = os.path.join('/tmp', sdk_version_tar)
            g.write(sdk_version_tar_file, tar)
            g.mkdir_p('/usr/local/share/google')
            g.command([
                'tar', 'xzf', sdk_version_tar_file, '-C',
                '/usr/local/share/google', '--no-same-owner'
            ])

            logging.info('Creating CloudSDK SCL symlinks.')
            sdk_bin_path = '/usr/local/share/google/google-cloud-sdk/bin'
            g.ln_s(os.path.join(sdk_bin_path, 'git-credential-gcloud.sh'),
                   os.path.join('/usr/bin', 'git-credential-gcloud.sh'))
            # Wrap each SDK binary in a shell stub that enables the python27
            # SCL runtime first, since EL6's system python is too old.
            for binary in ['bq', 'gcloud', 'gsutil']:
                binary_path = os.path.join(sdk_bin_path, binary)
                new_bin_path = os.path.join('/usr/bin', binary)
                bin_str = '#!/bin/bash\nsource /opt/rh/python27/enable\n%s $@' % \
                    binary_path
                g.write(new_bin_path, bin_str)
                # guestfs chmod takes (mode, path), unlike os.chmod.
                g.chmod(0o755, new_bin_path)
        else:
            # EL7+: the Cloud SDK is installable straight from Google's repo.
            g.write_append('/etc/yum.repos.d/google-cloud.repo',
                           repo_sdk % el_release)
            yum_install(g, 'google-cloud-sdk')

        # EL8 uses the python3 guest-environment bindings.
        if el_release == '8':
            yum_install(g, 'google-compute-engine',
                        'python3-google-compute-engine')
        else:
            yum_install(g, 'google-compute-engine',
                        'python-google-compute-engine')

    logging.info('Updating initramfs')
    for kver in g.ls('/lib/modules'):
        # dracut needs modules.dep; generate it first if it's missing.
        if not g.exists(os.path.join('/lib/modules', kver, 'modules.dep')):
            g.command(['depmod', kver])
        if el_release == '6':
            # Version 6 doesn't have option --kver
            g.command(['dracut', '-v', '-f', kver])
        else:
            g.command(['dracut', '--stdlog=1', '-f', '--kver', kver])

    logging.info('Update grub configuration')
    if el_release == '6':
        # Version 6 doesn't have grub2, file grub.conf needs to be updated by hand
        g.write('/tmp/grub_gce_generated', grub_cfg)
        g.sh(r'grep -P "^[\t ]*initrd|^[\t ]*root|^[\t ]*kernel|^[\t ]*title" '
             r'/boot/grub/grub.conf >> /tmp/grub_gce_generated;'
             r'sed -i "s/console=ttyS0[^ ]*//g" /tmp/grub_gce_generated;'
             r'sed -i "/^[\t ]*kernel/s/$/ console=ttyS0,38400n8/" '
             r'/tmp/grub_gce_generated;'
             r'mv /tmp/grub_gce_generated /boot/grub/grub.conf')
    else:
        g.write('/etc/default/grub', grub2_cfg)
        g.command(['grub2-mkconfig', '-o', '/boot/grub2/grub.cfg'])

    # Reset network for DHCP.
    logging.info('Resetting network to DHCP for eth0.')
    # Remove NetworkManager-config-server if it's present. The package configures
    # NetworkManager to *not* use DHCP.
    #  https://access.redhat.com/solutions/894763
    g.command(['yum', 'remove', '-y', 'NetworkManager-config-server'])
    g.write('/etc/sysconfig/network-scripts/ifcfg-eth0', ifcfg_eth0)
# ---- Beispiel #2 ----
def main():
    """Export metadata (id, name, version, guest packages) of a built image.

    Mounts the built image's root partition from /dev/sdb, queries the
    installed guest-environment packages via chroot, and uploads the
    resulting JSON document to the daisy outs path.
    """
    # Get parameters from instance metadata.
    image_id = utils.GetMetadataAttribute('image_id')
    image_name = utils.GetMetadataAttribute('image_name')
    image_family = utils.GetMetadataAttribute('image_family')
    distribution = utils.GetMetadataAttribute('distribution',
                                              raise_on_not_found=True)
    uefi = utils.GetMetadataAttribute('uefi', 'false').lower() == 'true'
    outs_path = utils.GetMetadataAttribute('daisy-outs-path')

    logging.info('Creating upload metadata of the image and packages.')

    utc_time = datetime.datetime.now(datetime.timezone.utc)
    # Image names are assumed to end in "...v<version>": take everything
    # after the last "v".
    image_version = image_name.rsplit("v")[-1]
    publish_date = utc_time.astimezone().isoformat()
    image = {
        'id': image_id,
        'name': image_name,
        'family': image_family,
        'version': image_version,
        'publish_date': publish_date,
        'packages': [],
    }

    # All the guest environment packages maintained by guest-os team.
    guest_packages = [
        'google-compute-engine',
        'google-compute-engine-oslogin',
        'google-guest-agent',
        'google-osconfig-agent',
        'gce-disk-expand',
    ]

    # This assumes that:
    # 1. /dev/sdb1 is the EFI system partition.
    # 2. /dev/sdb2 is the root mount for the installed system.
    # Except for debian 10, which has out-of-order partitions.
    if uefi and 'debian-10' not in image_family:
        mount_disk = '/dev/sdb2'
    else:
        mount_disk = '/dev/sdb1'
    # check=False: a failed mount surfaces later when the chroot queries fail.
    subprocess.run(['mount', mount_disk, '/mnt'], check=False)
    logging.info('Mount %s device to /mnt', mount_disk)

    if distribution == 'enterprise_linux':
        # chroot prevents access to /dev/random and /dev/urandom (as designed).
        # The rpm required those random bits to initialize GnuTLS otherwise
        # error: Failed to initialize NSS library.
        subprocess.run(['mount', '-o', 'bind', '/dev', '/mnt/dev'],
                       check=False)

    if distribution == 'debian':
        #  This package is debian-only.
        guest_packages.append('google-cloud-packages-archive-keyring')
        # The blank line in the format leaves the epoch slot empty for
        # debian packages; ${Git} is presumably a custom dpkg field --
        # TODO confirm against the package builds.
        cmd_prefix = [
            'chroot', '/mnt', 'dpkg-query', '-W', '--showformat',
            '${Package}\n\n${Version}\n${Git}'
        ]
    elif distribution == 'enterprise_linux':
        cmd_prefix = [
            'chroot', '/mnt', 'rpm', '-q', '--queryformat',
            '%{NAME}\n%{EPOCH}\n%{VERSION}-%{RELEASE}\n%{VCS}'
        ]
    else:
        logging.error('Unknown Linux distribution.')
        return

    # Query each package; packages that aren't installed are logged and
    # skipped rather than failing the export.
    for package in guest_packages:
        try:
            process = subprocess.run(cmd_prefix + [package],
                                     capture_output=True,
                                     check=True)
        except subprocess.CalledProcessError as e:
            logging.info('failed to execute cmd: %s stdout: %s stderr: %s', e,
                         e.stdout, e.stderr)
            continue

        stdout = process.stdout.decode()

        # Expected layout: name, epoch, version, commit hash -- one per line
        # (maxsplit=3 keeps any remaining newlines inside commit_hash).
        try:
            package, epoch, version, commit_hash = stdout.split('\n', 3)
        except ValueError:
            logging.info('command result was malformed: %s', stdout)
            continue

        md = make_pkg_metadata(package, version, epoch, commit_hash)
        image['packages'].append(md)

    # Write image metadata to a file.
    # delete=False so f.name remains valid for the upload below.
    with tempfile.NamedTemporaryFile(mode='w', dir='/tmp', delete=False) as f:
        f.write(json.dumps(image))

    # We upload the result to the daisy outs path as well, to aid in
    # troubleshooting.
    logging.info('Uploading image metadata to daisy outs path.')
    try:
        utils.UploadFile(f.name, outs_path + "/metadata.json")
    except Exception as e:
        logging.error('Failed uploading metadata file %s', e)
        return

    # NOTE(review): `success` is not a stdlib logging method; presumably
    # added by the project's logging setup -- verify.
    logging.success('Export metadata was successful!')
def main():
    """Build a Debian image with bootstrap-vz and upload the result.

    Downloads the pinned bootstrap-vz revision, optionally injects the
    Google Cloud test-repos plugin (used to test staging/unstable package
    releases), runs the build, uploads the resulting tarball, and
    publishes a synopsis of the installed packages to the daisy outs path.

    Raises:
        ValueError: if the google_cloud_repo metadata value is not in REPOS.
    """
    # Get Parameters.
    bvz_manifest = utils.GetMetadataAttribute('bootstrap_vz_manifest',
                                              raise_on_not_found=True)
    bvz_version = utils.GetMetadataAttribute('bootstrap_vz_version',
                                             raise_on_not_found=True)
    repo = utils.GetMetadataAttribute('google_cloud_repo',
                                      raise_on_not_found=True).strip()
    image_dest = utils.GetMetadataAttribute('image_dest',
                                            raise_on_not_found=True)
    outs_path = utils.GetMetadataAttribute('daisy-outs-path',
                                           raise_on_not_found=True)
    if repo not in REPOS:
        raise ValueError('Metadata "google_cloud_repo" must be one of %s.' %
                         REPOS)

    logging.info('Bootstrap_vz manifest: %s' % bvz_manifest)
    logging.info('Bootstrap_vz version: %s' % bvz_version)
    logging.info('Google Cloud repo: %s' % repo)

    # Download and setup bootstrap_vz.
    bvz_url = 'https://github.com/andsens/bootstrap-vz/archive/%s.zip'
    bvz_url %= bvz_version
    bvz_zip_dir = 'bvz_zip'
    logging.info('Downloading bootstrap-vz at commit %s' % bvz_version)
    urllib.request.urlretrieve(bvz_url, 'bvz.zip')
    with zipfile.ZipFile('bvz.zip', 'r') as z:
        z.extractall(bvz_zip_dir)
    logging.info('Downloaded and extracted %s to bvz.zip.' % bvz_url)
    # The archive contains a single top-level directory named after the
    # commit; move it to the expected location.
    bvz_zip_contents = os.listdir(bvz_zip_dir)
    bvz_zip_subdir = os.path.join(bvz_zip_dir, bvz_zip_contents[0])
    utils.Execute(['mv', bvz_zip_subdir, BVZ_DIR])
    logging.info('Moved bootstrap_vz from %s to %s.' %
                 (bvz_zip_subdir, BVZ_DIR))
    bvz_bin = os.path.join(BVZ_DIR, 'bootstrap-vz')
    utils.MakeExecutable(bvz_bin)
    logging.info('Made %s executable.' % bvz_bin)
    bvz_manifest_file = os.path.join(BVZ_DIR, 'manifests', bvz_manifest)

    # Inject Google Cloud test repo plugin if using staging or unstable repos.
    # This is used to test new package releases in images.
    if repo != 'stable':
        logging.info('Adding Google Cloud test repos plugin for bootstrapvz.')
        repo_plugin_dir = '/files/google_cloud_test_repos'
        bvz_plugins = os.path.join(BVZ_DIR, 'bootstrapvz', 'plugins')
        shutil.move(repo_plugin_dir, bvz_plugins)

        with open(bvz_manifest_file, 'r+') as manifest_file:
            # safe_load: the manifest is plain YAML, and yaml.load without
            # an explicit Loader is deprecated and allows arbitrary object
            # construction.
            manifest_data = yaml.safe_load(manifest_file)
            manifest_plugins = manifest_data['plugins']
            manifest_plugins['google_cloud_test_repos'] = {repo: True}
            manifest_yaml = yaml.dump(manifest_data, default_flow_style=False)
            # NOTE(review): the file pointer sits at EOF after the load, so
            # this appends a second full copy of the manifest; PyYAML keeps
            # the last occurrence of duplicate keys, so the new values win.
            manifest_file.write(manifest_yaml)

    # Run bootstrap_vz build.
    cmd = [bvz_bin, '--debug', bvz_manifest_file]
    logging.info('Starting build in %s with params: %s' % (BVZ_DIR, str(cmd)))
    utils.Execute(cmd, cwd=BVZ_DIR)

    # Upload tar.
    image_tar_gz = '/target/disk.tar.gz'
    if os.path.exists(image_tar_gz):
        logging.info('Saving %s to %s' % (image_tar_gz, image_dest))
        utils.UploadFile(image_tar_gz, image_dest)

    # Create and upload the synopsis of the image.
    logging.info('Creating image synopsis.')
    synopsis = {}
    packages = collections.OrderedDict()
    _, output = utils.Execute(['dpkg-query', '-W'], capture_output=True)
    for line in output.split('\n')[:-1]:  # Last line is an empty line.
        parts = line.split()
        packages[parts[0]] = parts[1]
    synopsis['installed_packages'] = packages
    with open('/tmp/synopsis.json', 'w') as f:
        f.write(json.dumps(synopsis))
    logging.info('Uploading image synopsis.')
    synopsis_dest = os.path.join(outs_path, 'synopsis.json')
    utils.UploadFile('/tmp/synopsis.json', synopsis_dest)
# ---- Beispiel #4 ----
def main():
  """Build a UEFI (GPT/ESP) EL installer disk on /dev/sdb.

  Generates a kickstart config, partitions /dev/sdb into an ESP plus an
  ext2 installer partition, copies the boot files from the installer ISO,
  and rewrites grub.cfg for an unattended serial-console install.
  """
  # Get Parameters
  repo = utils.GetMetadataAttribute('google_cloud_repo',
                    raise_on_not_found=True)
  release = utils.GetMetadataAttribute('el_release', raise_on_not_found=True)
  daisy_logs_path = utils.GetMetadataAttribute('daisy-logs-path',
                                               raise_on_not_found=True)
  savelogs = utils.GetMetadataAttribute('el_savelogs') == 'true'
  byos = utils.GetMetadataAttribute('rhel_byos') == 'true'
  sap = utils.GetMetadataAttribute('rhel_sap') == 'true'
  uefi = utils.GetMetadataAttribute('rhel_uefi') == 'true'

  logging.info('EL Release: %s' % release)
  logging.info('Google Cloud repo: %s' % repo)
  logging.info('Build working directory: %s' % os.getcwd())

  iso_file = '/files/installer.iso'

  # Necessary libs and tools to build the installer disk.
  utils.AptGetInstall(['dosfstools', 'rsync'])

  # Build the kickstart file.
  ks_content = ks_helpers.BuildKsConfig(release, repo, byos, sap, uefi)
  ks_cfg = 'ks.cfg'
  utils.WriteFile(ks_cfg, ks_content)
  # Save the generated kickstart file to the build logs.
  utils.UploadFile(ks_cfg, '%s/ks.cfg' % daisy_logs_path)

  # Write the installer disk. Write GPT label, create partition,
  # copy installer boot files over.
  # The sync calls between steps let the kernel settle before re-reading
  # the partition table.
  logging.info('Writing installer disk.')
  utils.Execute(['parted', '/dev/sdb', 'mklabel', 'gpt'])
  utils.Execute(['sync'])
  utils.Execute(['parted', '/dev/sdb', 'mkpart', 'primary', 'fat32', '1MB',
                 '1024MB'])
  utils.Execute(['sync'])
  utils.Execute(['parted', '/dev/sdb', 'mkpart', 'primary', 'ext2', '1024MB',
                 '100%'])
  utils.Execute(['sync'])
  utils.Execute(['parted', '/dev/sdb', 'set', '1', 'boot', 'on'])
  utils.Execute(['sync'])
  utils.Execute(['parted', '/dev/sdb', 'set', '1', 'esp', 'on'])
  utils.Execute(['sync'])
  utils.Execute(['mkfs.vfat', '-F', '32', '/dev/sdb1'])
  utils.Execute(['sync'])
  utils.Execute(['fatlabel', '/dev/sdb1', 'ESP'])
  utils.Execute(['sync'])
  utils.Execute(['mkfs.ext2', '-L', 'INSTALLER', '/dev/sdb2'])
  utils.Execute(['sync'])

  utils.Execute(['mkdir', '-vp', 'iso', 'installer', 'boot'])
  utils.Execute(['mount', '-o', 'ro,loop', '-t', 'iso9660', iso_file, 'iso'])
  utils.Execute(['mount', '-t', 'vfat', '/dev/sdb1', 'boot'])
  utils.Execute(['mount', '-t', 'ext2', '/dev/sdb2', 'installer'])
  utils.Execute(['rsync', '-Pav', 'iso/EFI', 'iso/images', 'boot/'])
  utils.Execute(['cp', iso_file, 'installer/'])
  utils.Execute(['cp', ks_cfg, 'installer/'])

  # Modify boot config.
  with open('boot/EFI/BOOT/grub.cfg', 'r+') as f:
    oldcfg = f.read()
    # Point grub at the ESP label and boot the first entry almost
    # immediately.
    cfg = re.sub(r'-l .RHEL.*', r"""-l 'ESP'""", oldcfg)
    cfg = re.sub(r'timeout=60', 'timeout=1', cfg)
    cfg = re.sub(r'set default=.*', 'set default="0"', cfg)
    # Route grub's menu through the serial console.
    cfg = re.sub(r'load_video\n',
           r'serial --speed=38400 --unit=0 --word=8 --parity=no\n'
           'terminal_input serial\nterminal_output serial\n', cfg)

    # Change boot args.
    args = ' '.join([
      'text', 'ks=hd:LABEL=INSTALLER:/%s' % ks_cfg,
      'console=ttyS0,38400n8', 'inst.gpt', 'loglevel=debug'
    ])

    # Tell Anaconda not to store its logs in the installed image,
    # unless requested to keep them for debugging.
    if not savelogs:
      args += ' inst.nosave=all'
    cfg = re.sub(r'inst\.stage2.*', r'\g<0> %s' % args, cfg)

    # Change labels to explicit partitions.
    if release.startswith(('centos7', 'rhel7', 'rhel-7', 'oraclelinux7',
                           'centos8', 'rhel8', 'rhel-8')):
      cfg = re.sub(r'LABEL=[^ ]+', 'LABEL=INSTALLER', cfg)

    # Print out the modifications.
    diff = difflib.Differ().compare(
        oldcfg.splitlines(1),
        cfg.splitlines(1))
    logging.info('Modified grub.cfg:\n%s' % '\n'.join(diff))

    f.seek(0)
    f.write(cfg)
    f.truncate()

  utils.Execute(['umount', 'installer'])
  utils.Execute(['umount', 'iso'])
  utils.Execute(['umount', 'boot'])
# ---- Beispiel #5 ----
def main():
  """Build a Debian GCE image with FAI and upload the result.

  Downloads debian-cloud-images' FAI config space at a pinned version,
  runs fai-diskimage to produce disk.raw, packs it into a dated tarball,
  uploads it, and publishes a synopsis of the installed packages to the
  daisy outs path.
  """
  # Get Parameters.
  build_date = utils.GetMetadataAttribute(
      'build_date', raise_on_not_found=True)
  debian_cloud_images_version = utils.GetMetadataAttribute(
      'debian_cloud_images_version', raise_on_not_found=True)
  debian_version = utils.GetMetadataAttribute(
      'debian_version', raise_on_not_found=True)
  image_dest = utils.GetMetadataAttribute('image_dest',
      raise_on_not_found=True)
  outs_path = utils.GetMetadataAttribute('daisy-outs-path',
      raise_on_not_found=True)

  logging.info('debian-cloud-images version: %s' % debian_cloud_images_version)
  logging.info('debian version: %s' % debian_version)

  # force an apt-get update before next install
  utils.AptGetInstall.first_run = True

  debian_host_version = utils.Execute(['cat', '/etc/debian_version'],
          capture_output=True)
  # the FAI's version in stretch does not satisfy our need, so the version from
  # stretch-backports is needed.
  if debian_host_version[1].startswith('9'):
      utils.AptGetInstall(['fai-server', 'fai-setup-storage'],
              'stretch-backports')
  else:
      utils.AptGetInstall(['fai-server', 'fai-setup-storage'])

  # Download and setup debian's debian-cloud-images scripts.
  url_params = {
      'project': 'debian-cloud-images',
      'version': debian_cloud_images_version,
  }
  url_params['filename'] = '%(project)s-%(version)s' % url_params

  url = "https://salsa.debian.org/cloud-team/" + \
      "%(project)s/-/archive/%(version)s/%(filename)s.tar.gz" % url_params
  logging.info('Downloading %(project)s at version %(version)s' % url_params)
  urllib.request.urlretrieve(url, 'fci.tar.gz')
  with tarfile.open('fci.tar.gz') as tar:
    tar.extractall()
  logging.info('Downloaded and extracted %s.' % url)

  # Config fai-tool
  work_dir = url_params['filename']
  fai_classes = ['DEBIAN', 'CLOUD', 'GCE', 'GCE_SDK', 'AMD64',
          'GRUB_CLOUD_AMD64', 'LINUX_IMAGE_CLOUD']
  if debian_version == 'stretch':
      fai_classes += ['STRETCH', 'BACKPORTS', 'BACKPORTS_LINUX']
  elif debian_version == 'buster':
      fai_classes += ['BUSTER']
  elif debian_version == 'sid':
      fai_classes += ['SID']
  image_size = '10G'
  disk_name = 'disk.raw'
  # Bug fix: the original concatenated os.getcwd() and work_dir with no
  # path separator, which only produced a valid path when the working
  # directory was '/'. os.path.join builds the intended path regardless.
  config_space = os.path.join(os.getcwd(), work_dir, 'config_space') + '/'

  # Copy GCE_SPECIFIC fai class
  utils.Execute(['cp', '/files/fai_config/packages/GCE_SPECIFIC',
      config_space + 'package_config/GCE_SPECIFIC'])
  fai_classes += ['GCE_SPECIFIC']

  # Run fai-tool.
  cmd = ['fai-diskimage', '--verbose', '--hostname', 'debian', '--class',
     ','.join(fai_classes), '--size', image_size, '--cspace', config_space,
     disk_name]
  logging.info('Starting build in %s with params: %s' % (
      work_dir, ' '.join(cmd))
  )
  utils.Execute(cmd, cwd=work_dir, capture_output=True)

  # Packs a gzipped tar file with disk.raw inside.
  # Context manager guarantees the archive is closed even on error.
  disk_tar_gz = 'debian-{}-{}.tar.gz'.format(debian_version, build_date)
  logging.info('Compressing it into tarball %s' % disk_tar_gz)
  with tarfile.open(disk_tar_gz, "w:gz") as tar:
    tar.add('%s/disk.raw' % work_dir, arcname="disk.raw")

  # Upload tar.
  logging.info('Saving %s to %s' % (disk_tar_gz, image_dest))
  utils.UploadFile(disk_tar_gz, image_dest)

  # Create and upload the synopsis of the image.
  logging.info('Creating image synopsis.')
  synopsis = {}
  packages = collections.OrderedDict()
  _, output = utils.Execute(['dpkg-query', '-W'], capture_output=True)
  for line in output.split('\n')[:-1]:  # Last line is an empty line.
    parts = line.split()
    packages[parts[0]] = parts[1]
  synopsis['installed_packages'] = packages
  with open('/tmp/synopsis.json', 'w') as f:
    f.write(json.dumps(synopsis))
  logging.info('Uploading image synopsis.')
  synopsis_dest = os.path.join(outs_path, 'synopsis.json')
  utils.UploadFile('/tmp/synopsis.json', synopsis_dest)
def DistroSpecific(g):
  """Translate an imported Ubuntu guest so it runs on GCE.

  Resets resolvconf, restores DHCP networking for ifupdown releases,
  optionally installs cloud-init and the GCE guest packages, and appends
  serial-console parameters to the grub command line.

  Args:
    g: guestfs handle with the guest's root filesystem mounted.
  """
  ubu_release = utils.GetMetadataAttribute('ubuntu_release')
  install_gce = utils.GetMetadataAttribute('install_gce_packages')

  # Remove any hard coded DNS settings in resolvconf.
  logging.info('Resetting resolvconf base.')
  g.sh('echo "" > /etc/resolvconf/resolv.conf.d/base')

  # Try to reset the network to DHCP.
  if ubu_release == 'trusty':
    g.write('/etc/network/interfaces', trusty_network)
  elif ubu_release == 'xenial':
    g.write('/etc/network/interfaces', xenial_network)

  if install_gce == 'true':
    g.command(['apt-get', 'update'])
    logging.info('Installing cloud-init.')
    g.sh(
        'DEBIAN_FRONTEND=noninteractive apt-get install -y'
        ' --no-install-recommends cloud-init')

    # Try to remove azure or aws configs so cloud-init has a chance.
    g.sh('rm -f /etc/cloud/cloud.cfg.d/*azure*')
    g.sh('rm -f /etc/cloud/cloud.cfg.d/*waagent*')
    g.sh('rm -f /etc/cloud/cloud.cfg.d/*walinuxagent*')
    g.sh('rm -f /etc/cloud/cloud.cfg.d/*aws*')
    g.sh('rm -f /etc/cloud/cloud.cfg.d/*amazon*')

    # Remove Azure agent. Best-effort: failure is logged, not fatal.
    try:
      g.command(['apt-get', 'remove', '-y', '-f', 'waagent', 'walinuxagent'])
    except Exception as e:
      logging.debug(str(e))
      # Fix: logging.warn is a deprecated alias of logging.warning.
      logging.warning('Could not uninstall Azure agent. Continuing anyway.')

    g.write(
        '/etc/apt/sources.list.d/partner.list',
        partner_list.format(ubu_release=ubu_release))

    g.write('/etc/cloud/cloud.cfg.d/91-gce-system.cfg', gce_system)

    # Use host machine as http proxy so cloud-init can access GCE API
    with open('/etc/tinyproxy/tinyproxy.conf', 'w') as cfg:
        cfg.write(tinyproxy_cfg)
    utils.Execute(['/etc/init.d/tinyproxy', 'restart'])
    default_gw = g.sh("ip route | awk '/default/ { printf $3 }'")
    logging.debug(
        g.sh('http_proxy="http://%s:8888" cloud-init -d init' % default_gw))

    logging.info('Installing GCE packages.')
    g.command(['apt-get', 'update'])
    g.sh(
        'DEBIAN_FRONTEND=noninteractive apt-get install -y'
        ' --no-install-recommends gce-compute-image-packages google-cloud-sdk')

  # Update grub config to log to console.
  g.command(
      ['sed', '-i',
      r's#^\(GRUB_CMDLINE_LINUX=".*\)"$#\1 console=ttyS0,38400n8"#',
      '/etc/default/grub'])

  g.command(['update-grub2'])
# ---- Beispiel #7 ----
def DistroSpecific(g):
    """Translate an imported Ubuntu guest for GCE.

    Clears resolvconf on pre-bionic releases, restores DHCP networking for
    ifupdown releases, optionally installs cloud-init plus the GCE guest
    packages (raising if cloud-init cannot initialize), and switches the
    grub command line to the serial console.

    Args:
        g: guestfs handle with the guest's root filesystem mounted.
    """
    release = utils.GetMetadataAttribute('ubuntu_release')
    want_gce_packages = utils.GetMetadataAttribute('install_gce_packages')

    # Bionic manages DNS differently; only older releases carry a
    # resolvconf base file with hard-coded DNS settings to clear.
    if release != 'bionic':
        logging.info('Resetting resolvconf base.')
        g.sh('echo "" > /etc/resolvconf/resolv.conf.d/base')

    # ifupdown releases get a fresh DHCP /etc/network/interfaces.
    if release == 'trusty':
        g.write('/etc/network/interfaces', trusty_network)
    elif release == 'xenial':
        g.write('/etc/network/interfaces', xenial_network)

    if want_gce_packages == 'true':
        utils.update_apt(g)
        logging.info('Installing cloud-init.')
        utils.install_apt_package(g, 'cloud-init')

        # Clear other clouds' configs so cloud-init can detect GCE.
        for leftover in ('azure', 'curtin', 'waagent', 'walinuxagent',
                         'aws', 'amazon'):
            g.sh('rm -f /etc/cloud/cloud.cfg.d/*%s*' % leftover)
        if release == 'bionic':
            g.sh('rm -f /etc/netplan/*')
            logging.debug(g.sh('cloud-init clean'))

        remove_azure_agents(g)

        g.write('/etc/apt/sources.list.d/partner.list',
                partner_list.format(ubu_release=release))

        g.write('/etc/cloud/cloud.cfg.d/91-gce-system.cfg', gce_system)

        # Proxy through the host so cloud-init inside the guest can reach
        # the GCE metadata/API endpoints.
        with open('/etc/tinyproxy/tinyproxy.conf', 'w') as proxy_conf:
            proxy_conf.write(tinyproxy_cfg)
        utils.Execute(['/etc/init.d/tinyproxy', 'restart'])
        gateway = g.sh("ip route | awk '/default/ { printf $3 }'")
        try:
            logging.debug(
                g.sh('http_proxy="http://%s:8888" cloud-init -d init' %
                     gateway))
        except Exception as err:
            logging.debug('Failed to run cloud-init. Details: {}.'.format(err))
            raise RuntimeError(
                'Failed to run cloud-init. Connect to a shell in the original VM '
                'and ensure that the following command executes successfully: '
                'apt-get install -y --no-install-recommends cloud-init '
                '&& cloud-init -d init')
        logging.info('Installing GCE packages.')
        utils.update_apt(g)
        utils.install_apt_package(g, 'gce-compute-image-packages')
        utils.install_apt_package(g, 'google-cloud-sdk')
    # Append serial-console parameters to the grub command line.
    g.command([
        'sed', '-i',
        r's#^\(GRUB_CMDLINE_LINUX=".*\)"$#\1 console=ttyS0,38400n8"#',
        '/etc/default/grub'
    ])

    g.command(['update-grub2'])
def main():
    """Build a BIOS (extlinux/MBR) EL installer disk on /dev/sdb.

    Generates a kickstart config, partitions /dev/sdb with a single ext2
    installer partition, copies the installer ISO and its isolinux boot
    files over, and rewrites the boot config for an unattended
    serial-console install.
    """
    # Get Parameters
    repo = utils.GetMetadataAttribute('google_cloud_repo',
                                      raise_on_not_found=True)
    release = utils.GetMetadataAttribute('el_release', raise_on_not_found=True)
    daisy_logs_path = utils.GetMetadataAttribute('daisy-logs-path',
                                                 raise_on_not_found=True)
    savelogs = utils.GetMetadataAttribute('el_savelogs',
                                          raise_on_not_found=False) == 'true'
    byos = utils.GetMetadataAttribute('rhel_byos',
                                      raise_on_not_found=False) == 'true'
    sap = utils.GetMetadataAttribute('rhel_sap',
                                     raise_on_not_found=False) == 'true'

    logging.info('EL Release: %s' % release)
    logging.info('Google Cloud repo: %s' % repo)
    logging.info('Build working directory: %s' % os.getcwd())

    iso_file = '/files/installer.iso'

    # Necessary libs and tools to build the installer disk.
    utils.AptGetInstall(['extlinux', 'rsync'])

    # Build the kickstart file.
    # This builder only produces BIOS installer disks.
    uefi = False
    ks_content = ks_helpers.BuildKsConfig(release, repo, byos, sap, uefi)
    ks_cfg = 'ks.cfg'
    utils.WriteFile(ks_cfg, ks_content)
    # Save the generated kickstart file to the build logs.
    utils.UploadFile(ks_cfg, '%s/ks.cfg' % daisy_logs_path)

    # Write the installer disk. Write extlinux MBR, create partition,
    # copy installer ISO and ISO boot files over.
    # The sync calls between steps let the kernel settle before the next
    # operation touches the disk.
    logging.info('Writing installer disk.')
    utils.Execute(['parted', '/dev/sdb', 'mklabel', 'msdos'])
    utils.Execute(['sync'])
    utils.Execute(['parted', '/dev/sdb', 'mkpart', 'primary', '1MB', '100%'])
    utils.Execute(['sync'])
    utils.Execute(['parted', '/dev/sdb', 'set', '1', 'boot', 'on'])
    utils.Execute(['sync'])
    utils.Execute(['dd', 'if=/usr/lib/EXTLINUX/mbr.bin', 'of=/dev/sdb'])
    utils.Execute(['sync'])
    utils.Execute(['mkfs.ext2', '-L', 'INSTALLER', '/dev/sdb1'])
    utils.Execute(['sync'])
    utils.Execute(['mkdir', 'iso', 'installer'])
    utils.Execute(['mount', '-o', 'ro,loop', '-t', 'iso9660', iso_file, 'iso'])
    utils.Execute(['mount', '-t', 'ext2', '/dev/sdb1', 'installer'])
    utils.Execute(
        ['rsync', '-Pav', 'iso/images', 'iso/isolinux', 'installer/'])
    utils.Execute(['cp', iso_file, 'installer/'])
    utils.Execute(['cp', ks_cfg, 'installer/'])

    # Modify boot files on installer disk.
    # Rename isolinux to extlinux; the config formats are compatible.
    utils.Execute(['mv', 'installer/isolinux', 'installer/extlinux'])
    utils.Execute([
        'mv', 'installer/extlinux/isolinux.cfg',
        'installer/extlinux/extlinux.conf'
    ])

    # Modify boot config.
    with open('installer/extlinux/extlinux.conf', 'r+') as f:
        oldcfg = f.read()
        # NOTE(review): without re.M, '^default' only matches at the very
        # start of the file -- confirm the default directive is the first
        # line of the upstream config.
        cfg = re.sub(r'^default.*', r'default linux', oldcfg, count=1)

        # Change boot args.
        args = ' '.join([
            'text',
            'ks=hd:/dev/sda1:/%s' % ks_cfg, 'console=ttyS0,38400n8',
            'loglevel=debug'
        ])
        # Tell Anaconda not to store its logs in the installed image,
        # unless requested to keep them for debugging.
        if not savelogs:
            args += ' inst.nosave=all'
        cfg = re.sub(r'append initrd=initrd\.img.*', r'\g<0> %s' % args, cfg)

        # Change labels to explicit partitions.
        if release.startswith(('centos7', 'rhel7', 'rhel-7', 'oraclelinux7',
                               'centos8', 'rhel8')):
            cfg = re.sub(r'LABEL=[^ ]+', 'LABEL=INSTALLER', cfg)

        # Print out the modifications.
        diff = difflib.Differ().compare(oldcfg.splitlines(1),
                                        cfg.splitlines(1))
        logging.info('Modified extlinux.conf:\n%s' % '\n'.join(diff))

        f.seek(0)
        f.write(cfg)
        f.truncate()

    # Activate extlinux.
    utils.Execute(['extlinux', '--install', 'installer/extlinux'])
def main():
  """Build a Debian GCE image with FAI and upload the results.

  Reads build parameters from instance metadata, installs the FAI
  tooling from the fai-project repository, downloads Debian's
  debian-cloud-images scripts at a pinned commit, runs the build, then
  uploads the disk tarball and a JSON synopsis of installed packages.
  """
  # Get parameters. All of these are required; absence is fatal.
  build_date = utils.GetMetadataAttribute(
      'build_date', raise_on_not_found=True)
  debian_cloud_images_version = utils.GetMetadataAttribute(
      'debian_cloud_images_version', raise_on_not_found=True)
  debian_version = utils.GetMetadataAttribute(
      'debian_version', raise_on_not_found=True)
  uefi = utils.GetMetadataAttribute('uefi', raise_on_not_found=True)
  image_dest = utils.GetMetadataAttribute('image_dest',
      raise_on_not_found=True)
  outs_path = utils.GetMetadataAttribute('daisy-outs-path',
      raise_on_not_found=True)

  logging.info('debian-cloud-images version: %s', debian_cloud_images_version)
  logging.info('debian version: %s', debian_version)

  # First, install fai-client from the fai-project repository.
  key_url = 'https://fai-project.org/download/2BF8D9FE074BCDE4.asc'
  urllib.request.urlretrieve(key_url, 'key.asc')
  utils.Execute(['apt-key', 'add', 'key.asc'])
  with open('/etc/apt/sources.list.d/fai-project.list', 'w') as fai_list:
    fai_list.write('deb https://fai-project.org/download stretch koeln')

  # Force an apt-get update before the next install.
  utils.AptGetInstall.first_run = True
  utils.AptGetInstall(['fai-server', 'fai-setup-storage'])

  # Download and set up Debian's debian-cloud-images scripts.
  url_params = {
      'project': 'debian-cloud-images',
      'commit': debian_cloud_images_version,
  }
  url_params['filename'] = '%(project)s-%(commit)s' % url_params

  url = "https://salsa.debian.org/cloud-team/" + \
      "%(project)s/-/archive/%(commit)s/%(filename)s.tar.gz" % url_params
  logging.info('Downloading %(project)s at commit %(commit)s' % url_params)
  urllib.request.urlretrieve(url, 'fci.tar.gz')
  with tarfile.open('fci.tar.gz') as tar:
    tar.extractall()
  logging.info('Downloaded and extracted %s.', url)

  # Run the fai tool.
  work_dir = url_params['filename']
  fai_bin = 'bin/build'
  # BUG FIX: metadata attributes are strings, so a value of 'false' was
  # previously truthy and always selected the EFI build. Compare against
  # 'true' explicitly, matching how the other workers parse this flag.
  arch = 'amd64-efi' if uefi.lower() == 'true' else 'amd64'
  cmd = [fai_bin, debian_version, 'gce', arch, 'disk', build_date]
  logging.info('Starting build in %s with params: %s',
               work_dir, ' '.join(cmd))
  utils.Execute(cmd, cwd=work_dir, capture_output=True)

  # Pack a gzipped tar file with disk.raw inside; the context manager
  # guarantees the archive is flushed and closed even on error.
  disk_tar_gz = 'disk.tar.gz'
  logging.info('Compressing it into tarball %s', disk_tar_gz)
  with tarfile.open(disk_tar_gz, 'w:gz') as tar:
    tar.add('%s/disk.raw' % work_dir, arcname='disk.raw')

  # Upload tar.
  logging.info('Saving %s to %s', disk_tar_gz, image_dest)
  utils.UploadFile(disk_tar_gz, image_dest)

  # Create and upload the synopsis of the image: a JSON map of every
  # installed package to its version.
  logging.info('Creating image synopsis.')
  synopsis = {}
  packages = collections.OrderedDict()
  _, output = utils.Execute(['dpkg-query', '-W'], capture_output=True)
  for line in output.split('\n')[:-1]:  # Last line is an empty line.
    parts = line.split()
    packages[parts[0]] = parts[1]
  synopsis['installed_packages'] = packages
  with open('/tmp/synopsis.json', 'w') as f:
    f.write(json.dumps(synopsis))
  logging.info('Uploading image synopsis.')
  synopsis_dest = os.path.join(outs_path, 'synopsis.json')
  utils.UploadFile('/tmp/synopsis.json', synopsis_dest)
def main():
    """Collect metadata about an exported image and upload it as JSON.

    Mounts the installed system's root partition at /mnt, queries the
    versions (and, where supported, VCS commit hashes) of the GCE guest
    environment packages with dpkg/rpm inside a chroot, and uploads the
    resulting JSON document to `metadata_dest`.
    """
    # Get parameters from instance metadata.
    metadata_dest = utils.GetMetadataAttribute('metadata_dest',
                                               raise_on_not_found=True)
    image_id = utils.GetMetadataAttribute('image_id')
    image_name = utils.GetMetadataAttribute('image_name')
    image_family = utils.GetMetadataAttribute('image_family')
    distribution = utils.GetMetadataAttribute('distribution',
                                              raise_on_not_found=True)
    uefi = utils.GetMetadataAttribute('uefi', 'false').lower() == 'true'

    logging.info('Creating upload metadata of the image and packages.')

    utc_time = datetime.datetime.now(datetime.timezone.utc)
    image_version = utc_time.strftime('%Y%m%d')
    build_date = utc_time.astimezone().isoformat()
    image = {
        'id': image_id,
        'name': image_name,
        'family': image_family,
        'version': image_version,
        'build_date': build_date,
        'packages': [],
    }
    # All the guest environment packages maintained by the guest-os team.
    guest_packages = [
        'google-cloud-packages-archive-keyring',
        'google-compute-engine',
        'google-compute-engine-oslogin',
        'google-guest-agent',
        'google-osconfig-agent',
        'gce-disk-expand',
    ]

    # This assumes that:
    # 1. /dev/sdb1 is the EFI system partition.
    # 2. /dev/sdb2 is the root mount for the installed system.
    if uefi:
        mount_disk = '/dev/sdb2'
    else:
        mount_disk = '/dev/sdb1'
    # check=False: a failed mount surfaces later as failed package
    # queries rather than aborting the whole export here.
    subprocess.run(['mount', mount_disk, '/mnt'], check=False)
    logging.info('Mount %s device to /mnt', mount_disk)

    if distribution == 'enterprise_linux':
        # chroot prevents access to /dev/random and /dev/urandom (as designed).
        # The rpm required those random bits to initialize GnuTLS otherwise
        # error: Failed to initialize NSS library.
        subprocess.run(['mount', '-o', 'bind', '/dev', '/mnt/dev'],
                       check=False)

    has_commit_hash = True
    if distribution == 'debian':
        cmd_prefix = [
            'chroot', '/mnt', 'dpkg-query', '-W', '--showformat',
            '${Package}\n${Version}\n${Git}'
        ]
    elif distribution == 'enterprise_linux':
        if 'centos-6' in image_family or 'rhel-6' in image_family:
            # centos-6 and rhel-6 don't support the VCS query tag.
            cmd_prefix = [
                'chroot', '/mnt', 'rpm', '-q', '--queryformat',
                '%{NAME}\n%{VERSION}-%{RELEASE}'
            ]
            has_commit_hash = False
        else:
            cmd_prefix = [
                'chroot', '/mnt', 'rpm', '-q', '--queryformat',
                '%{NAME}\n%{VERSION}-%{RELEASE}\n%{VCS}'
            ]
    else:
        # BUG FIX: this previously did `return Exception`, which returned
        # the exception class itself and let the worker report success.
        # Fail loudly instead, mirroring the upload-failure path below.
        logging.error('Unknown Linux distribution.')
        sys.exit(1)

    version, commit_hash = '', ''
    for package in guest_packages:
        cmd = cmd_prefix + [package]
        try:
            stdout = subprocess.run(cmd, stdout=subprocess.PIPE,
                                    check=True).stdout
            stdout = stdout.decode()
            logging.info('Package metadata is %s', stdout)
        except subprocess.CalledProcessError as e:
            # The package may simply not be installed on this image.
            logging.warning('Fail to execute cmd. %s', e)
            continue
        # The query formats above emit name, version, and (optionally)
        # commit hash on separate lines.
        if has_commit_hash:
            package, version, commit_hash = stdout.split('\n', 2)
        else:
            package, version = stdout.split('\n', 1)
        package_metadata = {
            'name': package,
            'version': version,
            'commit_hash': commit_hash,
        }
        image['packages'].append(package_metadata)

    # Write image metadata to a file. delete=False keeps the file on
    # disk after the context exits so it can be uploaded via f.name.
    with tempfile.NamedTemporaryFile(mode='w', dir='/tmp', delete=False) as f:
        f.write(json.dumps(image))

    logging.info('Uploading image metadata.')
    try:
        utils.UploadFile(f.name, metadata_dest)
    except ValueError as e:
        logging.exception('ExportFailed: Failed uploading metadata file %s', e)
        sys.exit(1)

    logging.info('ExportSuccess: Export metadata was successful!')
Beispiel #11
0
def main():
    """Build a Debian GCE image with fai-diskimage and upload the tarball.

    Reads build parameters from instance metadata, downloads Debian's
    debian-cloud-images scripts, assembles the FAI class list and config
    space for the requested release and repo channel (stable/staging/
    unstable), runs fai-diskimage, and uploads the resulting disk
    tarball to the daisy outs path.
    """
    # Get parameters. All of these are required; absence is fatal.
    build_date = utils.GetMetadataAttribute('build_date',
                                            raise_on_not_found=True)
    debian_cloud_images_version = utils.GetMetadataAttribute(
        'debian_cloud_images_version', raise_on_not_found=True)
    debian_version = utils.GetMetadataAttribute('debian_version',
                                                raise_on_not_found=True)
    google_cloud_repo = utils.GetMetadataAttribute('google_cloud_repo',
                                                   raise_on_not_found=True)
    outs_path = utils.GetMetadataAttribute('daisy-outs-path',
                                           raise_on_not_found=True)

    logging.info('debian-cloud-images version: %s',
                 debian_cloud_images_version)
    logging.info('debian version: %s', debian_version)

    # Force an apt-get update before the next install.
    utils.AptGetInstall.first_run = True
    utils.AptGetInstall(['fai-server', 'fai-setup-storage'])

    # Download and set up Debian's debian-cloud-images scripts.
    url_params = {
        'project': 'debian-cloud-images',
        'version': debian_cloud_images_version,
    }
    url_params['filename'] = '%(project)s-%(version)s' % url_params

    url = ('https://salsa.debian.org/cloud-team/'
           '%(project)s/-/archive/%(version)s/%(filename)s.tar.gz' %
           url_params)
    logging.info('Downloading %(project)s at version %(version)s' % url_params)
    urllib.request.urlretrieve(url, 'fci.tar.gz')
    with tarfile.open('fci.tar.gz') as tar:
        tar.extractall()
    logging.info('Downloaded and extracted %s.', url)

    # Configure the fai tool.
    work_dir = url_params['filename']
    fai_classes = [
        'DEBIAN', 'CLOUD', 'GCE', 'GCE_SDK', 'AMD64', 'GRUB_CLOUD_AMD64',
        'LINUX_IMAGE_CLOUD'
    ]
    if debian_version == 'buster':
        fai_classes += ['BUSTER', 'BACKPORTS']
    elif debian_version == 'bullseye':
        fai_classes += ['BULLSEYE']
    elif debian_version == 'sid':
        fai_classes += ['SID']
    image_size = '10G'
    disk_name = 'disk.raw'
    # BUG FIX: the separator between the working directory and work_dir
    # was missing (os.getcwd() + work_dir), producing a bogus path like
    # '/tmpdebian-cloud-images-...'. Join the components properly; the
    # trailing '/' is kept because the paths below are built by
    # concatenation.
    config_space = os.path.join(os.getcwd(), work_dir, 'config_space') + '/'
    apt_sources_base = 'files/etc/apt/sources.list.d/'

    # Copy GCE_SPECIFIC fai classes.
    CopyToConfigSpace('/files/fai_config/packages/GCE_SPECIFIC',
                      'package_config/GCE_SPECIFIC', config_space)
    os.mkdir(config_space + apt_sources_base + 'google-cloud.list')
    CopyToConfigSpace('/files/fai_config/sources/GCE_SPECIFIC',
                      apt_sources_base + 'google-cloud.list/GCE_SPECIFIC',
                      config_space)
    CopyToConfigSpace('/files/fai_config/sources/file_modes',
                      apt_sources_base + '/google-cloud.list/file_modes',
                      config_space)
    CopyToConfigSpace('/files/fai_config/sources/repository.GCE_SPECIFIC',
                      'hooks/repository.GCE_SPECIFIC', config_space)
    fai_classes += ['GCE_SPECIFIC']

    # GCE staging package repo (also included for unstable builds).
    if google_cloud_repo == 'staging' or google_cloud_repo == 'unstable':
        os.mkdir(config_space + apt_sources_base + 'google-cloud-staging.list')
        CopyToConfigSpace(
            '/files/fai_config/sources/GCE_STAGING',
            apt_sources_base + 'google-cloud-staging.list/GCE_STAGING',
            config_space)
        CopyToConfigSpace(
            '/files/fai_config/sources/file_modes',
            apt_sources_base + 'google-cloud-staging.list/file_modes',
            config_space)
        CopyToConfigSpace('/files/fai_config/sources/repository.GCE_STAGING',
                          'hooks/repository.GCE_STAGING', config_space)
        fai_classes += ['GCE_STAGING']

    # GCE unstable package repo.
    if google_cloud_repo == 'unstable':
        os.mkdir(config_space + apt_sources_base +
                 'google-cloud-unstable.list')
        CopyToConfigSpace(
            '/files/fai_config/sources/GCE_UNSTABLE',
            apt_sources_base + 'google-cloud-unstable.list/GCE_UNSTABLE',
            config_space)
        CopyToConfigSpace(
            '/files/fai_config/sources/file_modes',
            apt_sources_base + 'google-cloud-unstable.list/file_modes',
            config_space)
        # NOTE(review): this copies sources/file_modes into
        # hooks/repository.GCE_UNSTABLE, while the staging branch above
        # copies sources/repository.GCE_STAGING into its hook. Looks
        # like a typo for sources/repository.GCE_UNSTABLE - confirm
        # against the files/ tree before changing.
        CopyToConfigSpace('/files/fai_config/sources/file_modes',
                          'hooks/repository.GCE_UNSTABLE', config_space)
        fai_classes += ['GCE_UNSTABLE']

    # Cleanup class for GCE.
    os.mkdir(config_space + 'scripts/GCE_CLEAN')
    CopyToConfigSpace('/files/fai_config/scripts/10-gce-clean',
                      'scripts/GCE_CLEAN/10-gce-clean', config_space)
    # Daisy doesn't preserve the executable bit on uploaded files.
    os.chmod(config_space + 'scripts/GCE_CLEAN/10-gce-clean', 0o755)
    fai_classes += ['GCE_CLEAN']

    # Remove failing test method for now.
    os.remove(config_space + 'hooks/tests.CLOUD')

    # Run the fai tool.
    cmd = [
        'fai-diskimage', '--verbose', '--hostname', 'debian', '--class',
        ','.join(fai_classes), '--size', image_size, '--cspace', config_space,
        disk_name
    ]
    logging.info('Starting build in %s with params: %s',
                 work_dir, ' '.join(cmd))
    utils.Execute(cmd, cwd=work_dir, capture_output=True)

    # Pack a gzipped tar file with disk.raw inside; the context manager
    # guarantees the archive is flushed and closed even on error.
    disk_tar_gz = 'debian-{}-{}.tar.gz'.format(debian_version, build_date)
    logging.info('Compressing it into tarball %s', disk_tar_gz)
    with tarfile.open(disk_tar_gz, 'w:gz') as tar:
        tar.add('%s/disk.raw' % work_dir, arcname='disk.raw')

    # Upload tar.
    image_dest = os.path.join(outs_path, 'root.tar.gz')
    logging.info('Saving %s to %s', disk_tar_gz, image_dest)
    utils.UploadFile(disk_tar_gz, image_dest)
Beispiel #12
0
def main():
    """Build a UEFI installer disk for an Enterprise Linux release.

    Partitions /dev/sdb into an EFI system partition plus an ext2
    'INSTALLER' partition, copies the installer ISO's boot files and
    kickstart config onto them, and rewrites grub.cfg so the machine
    boots straight into an unattended install over the serial console.
    """
    # Get parameters from instance metadata.
    release = utils.GetMetadataAttribute('el_release', raise_on_not_found=True)
    savelogs = utils.GetMetadataAttribute('el_savelogs') == 'true'

    logging.info('EL Release: %s' % release)
    logging.info('Build working directory: %s' % os.getcwd())

    # Input files placed on the worker by the daisy workflow.
    iso_file = '/files/installer.iso'
    ks_cfg = '/files/ks.cfg'

    utils.AptGetInstall(['rsync'])

    # Write the installer disk. Write GPT label, create partitions,
    # copy installer boot files over. The sync after each step flushes
    # the change to disk before the next command runs.
    logging.info('Writing installer disk.')
    utils.Execute(['parted', '/dev/sdb', 'mklabel', 'gpt'])
    utils.Execute(['sync'])
    utils.Execute(
        ['parted', '/dev/sdb', 'mkpart', 'primary', 'fat32', '1MB', '1024MB'])
    utils.Execute(['sync'])
    utils.Execute(
        ['parted', '/dev/sdb', 'mkpart', 'primary', 'ext2', '1024MB', '100%'])
    utils.Execute(['sync'])
    utils.Execute(['parted', '/dev/sdb', 'set', '1', 'boot', 'on'])
    utils.Execute(['sync'])
    utils.Execute(['parted', '/dev/sdb', 'set', '1', 'esp', 'on'])
    utils.Execute(['sync'])
    utils.Execute(['mkfs.vfat', '-F', '32', '/dev/sdb1'])
    utils.Execute(['sync'])
    utils.Execute(['fatlabel', '/dev/sdb1', 'ESP'])
    utils.Execute(['sync'])
    utils.Execute(['mkfs.ext2', '-L', 'INSTALLER', '/dev/sdb2'])
    utils.Execute(['sync'])

    utils.Execute(['mkdir', '-vp', 'iso', 'installer', 'boot'])
    utils.Execute(['mount', '-o', 'ro,loop', '-t', 'iso9660', iso_file, 'iso'])
    utils.Execute(['mount', '-t', 'vfat', '/dev/sdb1', 'boot'])
    utils.Execute(['mount', '-t', 'ext2', '/dev/sdb2', 'installer'])
    # Boot files go on the ESP; the ISO and kickstart config go on the
    # larger ext2 partition.
    utils.Execute(['rsync', '-Pav', 'iso/EFI', 'iso/images', 'boot/'])
    utils.Execute(['cp', iso_file, 'installer/'])
    utils.Execute(['cp', ks_cfg, 'installer/'])

    # Modify boot config.
    with open('boot/EFI/BOOT/grub.cfg', 'r+') as f:
        oldcfg = f.read()
        # Point grub's filesystem search at the ESP label instead of the
        # RHEL ISO label.
        cfg = re.sub(r'-l .RHEL.*', r"""-l 'ESP'""", oldcfg)
        # Boot the default entry almost immediately.
        cfg = re.sub(r'timeout=60', 'timeout=1', cfg)
        cfg = re.sub(r'set default=.*', 'set default="0"', cfg)
        # Route grub's menu over the serial console.
        cfg = re.sub(
            r'load_video\n',
            r'serial --speed=38400 --unit=0 --word=8 --parity=no\n'
            'terminal_input serial\nterminal_output serial\n', cfg)

        # Change boot args: text-mode kickstart install with logging on
        # the serial console.
        # NOTE(review): ks_cfg is an absolute path ('/files/ks.cfg'), so
        # this produces 'hd:LABEL=INSTALLER://files/ks.cfg' even though
        # the file was copied to the partition root above - confirm the
        # installer resolves this path.
        args = ' '.join([
            'inst.text',
            'inst.ks=hd:LABEL=INSTALLER:/%s' % ks_cfg, 'console=ttyS0,38400n8',
            'inst.gpt', 'inst.loglevel=debug'
        ])

        # Tell Anaconda not to store its logs in the installed image,
        # unless requested to keep them for debugging.
        if not savelogs:
            args += ' inst.nosave=all'
        cfg = re.sub(r'inst\.stage2.*', r'\g<0> %s' % args, cfg)

        # Change labels to explicit partitions.
        cfg = re.sub(r'LABEL=[^ ]+', 'LABEL=INSTALLER', cfg)

        # Print out the modifications.
        diff = difflib.Differ().compare(oldcfg.splitlines(1),
                                        cfg.splitlines(1))
        logging.info('Modified grub.cfg:\n%s' % '\n'.join(diff))

        # Rewrite the file in place with the patched config.
        f.seek(0)
        f.write(cfg)
        f.truncate()

    utils.Execute(['umount', 'installer'])
    utils.Execute(['umount', 'iso'])
    utils.Execute(['umount', 'boot'])
Beispiel #13
0
def DistroSpecific(g):
    """Perform Debian-specific image conversion steps inside guestfs.

    When requested via metadata, installs the GCE guest packages and
    removes the Azure agent; then configures grub for serial-console
    logging (disabling predictive NIC naming on stretch/buster) and
    resets eth0 to DHCP.

    Args:
        g: a mounted guestfs handle for the imported image.
    """
    install_gce = utils.GetMetadataAttribute('install_gce_packages')
    deb_release = utils.GetMetadataAttribute('debian_release')

    if install_gce == 'true':
        logging.info('Installing GCE packages.')

        utils.update_apt(g)
        utils.install_apt_packages(g, 'gnupg')

        # Add Google's apt signing key and package repository.
        run(g, [
            'wget', 'https://packages.cloud.google.com/apt/doc/apt-key.gpg',
            '-O', '/tmp/gce_key'
        ])
        run(g, ['apt-key', 'add', '/tmp/gce_key'])
        g.rm('/tmp/gce_key')
        g.write('/etc/apt/sources.list.d/google-cloud.list',
                google_cloud.format(deb_release=deb_release))
        # Remove Azure agent.
        try:
            run(g,
                ['apt-get', 'remove', '-y', '-f', 'waagent', 'walinuxagent'])
        except Exception as e:
            logging.debug(str(e))
            # Best-effort removal: the agent may not be present at all.
            # BUG FIX: logging.warn is a deprecated alias; use warning().
            logging.warning(
                'Could not uninstall Azure agent. Continuing anyway.')

        utils.update_apt(g)
        pkgs = [
            'google-cloud-packages-archive-keyring', 'google-compute-engine'
        ]
        # Debian 8 differences:
        #   1. No NGE
        #   2. No Cloud SDK, since it requires Python 3.5+.
        #   3. No OS config agent.
        if deb_release == 'jessie':
            # Debian 8 doesn't support the new guest agent, so we need to install
            # the legacy Python version.
            pkgs += [
                'python-google-compute-engine', 'python3-google-compute-engine'
            ]
            logging.info('Skipping installation of OS Config agent. '
                         'Requires Debian 9 or newer.')
        else:
            pkgs += ['google-cloud-sdk', 'google-osconfig-agent']
        utils.install_apt_packages(g, *pkgs)

    # Update grub config to log to console.
    run(g, [
        'sed', '-i""', r'/GRUB_CMDLINE_LINUX/s#"$# console=ttyS0,38400n8"#',
        '/etc/default/grub'
    ])

    # Disable predictive network interface naming in Stretch and Buster.
    if deb_release in ['stretch', 'buster']:
        run(g, [
            'sed', '-i',
            r's#^\(GRUB_CMDLINE_LINUX=".*\)"$#\1 net.ifnames=0 biosdevname=0"#',
            '/etc/default/grub'
        ])

    run(g, ['update-grub2'])

    # Reset network for DHCP.
    logging.info('Resetting network to DHCP for eth0.')
    g.write('/etc/network/interfaces', interfaces)
def main():
  """Build a Debian image with fai-diskimage and upload the tarball.

  Uses a pinned debian-cloud-images commit, overlays the local FAI
  config, selects FAI classes by host architecture and Debian release,
  runs the build, and uploads the gzipped disk image to the daisy outs
  path.
  """
  # Get parameters.
  build_date = utils.GetMetadataAttribute(
      'build_date', raise_on_not_found=True)
  # Pinned debian-cloud-images commit known to work with the local
  # config overlay applied below.
  debian_cloud_images_version = '69783f7417aefb332d5d7250ba242adeca444131'
  debian_version = utils.GetMetadataAttribute(
      'debian_version', raise_on_not_found=True)
  outs_path = utils.GetMetadataAttribute('daisy-outs-path',
                                         raise_on_not_found=True)

  logging.info('debian-cloud-images version: %s', debian_cloud_images_version)
  logging.info('debian version: %s', debian_version)

  # Force an apt-get update before the next install.
  utils.AptGetInstall.first_run = True
  utils.AptGetInstall(['fai-server', 'fai-setup-storage'])

  # Download and set up Debian's debian-cloud-images scripts.
  url_params = {
      'project': 'debian-cloud-images',
      'version': debian_cloud_images_version,
  }
  url_params['filename'] = '%(project)s-%(version)s' % url_params

  url = ('https://salsa.debian.org/cloud-team/'
         '%(project)s/-/archive/%(version)s/%(filename)s.tar.gz' % url_params)

  logging.info('Downloading %(project)s at version %(version)s', url_params)
  urllib.request.urlretrieve(url, 'fci.tar.gz')
  with tarfile.open('fci.tar.gz') as tar:
    tar.extractall()
  logging.info('Downloaded and extracted %s.', url)

  work_dir = url_params['filename']
  config_space = os.getcwd() + '/' + work_dir + '/config_space/'

  # We are going to replace this with our variant.
  os.remove(config_space + 'class/BULLSEYE.var')

  # Remove failing test method for now.
  os.remove(config_space + 'hooks/tests.CLOUD')

  # Copy our classes to the FAI config space.
  mycopytree('/files/fai_config', config_space)

  # Set scripts executable (daisy doesn't preserve this).
  os.chmod(config_space + 'scripts/GCE_CLEAN/10-gce-clean', 0o755)
  os.chmod(config_space + 'scripts/GCE_SPECIFIC/12-sshd', 0o755)

  # Configure the fai tool.
  # Base classes.
  fai_classes = ['DEBIAN', 'CLOUD', 'GCE', 'GCE_SDK', 'LINUX_IMAGE_CLOUD',
                 'GCE_SPECIFIC', 'GCE_CLEAN']

  # Arch-specific classes.
  if platform.machine() == 'aarch64':
    fai_classes += ['ARM64', 'GRUB_EFI_ARM64', 'BACKPORTS_LINUX']
  else:
    fai_classes += ['AMD64', 'GRUB_CLOUD_AMD64']

  # Version-specific classes.
  if debian_version == 'buster':
    fai_classes += ['BUSTER']
  elif debian_version == 'bullseye':
    fai_classes += ['BULLSEYE']
  elif debian_version == 'sid':
    fai_classes += ['SID']

  image_size = '10G'
  disk_name = 'disk.raw'

  # Run the fai tool, capturing output so it can be echoed for debugging.
  cmd = ['fai-diskimage', '--verbose', '--hostname', 'debian', '--class',
         ','.join(fai_classes), '--size', image_size, '--cspace',
         config_space, disk_name]
  logging.info('Starting build in %s with params: %s', work_dir, ' '.join(cmd))
  returncode, output = utils.Execute(
      cmd, cwd=work_dir, capture_output=True, raise_errors=False)

  # Verbose printing to console for debugging.
  for line in output.splitlines():
    print(line)

  if returncode != 0:
    raise subprocess.CalledProcessError(returncode, cmd)

  # Pack a gzipped tar file with disk.raw inside. BUG FIX: the archive
  # was previously opened without a context manager and stayed open if
  # add() raised; `with` guarantees it is flushed and closed.
  disk_tar_gz = 'debian-{}-{}.tar.gz'.format(debian_version, build_date)
  logging.info('Compressing it into tarball %s', disk_tar_gz)
  with tarfile.open(disk_tar_gz, 'w:gz', format=tarfile.GNU_FORMAT) as tar:
    tar.add('%s/%s' % (work_dir, disk_name), arcname=disk_name)

  # Upload tar.
  image_dest = os.path.join(outs_path, 'root.tar.gz')
  logging.info('Saving %s to %s', disk_tar_gz, image_dest)
  utils.UploadFile(disk_tar_gz, image_dest)
def DistroSpecific(g):
    """Perform EL (RHEL/CentOS)-specific conversion steps inside guestfs.

    Optionally switches RHEL to the GCE RHUI license, installs the GCE
    guest packages (and the Cloud SDK, via SCL python27 on EL6), then
    regenerates the initramfs, updates the grub configuration for a
    serial console, and resets eth0 to DHCP.

    Args:
        g: a mounted guestfs handle for the imported image.
    """
    el_release = utils.GetMetadataAttribute('el_release')
    install_gce = utils.GetMetadataAttribute('install_gce_packages')
    rhel_license = utils.GetMetadataAttribute('use_rhel_gce_license')

    if rhel_license == 'true':
        if 'Red Hat' in g.cat('/etc/redhat-release'):
            # Swap any existing RHUI client for the GCE one.
            g.command(['yum', 'remove', '-y', '*rhui*'])
            logging.info('Adding in GCE RHUI package.')
            g.write('/etc/yum.repos.d/google-cloud.repo',
                    repo_compute % el_release)
            g.command([
                'yum', 'install', '-y',
                'google-rhui-client-rhel%s' % el_release
            ])

    if install_gce == 'true':
        logging.info('Installing GCE packages.')
        g.write('/etc/yum.repos.d/google-cloud.repo',
                repo_compute % el_release)
        if el_release == '7':
            # EL7 can install the Cloud SDK straight from the repo.
            g.write_append('/etc/yum.repos.d/google-cloud.repo',
                           repo_sdk % el_release)
            g.command(['yum', '-y', 'install', 'google-cloud-sdk'])
        if el_release == '6':
            if 'CentOS' in g.cat('/etc/redhat-release'):
                logging.info('Installing CentOS SCL.')
                g.command(['rm', '-f', '/etc/yum.repos.d/CentOS-SCL.repo'])
                g.command(['yum', '-y', 'install', 'centos-release-scl'])
            # Install Google Cloud SDK from the upstream tar and create links for the
            # python27 SCL environment.
            logging.info('Installing python27 from SCL.')
            g.command(['yum', '-y', 'install', 'python27'])
            g.command([
                'scl', 'enable', 'python27',
                'pip2.7 install --upgrade google_compute_engine'
            ])

            # Fetch the generic SDK tar just to learn the current
            # version, then download that exact versioned tarball.
            logging.info('Installing Google Cloud SDK from tar.')
            sdk_base_url = 'https://dl.google.com/dl/cloudsdk/channels/rapid'
            sdk_base_tar = '%s/google-cloud-sdk.tar.gz' % sdk_base_url
            tar = utils.HttpGet(sdk_base_tar)
            g.write('/tmp/google-cloud-sdk.tar.gz', tar)
            g.command(
                ['tar', 'xzf', '/tmp/google-cloud-sdk.tar.gz', '-C', '/tmp'])
            sdk_version = g.cat('/tmp/google-cloud-sdk/VERSION').strip()

            logging.info('Getting Cloud SDK Version %s', sdk_version)
            sdk_version_tar = 'google-cloud-sdk-%s-linux-x86_64.tar.gz' % sdk_version
            sdk_version_tar_url = '%s/downloads/%s' % (sdk_base_url,
                                                       sdk_version_tar)
            logging.info('Getting versioned Cloud SDK tar file from %s',
                         sdk_version_tar_url)
            tar = utils.HttpGet(sdk_version_tar_url)
            sdk_version_tar_file = os.path.join('/tmp', sdk_version_tar)
            g.write(sdk_version_tar_file, tar)
            g.mkdir_p('/usr/local/share/google')
            g.command([
                'tar', 'xzf', sdk_version_tar_file, '-C',
                '/usr/local/share/google', '--no-same-owner'
            ])

            # Wrap the SDK binaries so they run under the SCL python27.
            logging.info('Creating CloudSDK SCL symlinks.')
            sdk_bin_path = '/usr/local/share/google/google-cloud-sdk/bin'
            g.ln_s(os.path.join(sdk_bin_path, 'git-credential-gcloud.sh'),
                   os.path.join('/usr/bin', 'git-credential-gcloud.sh'))
            for binary in ['bq', 'gcloud', 'gsutil']:
                binary_path = os.path.join(sdk_bin_path, binary)
                new_bin_path = os.path.join('/usr/bin', binary)
                # NOTE(review): $@ is unquoted in this wrapper, so
                # arguments containing spaces will be word-split -
                # confirm before changing the generated script.
                bin_str = '#!/bin/bash\nsource /opt/rh/python27/enable\n%s $@' % binary_path
                g.write(new_bin_path, bin_str)
                # BUG FIX: the literal 0755 is Python 2 octal syntax and
                # a SyntaxError under Python 3; 0o755 is valid in both.
                g.chmod(0o755, new_bin_path)

        g.command([
            'yum', '-y', 'install', 'google-compute-engine',
            'python-google-compute-engine'
        ])

    logging.info('Updating initramfs')
    for kver in g.ls('/lib/modules'):
        if el_release == '6':
            # Version 6 doesn't have option --kver
            g.command(['dracut', '-v', '-f', kver])
        else:
            g.command(['dracut', '-v', '-f', '--kver', kver])

    logging.info('Update grub configuration')
    if el_release == '6':
        # Version 6 doesn't have grub2; grub.conf is rewritten by hand:
        # keep existing title/root/kernel/initrd lines, strip any old
        # console=ttyS0 args, then append the GCE serial-console args.
        g.write('/tmp/grub_gce_generated', grub_cfg)
        g.sh(r'grep -P "^[\t ]*initrd|^[\t ]*root|^[\t ]*kernel|^[\t ]*title" '
             r'/boot/grub/grub.conf >> /tmp/grub_gce_generated;'
             r'sed -i "s/console=ttyS0[^ ]*//g" /tmp/grub_gce_generated;'
             r'sed -i "/^[\t ]*kernel/s/$/ console=ttyS0,38400n8/" '
             r'/tmp/grub_gce_generated;'
             r'mv /tmp/grub_gce_generated /boot/grub/grub.conf')
    else:
        g.write('/etc/default/grub', grub2_cfg)
        g.command(['grub2-mkconfig', '-o', '/boot/grub2/grub.cfg'])

    # Reset network for DHCP.
    logging.info('Resetting network to DHCP for eth0.')
    g.write('/etc/sysconfig/network-scripts/ifcfg-eth0', ifcfg_eth0)
def DistroSpecific(g: guestfs.GuestFS):
    """Perform EL (RHEL/CentOS) specific translation of the mounted guest.

    Reads translation parameters from instance metadata, then mutates the
    guest image through the libguestfs handle: optionally swaps the cloud
    vendor's RHUI client for GCE's, installs the GCE guest environment and
    Cloud SDK, rebuilds initramfs for each installed kernel, rewrites the
    grub configuration for the GCE serial console, and resets eth0 to DHCP.

    Args:
        g: libguestfs handle with the guest's disks mounted.
    """
    el_release = utils.GetMetadataAttribute('el_release')
    install_gce = utils.GetMetadataAttribute('install_gce_packages')
    rhel_license = utils.GetMetadataAttribute('use_rhel_gce_license')

    # This must be performed prior to making network calls from the guest.
    # Otherwise, if /etc/resolv.conf is present, and has an immutable attribute,
    # guestfs will fail with:
    #
    #   rename: /sysroot/etc/resolv.conf to
    #     /sysroot/etc/i9r7obu6: Operation not permitted
    utils.common.ClearEtcResolv(g)

    # Some imported images haven't contained `/etc/yum.repos.d`.
    if not g.exists('/etc/yum.repos.d'):
        g.mkdir('/etc/yum.repos.d')

    if rhel_license == 'true':
        _convert_to_gce_rhui(g, el_release)

    if install_gce == 'true':
        _install_gce_packages(g, el_release)

    _rebuild_initramfs(g, el_release)
    _update_grub_config(g, el_release)
    _reset_network_to_dhcp(g)


def _convert_to_gce_rhui(g, el_release):
    """Replace the existing RHUI client with GCE's (Red Hat guests only)."""
    if 'Red Hat' in g.cat('/etc/redhat-release'):
        g.command(['yum', 'remove', '-y', '*rhui*'])
        logging.info('Adding in GCE RHUI package.')
        g.write('/etc/yum.repos.d/google-cloud.repo',
                repo_compute % el_release)
        yum_install(g, 'google-rhui-client-rhel' + el_release)


def _install_gce_packages(g, el_release):
    """Install the GCE guest environment and the Google Cloud SDK."""
    logging.info('Installing GCE packages.')
    g.write('/etc/yum.repos.d/google-cloud.repo',
            repo_compute % el_release)
    if el_release == '6':
        _install_el6_cloud_sdk(g)
    else:
        # EL7+ can install the SDK straight from the yum repo.
        g.write_append('/etc/yum.repos.d/google-cloud.repo',
                       repo_sdk % el_release)
        yum_install(g, 'google-cloud-sdk')
    yum_install(g, 'google-compute-engine', 'google-osconfig-agent')


def _install_el6_cloud_sdk(g):
    """Install the Cloud SDK on EL6 from the upstream tar.

    EL6's system Python is too old for the SDK, so python27 is installed
    from SCL and every SDK binary is fronted by a shim that enables the
    SCL environment before dispatching to the real binary.
    """
    if 'CentOS' in g.cat('/etc/redhat-release'):
        logging.info('Installing CentOS SCL.')
        g.command(['rm', '-f', '/etc/yum.repos.d/CentOS-SCL.repo'])
        yum_install(g, 'centos-release-scl')
    # Install Google Cloud SDK from the upstream tar and create links for the
    # python27 SCL environment.
    logging.info('Installing python27 from SCL.')
    yum_install(g, 'python27')
    logging.info('Installing Google Cloud SDK from tar.')
    sdk_base_url = 'https://dl.google.com/dl/cloudsdk/channels/rapid'
    sdk_base_tar = '%s/google-cloud-sdk.tar.gz' % sdk_base_url
    # The unversioned tar only tells us the current version; the real
    # payload is the versioned tar fetched below.
    tar = utils.HttpGet(sdk_base_tar)
    g.write('/tmp/google-cloud-sdk.tar.gz', tar)
    g.command(
        ['tar', 'xzf', '/tmp/google-cloud-sdk.tar.gz', '-C', '/tmp'])
    sdk_version = g.cat('/tmp/google-cloud-sdk/VERSION').strip()

    logging.info('Getting Cloud SDK Version %s', sdk_version)
    sdk_version_tar = 'google-cloud-sdk-%s-linux-x86_64.tar.gz' % sdk_version
    sdk_version_tar_url = '%s/downloads/%s' % (sdk_base_url,
                                               sdk_version_tar)
    logging.info('Getting versioned Cloud SDK tar file from %s',
                 sdk_version_tar_url)
    tar = utils.HttpGet(sdk_version_tar_url)
    sdk_version_tar_file = os.path.join('/tmp', sdk_version_tar)
    g.write(sdk_version_tar_file, tar)
    g.mkdir_p('/usr/local/share/google')
    g.command([
        'tar', 'xzf', sdk_version_tar_file, '-C',
        '/usr/local/share/google', '--no-same-owner'
    ])

    logging.info('Creating CloudSDK SCL symlinks.')
    sdk_bin_path = '/usr/local/share/google/google-cloud-sdk/bin'
    g.ln_s(os.path.join(sdk_bin_path, 'git-credential-gcloud.sh'),
           os.path.join('/usr/bin', 'git-credential-gcloud.sh'))
    for binary in ['bq', 'gcloud', 'gsutil']:
        binary_path = os.path.join(sdk_bin_path, binary)
        new_bin_path = os.path.join('/usr/bin', binary)
        bin_str = '#!/bin/bash\nsource /opt/rh/python27/enable\n%s $@' % \
            binary_path
        g.write(new_bin_path, bin_str)
        g.chmod(0o755, new_bin_path)


def _rebuild_initramfs(g, el_release):
    """Rebuild initramfs for each kernel version installed in the guest."""
    logging.info('Updating initramfs')
    for kver in g.ls('/lib/modules'):
        # Although each directory in /lib/modules typically corresponds to a
        # kernel version  [1], that may not always be true.
        # kernel-abi-whitelists, for example, creates extra directories in
        # /lib/modules.
        #
        # Skip building initramfs if the directory doesn't look like a
        # kernel version. Emulates the version matching from depmod [2].
        #
        # 1. https://tldp.org/LDP/Linux-Filesystem-Hierarchy/html/lib.html
        # 2. https://kernel.googlesource.com/pub/scm/linux/kernel/git/mmarek/kmod
        # /+/tip/tools/depmod.c#2537
        #
        # NOTE: the dot is escaped so the name must contain a literal
        # "<digits>.<digits>" prefix, matching depmod's sscanf("%u.%u").
        if not re.match(r'^\d+\.\d+', kver):
            logging.debug(
                '/lib/modules/{} doesn\'t look like a kernel directory. '
                'Skipping creation of initramfs for it'.format(kver))
            continue
        if not g.exists(os.path.join('/lib/modules', kver, 'modules.dep')):
            # dracut requires modules.dep; regenerate it if it's missing.
            try:
                g.command(['depmod', kver])
            except RuntimeError as e:
                # Best effort: warn and move on rather than failing the
                # whole translation for one kernel directory.
                logging.info(
                    'Failed to write initramfs for {kver}. If image fails to '
                    'boot, verify that depmod /lib/modules/{kver} runs on '
                    'the original machine'.format(kver=kver))
                logging.debug('depmod error: {}'.format(e))
                continue
        if el_release == '6':
            # Version 6 doesn't have option --kver
            g.command(['dracut', '-v', '-f', kver])
        else:
            g.command(['dracut', '--stdlog=1', '-f', '--kver', kver])


def _update_grub_config(g, el_release):
    """Rewrite the grub configuration for the GCE serial console."""
    logging.info('Update grub configuration')
    if el_release == '6':
        # Version 6 doesn't have grub2, file grub.conf needs to be updated
        # by hand: keep only the boot-entry lines from the existing config,
        # strip any old ttyS0 arguments, then append GCE's console settings.
        g.write('/tmp/grub_gce_generated', grub_cfg)
        g.sh(r'grep -P "^[\t ]*initrd|^[\t ]*root|^[\t ]*kernel|^[\t ]*title" '
             r'/boot/grub/grub.conf >> /tmp/grub_gce_generated;'
             r'sed -i "s/console=ttyS0[^ ]*//g" /tmp/grub_gce_generated;'
             r'sed -i "/^[\t ]*kernel/s/$/ console=ttyS0,38400n8/" '
             r'/tmp/grub_gce_generated;'
             r'mv /tmp/grub_gce_generated /boot/grub/grub.conf')
    else:
        g.write('/etc/default/grub', grub2_cfg)
        g.command(['grub2-mkconfig', '-o', '/boot/grub2/grub.cfg'])


def _reset_network_to_dhcp(g):
    """Reset eth0 to DHCP so the guest gets its address from GCE."""
    logging.info('Resetting network to DHCP for eth0.')
    # Remove NetworkManager-config-server if it's present. The package configures
    # NetworkManager to *not* use DHCP.
    #  https://access.redhat.com/solutions/894763
    g.command(['yum', 'remove', '-y', 'NetworkManager-config-server'])
    g.write('/etc/sysconfig/network-scripts/ifcfg-eth0', ifcfg_eth0)