def main():
    """Upload installer build logs and the build synopsis to GCS.

    Reads the daisy log/output destinations from instance metadata, mounts
    the installer disk's root partition, uploads the kickstart file and
    every build log, then uploads synopsis.json to the daisy outs path.
    """
    raise_on_not_found = True
    logs_path = utils.GetMetadataAttribute('daisy-logs-path',
                                           raise_on_not_found)
    outs_path = utils.GetMetadataAttribute('daisy-outs-path',
                                           raise_on_not_found)
    uefi = utils.GetMetadataAttribute('rhel_uefi') == 'true'

    # Mount the installer disk. On UEFI builds /dev/sdb1 is the ESP, so
    # the installer filesystem lives on the second partition.
    if uefi:
        utils.Execute(['mount', '/dev/sdb2', '/mnt'])
    else:
        utils.Execute(['mount', '/dev/sdb1', '/mnt'])

    logging.info('Installer root: %s' % os.listdir('/mnt'))
    logging.info('Build logs: %s' % os.listdir('/mnt/build-logs'))

    utils.UploadFile('/mnt/ks.cfg', '%s/ks.cfg' % logs_path)
    directory = '/mnt/build-logs'
    for log in os.listdir(directory):
        log_path = os.path.join(directory, log)
        # Fix: test the joined path. The bare name was previously checked
        # relative to the CWD, so regular files were silently skipped.
        if os.path.isfile(log_path):
            utils.UploadFile(log_path, '%s/%s' % (logs_path, log))
    utils.UploadFile('/mnt/build-logs/synopsis.json',
                     '%s/synopsis.json' % outs_path)

    # Lazy unmount so a lingering open file cannot block teardown.
    utils.Execute(['umount', '-l', '/mnt'])
def main():
  """Upload installer build logs and the build synopsis to GCS.

  Mounts the installer disk's first partition, then uploads the kickstart
  file and all build logs to the daisy logs path and synopsis.json to the
  daisy outs path.
  """
  raise_on_not_found = True
  logs_path = utils.GetMetadataAttribute('daisy-logs-path', raise_on_not_found)
  outs_path = utils.GetMetadataAttribute('daisy-outs-path', raise_on_not_found)

  # Mount the installer disk.
  utils.Execute(['mount', '/dev/sdb1', '/mnt'])

  logging.info('Installer root: %s' % os.listdir('/mnt'))
  logging.info('Build logs: %s' % os.listdir('/mnt/build-logs'))

  utils.UploadFile('/mnt/ks.cfg', '%s/' % logs_path)
  directory = '/mnt/build-logs'
  for f in os.listdir(directory):
    f_path = os.path.join(directory, f)
    # Fix: test the joined path. The bare name was previously checked
    # relative to the CWD, so regular files were silently skipped.
    if os.path.isfile(f_path):
      utils.UploadFile(f_path, '%s/' % logs_path)
  utils.UploadFile('/mnt/build-logs/synopsis.json',
      '%s/synopsis.json' % outs_path)

  # Lazy unmount so a lingering open file cannot block teardown.
  utils.Execute(['umount', '-l', '/mnt'])
Exemple #3
0
def main():
    """Build a Debian GCE image with fai-diskimage and upload the tarball.

    Downloads the pinned debian-cloud-images config space, overlays the
    local FAI classes, runs fai-diskimage, and uploads the resulting
    disk.raw (as root.tar.gz) to the daisy outs path.
    """
    # Get Parameters.
    build_date = utils.GetMetadataAttribute('build_date',
                                            raise_on_not_found=True)
    debian_cloud_images_version = utils.GetMetadataAttribute(
        'debian_cloud_images_version', raise_on_not_found=True)
    debian_version = utils.GetMetadataAttribute('debian_version',
                                                raise_on_not_found=True)
    outs_path = utils.GetMetadataAttribute('daisy-outs-path',
                                           raise_on_not_found=True)

    logging.info('debian-cloud-images version: %s' %
                 debian_cloud_images_version)
    logging.info('debian version: %s' % debian_version)

    # Force an apt-get update before the next install.
    utils.AptGetInstall.first_run = True
    utils.AptGetInstall(['fai-server', 'fai-setup-storage'])

    # Download and set up debian's debian-cloud-images scripts.
    url_params = {
        'project': 'debian-cloud-images',
        'version': debian_cloud_images_version,
    }
    url_params['filename'] = '%(project)s-%(version)s' % url_params

    url = ('https://salsa.debian.org/cloud-team/'
           '%(project)s/-/archive/%(version)s/%(filename)s.tar.gz' %
           url_params)

    logging.info('Downloading %(project)s at version %(version)s', url_params)
    urllib.request.urlretrieve(url, 'fci.tar.gz')
    # Archive is fetched over HTTPS from salsa.debian.org at a pinned
    # version; extraction of member paths is trusted here.
    with tarfile.open('fci.tar.gz') as tar:
        tar.extractall()
    logging.info('Downloaded and extracted %s.', url)

    # Copy our classes to the FAI config space.
    work_dir = url_params['filename']
    # Fix: the previous `os.getcwd() + work_dir` concatenation produced a
    # broken path whenever the CWD was not '/'. Join with separators and
    # keep the trailing slash the later concatenations rely on.
    config_space = os.path.join(os.getcwd(), work_dir, 'config_space', '')
    mycopytree('/files/fai_config', config_space)

    # Remove failing test method for now.
    os.remove(config_space + 'hooks/tests.CLOUD')

    # Config fai-tool.
    fai_classes = [
        'DEBIAN', 'CLOUD', 'GCE', 'GCE_SDK', 'AMD64', 'GRUB_CLOUD_AMD64',
        'LINUX_IMAGE_CLOUD', 'GCE_SPECIFIC', 'GCE_CLEAN'
    ]
    if debian_version == 'buster':
        fai_classes += ['BUSTER', 'BACKPORTS']
    elif debian_version == 'bullseye':
        fai_classes += ['BULLSEYE']
    elif debian_version == 'sid':
        fai_classes += ['SID']

    image_size = '10G'
    disk_name = 'disk.raw'

    # Run fai-tool.
    cmd = [
        'fai-diskimage', '--verbose', '--hostname', 'debian', '--class',
        ','.join(fai_classes), '--size', image_size, '--cspace', config_space,
        disk_name
    ]
    logging.info('Starting build in %s with params: %s', work_dir,
                 ' '.join(cmd))
    utils.Execute(cmd, cwd=work_dir, capture_output=True)

    # Pack a gzipped tar file with disk.raw inside.
    disk_tar_gz = 'debian-{}-{}.tar.gz'.format(debian_version, build_date)
    logging.info('Compressing it into tarball %s', disk_tar_gz)
    tar = tarfile.open(disk_tar_gz, 'w:gz')
    tar.add('%s/disk.raw' % work_dir, arcname='disk.raw')
    tar.close()

    # Upload tar.
    image_dest = os.path.join(outs_path, 'root.tar.gz')
    logging.info('Saving %s to %s', disk_tar_gz, image_dest)
    utils.UploadFile(disk_tar_gz, image_dest)
Exemple #4
0
def main():
  """Build a Debian image with bootstrap-vz and upload it plus a synopsis.

  Downloads bootstrap-vz at a pinned commit, optionally injects the Google
  Cloud test-repo plugin (for staging/unstable repos), runs the build, and
  uploads the image tarball and a synopsis of installed packages.
  """
  # Get Parameters.
  bvz_manifest = utils.GetMetadataAttribute(
      'bootstrap_vz_manifest', raise_on_not_found=True)
  bvz_version = utils.GetMetadataAttribute(
      'bootstrap_vz_version', raise_on_not_found=True)
  repo = utils.GetMetadataAttribute('google_cloud_repo',
      raise_on_not_found=True).strip()
  image_dest = utils.GetMetadataAttribute('image_dest',
      raise_on_not_found=True)
  outs_path = utils.GetMetadataAttribute('daisy-outs-path',
      raise_on_not_found=True)
  if repo not in REPOS:
    raise ValueError(
        'Metadata "google_cloud_repo" must be one of %s.' % REPOS)

  logging.info('Bootstrap_vz manifest: %s' % bvz_manifest)
  logging.info('Bootstrap_vz version: %s' % bvz_version)
  logging.info('Google Cloud repo: %s' % repo)

  # Download and set up bootstrap_vz.
  bvz_url = 'https://github.com/andsens/bootstrap-vz/archive/%s.zip'
  bvz_url %= bvz_version
  bvz_zip_dir = 'bvz_zip'
  logging.info('Downloading bootstrap-vz at commit %s' % bvz_version)
  urllib.urlretrieve(bvz_url, 'bvz.zip')
  with zipfile.ZipFile('bvz.zip', 'r') as z:
    z.extractall(bvz_zip_dir)
  logging.info('Downloaded and extracted %s to bvz.zip.' % bvz_url)
  # The archive contains a single top-level directory; move it into place.
  bvz_zip_contents = os.listdir(bvz_zip_dir)
  bvz_zip_subdir = os.path.join(bvz_zip_dir, bvz_zip_contents[0])
  utils.Execute(['mv', bvz_zip_subdir, BVZ_DIR])
  logging.info('Moved bootstrap_vz from %s to %s.' % (bvz_zip_subdir, BVZ_DIR))
  bvz_bin = os.path.join(BVZ_DIR, 'bootstrap-vz')
  utils.MakeExecutable(bvz_bin)
  logging.info('Made %s executable.' % bvz_bin)
  bvz_manifest_file = os.path.join(BVZ_DIR, 'manifests', bvz_manifest)

  # Inject Google Cloud test repo plugin if using staging or unstable repos.
  # This is used to test new package releases in images.
  if repo != 'stable':
    logging.info('Adding Google Cloud test repos plugin for bootstrapvz.')
    repo_plugin_dir = '/files/google_cloud_test_repos'
    bvz_plugins = os.path.join(BVZ_DIR, 'bootstrapvz', 'plugins')
    shutil.move(repo_plugin_dir, bvz_plugins)

    with open(bvz_manifest_file, 'r+') as manifest_file:
      # safe_load: the manifest is plain YAML; no python-object tags needed.
      manifest_data = yaml.safe_load(manifest_file)
      manifest_plugins = manifest_data['plugins']
      manifest_plugins['google_cloud_test_repos'] = {repo: True}
      manifest_yaml = yaml.dump(manifest_data, default_flow_style=False)
      # Fix: rewrite the file in place. Without seek/truncate, the dump
      # was appended after the original content (the read left the file
      # position at EOF), producing a corrupt manifest.
      manifest_file.seek(0)
      manifest_file.write(manifest_yaml)
      manifest_file.truncate()

  # Run bootstrap_vz build.
  cmd = [bvz_bin, '--debug', bvz_manifest_file]
  logging.info('Starting build in %s with params: %s' % (BVZ_DIR, str(cmd)))
  utils.Execute(cmd, cwd=BVZ_DIR)

  # Upload tar.
  image_tar_gz = '/target/disk.tar.gz'
  if os.path.exists(image_tar_gz):
    logging.info('Saving %s to %s' % (image_tar_gz, image_dest))
    utils.UploadFile(image_tar_gz, image_dest)

  # Create and upload the synopsis of the image.
  logging.info('Creating image synopsis.')
  synopsis = {}
  packages = collections.OrderedDict()
  _, output = utils.Execute(['dpkg-query', '-W'], capture_output=True)
  for line in output.split('\n')[:-1]:  # Last line is an empty line.
    parts = line.split()
    packages[parts[0]] = parts[1]
  synopsis['installed_packages'] = packages
  with open('/tmp/synopsis.json', 'w') as f:
    f.write(json.dumps(synopsis))
  logging.info('Uploading image synopsis.')
  synopsis_dest = os.path.join(outs_path, 'synopsis.json')
  utils.UploadFile('/tmp/synopsis.json', synopsis_dest)
def main():
  """Build a Debian GCE image via debian-cloud-images' FAI build script.

  Installs fai-server from the fai-project repo, downloads the pinned
  debian-cloud-images tree, runs its bin/build script, and uploads the
  disk tarball plus a synopsis of installed packages.
  """
  # Get Parameters.
  debian_cloud_images_version = utils.GetMetadataAttribute(
      'debian_cloud_images_version', raise_on_not_found=True)
  debian_version = utils.GetMetadataAttribute(
      'debian_version', raise_on_not_found=True)
  # Fix: metadata values are strings; any non-empty value (including
  # 'false') is truthy, so the flag must be compared against 'true' as the
  # other build scripts do.
  uefi = utils.GetMetadataAttribute('uefi', raise_on_not_found=True) == 'true'
  image_dest = utils.GetMetadataAttribute('image_dest',
      raise_on_not_found=True)
  outs_path = utils.GetMetadataAttribute('daisy-outs-path',
      raise_on_not_found=True)

  logging.info('debian-cloud-images version: %s' % debian_cloud_images_version)
  logging.info('debian version: %s' % debian_version)

  # First, install fai-client from fai-project repository.
  key_url = 'https://fai-project.org/download/2BF8D9FE074BCDE4.asc'
  urllib.urlretrieve(key_url, 'key.asc')
  utils.Execute(['apt-key', 'add', 'key.asc'])
  with open('/etc/apt/sources.list.d/fai-project.list', 'w') as fai_list:
    fai_list.write('deb https://fai-project.org/download stretch koeln')

  # Force an apt-get update before the next install.
  utils.AptGetInstall.first_run = True
  utils.AptGetInstall(['fai-server', 'fai-setup-storage'])

  # Download and set up debian's debian-cloud-images scripts.
  url_params = {
      'project': 'debian-cloud-images',
      'commit': debian_cloud_images_version,
  }
  url_params['filename'] = '%(project)s-%(commit)s' % url_params

  url = "https://salsa.debian.org/cloud-team/" + \
      "%(project)s/-/archive/%(commit)s/%(filename)s.tar.gz" % url_params
  logging.info('Downloading %(project)s at commit %(commit)s' % url_params)
  urllib.urlretrieve(url, 'fci.tar.gz')
  with tarfile.open('fci.tar.gz') as tar:
    tar.extractall()
  logging.info('Downloaded and extracted %s.' % url)

  # Run fai-tool.
  work_dir = url_params['filename']
  fai_bin = 'bin/build'
  arch = 'amd64-efi' if uefi else 'amd64'
  cmd = [fai_bin, debian_version, 'gce', arch, 'disk']
  logging.info(
      'Starting build in %s with params: %s' % (work_dir, ' '.join(cmd)))
  utils.Execute(cmd, cwd=work_dir, capture_output=True)

  # Pack a gzipped tar file with disk.raw inside.
  disk_tar_gz = 'disk.tar.gz'
  logging.info('Compressing it into tarball %s' % disk_tar_gz)
  tar = tarfile.open(disk_tar_gz, "w:gz")
  tar.add('%s/disk.raw' % work_dir, arcname="disk.raw")
  tar.close()

  # Upload tar.
  logging.info('Saving %s to %s' % (disk_tar_gz, image_dest))
  utils.UploadFile(disk_tar_gz, image_dest)

  # Create and upload the synopsis of the image.
  logging.info('Creating image synopsis.')
  synopsis = {}
  packages = collections.OrderedDict()
  _, output = utils.Execute(['dpkg-query', '-W'], capture_output=True)
  for line in output.split('\n')[:-1]:  # Last line is an empty line.
    parts = line.split()
    packages[parts[0]] = parts[1]
  synopsis['installed_packages'] = packages
  with open('/tmp/synopsis.json', 'w') as f:
    f.write(json.dumps(synopsis))
  logging.info('Uploading image synopsis.')
  synopsis_dest = os.path.join(outs_path, 'synopsis.json')
  utils.UploadFile('/tmp/synopsis.json', synopsis_dest)
Exemple #6
0
def main():
    """Build a Debian GCE image with fai-diskimage and upload artifacts.

    Downloads the pinned debian-cloud-images config space, adds the
    GCE_SPECIFIC class, runs fai-diskimage, and uploads the disk tarball
    plus a synopsis of the installed packages.
    """
    # Get Parameters.
    build_date = utils.GetMetadataAttribute('build_date',
                                            raise_on_not_found=True)
    debian_cloud_images_version = utils.GetMetadataAttribute(
        'debian_cloud_images_version', raise_on_not_found=True)
    debian_version = utils.GetMetadataAttribute('debian_version',
                                                raise_on_not_found=True)
    image_dest = utils.GetMetadataAttribute('image_dest',
                                            raise_on_not_found=True)
    outs_path = utils.GetMetadataAttribute('daisy-outs-path',
                                           raise_on_not_found=True)

    logging.info('debian-cloud-images version: %s' %
                 debian_cloud_images_version)
    logging.info('debian version: %s' % debian_version)

    # Force an apt-get update before the next install.
    utils.AptGetInstall.first_run = True

    debian_host_version = utils.Execute(['cat', '/etc/debian_version'],
                                        capture_output=True)
    # The FAI version in stretch does not satisfy our needs, so the
    # version from stretch-backports is needed on stretch hosts.
    if debian_host_version[1].startswith('9'):
        utils.AptGetInstall(['fai-server', 'fai-setup-storage'],
                            'stretch-backports')
    else:
        utils.AptGetInstall(['fai-server', 'fai-setup-storage'])

    # Download and set up debian's debian-cloud-images scripts.
    url_params = {
        'project': 'debian-cloud-images',
        'version': debian_cloud_images_version,
    }
    url_params['filename'] = '%(project)s-%(version)s' % url_params

    url = "https://salsa.debian.org/cloud-team/" + \
        "%(project)s/-/archive/%(version)s/%(filename)s.tar.gz" % url_params
    logging.info('Downloading %(project)s at version %(version)s' % url_params)
    urllib.request.urlretrieve(url, 'fci.tar.gz')
    with tarfile.open('fci.tar.gz') as tar:
        tar.extractall()
    logging.info('Downloaded and extracted %s.' % url)

    # Config fai-tool.
    work_dir = url_params['filename']
    fai_classes = [
        'DEBIAN', 'CLOUD', 'GCE', 'GCE_SDK', 'AMD64', 'GRUB_CLOUD_AMD64',
        'LINUX_IMAGE_CLOUD'
    ]
    if debian_version == 'stretch':
        fai_classes += ['STRETCH', 'BACKPORTS', 'BACKPORTS_LINUX']
    elif debian_version == 'buster':
        fai_classes += ['BUSTER']
    elif debian_version == 'sid':
        fai_classes += ['SID']
    image_size = '10G'
    disk_name = 'disk.raw'
    # Fix: the previous `os.getcwd() + work_dir` concatenation produced a
    # broken path whenever the CWD was not '/'. Join with separators and
    # keep the trailing slash the later concatenation relies on.
    config_space = os.path.join(os.getcwd(), work_dir, 'config_space', '')

    # Copy GCE_SPECIFIC fai class.
    utils.Execute([
        'cp', '/files/fai_config/packages/GCE_SPECIFIC',
        config_space + 'package_config/GCE_SPECIFIC'
    ])
    fai_classes += ['GCE_SPECIFIC']

    # Run fai-tool.
    cmd = [
        'fai-diskimage', '--verbose', '--hostname', 'debian', '--class',
        ','.join(fai_classes), '--size', image_size, '--cspace', config_space,
        disk_name
    ]
    logging.info('Starting build in %s with params: %s' %
                 (work_dir, ' '.join(cmd)))
    utils.Execute(cmd, cwd=work_dir, capture_output=True)

    # Pack a gzipped tar file with disk.raw inside.
    # Fix: %-formatting requires a tuple; a list raised TypeError here.
    disk_tar_gz = 'debian-%s-%s.tar.gz' % (debian_version, build_date)
    logging.info('Compressing it into tarball %s' % disk_tar_gz)
    tar = tarfile.open(disk_tar_gz, "w:gz")
    tar.add('%s/disk.raw' % work_dir, arcname="disk.raw")
    tar.close()

    # Upload tar.
    logging.info('Saving %s to %s' % (disk_tar_gz, image_dest))
    utils.UploadFile(disk_tar_gz, image_dest)

    # Create and upload the synopsis of the image.
    logging.info('Creating image synopsis.')
    synopsis = {}
    packages = collections.OrderedDict()
    _, output = utils.Execute(['dpkg-query', '-W'], capture_output=True)
    for line in output.split('\n')[:-1]:  # Last line is an empty line.
        parts = line.split()
        packages[parts[0]] = parts[1]
    synopsis['installed_packages'] = packages
    with open('/tmp/synopsis.json', 'w') as f:
        f.write(json.dumps(synopsis))
    logging.info('Uploading image synopsis.')
    synopsis_dest = os.path.join(outs_path, 'synopsis.json')
    utils.UploadFile('/tmp/synopsis.json', synopsis_dest)
Exemple #7
0
def main():
    """Collect image and guest-package metadata and upload it as JSON.

    Mounts the built image's root partition, queries the versions of the
    guest-environment packages inside it (dpkg or rpm via chroot), and
    uploads the resulting metadata JSON to the daisy outs path.
    """
    # Get parameters from instance metadata.
    image_id = utils.GetMetadataAttribute('image_id')
    image_name = utils.GetMetadataAttribute('image_name')
    image_family = utils.GetMetadataAttribute('image_family')
    distribution = utils.GetMetadataAttribute('distribution',
                                              raise_on_not_found=True)
    uefi = utils.GetMetadataAttribute('uefi', 'false').lower() == 'true'
    outs_path = utils.GetMetadataAttribute('daisy-outs-path')

    logging.info('Creating upload metadata of the image and packages.')

    utc_time = datetime.datetime.now(datetime.timezone.utc)
    # NOTE(review): takes the text after the last 'v' in the image name as
    # the version (e.g. 'debian-10-v20201112' -> '20201112') — confirm
    # names never contain another 'v' after the version marker.
    image_version = image_name.rsplit("v")[-1]
    publish_date = utc_time.astimezone().isoformat()
    image = {
        'id': image_id,
        'name': image_name,
        'family': image_family,
        'version': image_version,
        'publish_date': publish_date,
        'packages': [],
    }

    # All the guest environment packages maintained by guest-os team.
    guest_packages = [
        'google-compute-engine',
        'google-compute-engine-oslogin',
        'google-guest-agent',
        'google-osconfig-agent',
        'gce-disk-expand',
    ]

    # This assumes that:
    # 1. /dev/sdb1 is the EFI system partition.
    # 2. /dev/sdb2 is the root mount for the installed system.
    # Except for debian 10, which has out-of-order partitions.
    if uefi and 'debian-10' not in image_family:
        mount_disk = '/dev/sdb2'
    else:
        mount_disk = '/dev/sdb1'
    # check=False: a mount failure surfaces later as empty query results
    # rather than aborting the whole metadata export.
    subprocess.run(['mount', mount_disk, '/mnt'], check=False)
    logging.info('Mount %s device to /mnt', mount_disk)

    if distribution == 'enterprise_linux':
        # chroot prevents access to /dev/random and /dev/urandom (as designed).
        # The rpm required those random bits to initialize GnuTLS otherwise
        # error: Failed to initialize NSS library.
        subprocess.run(['mount', '-o', 'bind', '/dev', '/mnt/dev'],
                       check=False)

    if distribution == 'debian':
        #  This package is debian-only.
        guest_packages.append('google-cloud-packages-archive-keyring')
        # Query format yields: package, blank epoch line, version, commit.
        cmd_prefix = [
            'chroot', '/mnt', 'dpkg-query', '-W', '--showformat',
            '${Package}\n\n${Version}\n${Git}'
        ]
    elif distribution == 'enterprise_linux':
        # Query format yields: name, epoch, version-release, VCS commit.
        cmd_prefix = [
            'chroot', '/mnt', 'rpm', '-q', '--queryformat',
            '%{NAME}\n%{EPOCH}\n%{VERSION}-%{RELEASE}\n%{VCS}'
        ]
    else:
        logging.error('Unknown Linux distribution.')
        return

    for package in guest_packages:
        try:
            process = subprocess.run(cmd_prefix + [package],
                                     capture_output=True,
                                     check=True)
        except subprocess.CalledProcessError as e:
            # A missing package is expected on some images; log and move on.
            logging.info('failed to execute cmd: %s stdout: %s stderr: %s', e,
                         e.stdout, e.stderr)
            continue

        stdout = process.stdout.decode()

        try:
            # maxsplit=3 keeps any newlines inside the commit-hash field.
            package, epoch, version, commit_hash = stdout.split('\n', 3)
        except ValueError:
            logging.info('command result was malformed: %s', stdout)
            continue

        md = make_pkg_metadata(package, version, epoch, commit_hash)
        image['packages'].append(md)

    # Write image metadata to a file.
    # delete=False so the file still exists when uploaded below.
    with tempfile.NamedTemporaryFile(mode='w', dir='/tmp', delete=False) as f:
        f.write(json.dumps(image))

    # We upload the result to the daisy outs path as well, to aid in
    # troubleshooting.
    logging.info('Uploading image metadata to daisy outs path.')
    try:
        utils.UploadFile(f.name, outs_path + "/metadata.json")
    except Exception as e:
        logging.error('Failed uploading metadata file %s', e)
        return

    # NOTE(review): logging.success is not a stdlib method — presumably a
    # custom level installed by the project's utils module; confirm.
    logging.success('Export metadata was successful!')
Exemple #8
0
def main():
  """Build a UEFI EL installer disk from an ISO and a kickstart config.

  Partitions /dev/sdb with a GPT label (ESP + ext2 installer partition),
  copies the ISO's boot files and the kickstart file onto it, and rewrites
  grub.cfg for an unattended serial-console install.
  """
  # Get Parameters
  repo = utils.GetMetadataAttribute('google_cloud_repo',
                    raise_on_not_found=True)
  release = utils.GetMetadataAttribute('el_release', raise_on_not_found=True)
  daisy_logs_path = utils.GetMetadataAttribute('daisy-logs-path',
                                               raise_on_not_found=True)
  # Boolean flags arrive as metadata strings; only the literal 'true'
  # enables them.
  savelogs = utils.GetMetadataAttribute('el_savelogs') == 'true'
  byos = utils.GetMetadataAttribute('rhel_byos') == 'true'
  sap = utils.GetMetadataAttribute('rhel_sap') == 'true'
  uefi = utils.GetMetadataAttribute('rhel_uefi') == 'true'

  logging.info('EL Release: %s' % release)
  logging.info('Google Cloud repo: %s' % repo)
  logging.info('Build working directory: %s' % os.getcwd())

  iso_file = '/files/installer.iso'

  # Necessary libs and tools to build the installer disk.
  utils.AptGetInstall(['dosfstools', 'rsync'])

  # Build the kickstart file.
  ks_content = ks_helpers.BuildKsConfig(release, repo, byos, sap, uefi)
  ks_cfg = 'ks.cfg'
  utils.WriteFile(ks_cfg, ks_content)
  # Save the generated kickstart file to the build logs.
  utils.UploadFile(ks_cfg, '%s/ks.cfg' % daisy_logs_path)

  # Write the installer disk. Write GPT label, create partition,
  # copy installer boot files over. The `sync` after each step flushes
  # the partition-table/filesystem changes before the next tool runs.
  logging.info('Writing installer disk.')
  utils.Execute(['parted', '/dev/sdb', 'mklabel', 'gpt'])
  utils.Execute(['sync'])
  utils.Execute(['parted', '/dev/sdb', 'mkpart', 'primary', 'fat32', '1MB',
                 '1024MB'])
  utils.Execute(['sync'])
  utils.Execute(['parted', '/dev/sdb', 'mkpart', 'primary', 'ext2', '1024MB',
                 '100%'])
  utils.Execute(['sync'])
  utils.Execute(['parted', '/dev/sdb', 'set', '1', 'boot', 'on'])
  utils.Execute(['sync'])
  utils.Execute(['parted', '/dev/sdb', 'set', '1', 'esp', 'on'])
  utils.Execute(['sync'])
  utils.Execute(['mkfs.vfat', '-F', '32', '/dev/sdb1'])
  utils.Execute(['sync'])
  utils.Execute(['fatlabel', '/dev/sdb1', 'ESP'])
  utils.Execute(['sync'])
  utils.Execute(['mkfs.ext2', '-L', 'INSTALLER', '/dev/sdb2'])
  utils.Execute(['sync'])

  # Mount the ISO (read-only loop), the ESP, and the installer partition,
  # then copy boot files, the ISO itself, and the kickstart over.
  utils.Execute(['mkdir', '-vp', 'iso', 'installer', 'boot'])
  utils.Execute(['mount', '-o', 'ro,loop', '-t', 'iso9660', iso_file, 'iso'])
  utils.Execute(['mount', '-t', 'vfat', '/dev/sdb1', 'boot'])
  utils.Execute(['mount', '-t', 'ext2', '/dev/sdb2', 'installer'])
  utils.Execute(['rsync', '-Pav', 'iso/EFI', 'iso/images', 'boot/'])
  utils.Execute(['cp', iso_file, 'installer/'])
  utils.Execute(['cp', ks_cfg, 'installer/'])

  # Modify boot config.
  with open('boot/EFI/BOOT/grub.cfg', 'r+') as f:
    oldcfg = f.read()
    # Point grub's filesystem search at the ESP label instead of the
    # distribution's ISO label.
    cfg = re.sub(r'-l .RHEL.*', r"""-l 'ESP'""", oldcfg)
    # Boot the default entry almost immediately.
    cfg = re.sub(r'timeout=60', 'timeout=1', cfg)
    cfg = re.sub(r'set default=.*', 'set default="0"', cfg)
    # Replace graphical output with the serial console.
    cfg = re.sub(r'load_video\n',
           r'serial --speed=38400 --unit=0 --word=8 --parity=no\n'
           'terminal_input serial\nterminal_output serial\n', cfg)

    # Change boot args.
    args = ' '.join([
      'text', 'ks=hd:LABEL=INSTALLER:/%s' % ks_cfg,
      'console=ttyS0,38400n8', 'inst.gpt', 'loglevel=debug'
    ])

    # Tell Anaconda not to store its logs in the installed image,
    # unless requested to keep them for debugging.
    if not savelogs:
      args += ' inst.nosave=all'
    cfg = re.sub(r'inst\.stage2.*', r'\g<0> %s' % args, cfg)

    # Change labels to explicit partitions.
    if release.startswith(('centos7', 'rhel7', 'rhel-7', 'oraclelinux7',
                           'centos8', 'rhel8', 'rhel-8')):
      cfg = re.sub(r'LABEL=[^ ]+', 'LABEL=INSTALLER', cfg)

    # Print out a the modifications.
    diff = difflib.Differ().compare(
        oldcfg.splitlines(1),
        cfg.splitlines(1))
    logging.info('Modified grub.cfg:\n%s' % '\n'.join(diff))

    # Rewrite the config in place; truncate in case it shrank.
    f.seek(0)
    f.write(cfg)
    f.truncate()

  utils.Execute(['umount', 'installer'])
  utils.Execute(['umount', 'iso'])
  utils.Execute(['umount', 'boot'])
def main():
    """Build a BIOS (extlinux) EL installer disk from an ISO and kickstart.

    Partitions /dev/sdb with an MBR label, writes the extlinux MBR, copies
    the ISO and its boot files onto the installer partition, and rewrites
    the boot config for an unattended serial-console install.
    """
    # Get Parameters
    repo = utils.GetMetadataAttribute('google_cloud_repo',
                                      raise_on_not_found=True)
    release = utils.GetMetadataAttribute('el_release', raise_on_not_found=True)
    daisy_logs_path = utils.GetMetadataAttribute('daisy-logs-path',
                                                 raise_on_not_found=True)
    # Boolean flags arrive as metadata strings; only the literal 'true'
    # enables them.
    savelogs = utils.GetMetadataAttribute('el_savelogs',
                                          raise_on_not_found=False) == 'true'
    byos = utils.GetMetadataAttribute('rhel_byos',
                                      raise_on_not_found=False) == 'true'
    sap = utils.GetMetadataAttribute('rhel_sap',
                                     raise_on_not_found=False) == 'true'

    logging.info('EL Release: %s' % release)
    logging.info('Google Cloud repo: %s' % repo)
    logging.info('Build working directory: %s' % os.getcwd())

    iso_file = '/files/installer.iso'

    # Necessary libs and tools to build the installer disk.
    utils.AptGetInstall(['extlinux', 'rsync'])

    # Build the kickstart file. This builder is BIOS-only, hence uefi=False.
    uefi = False
    ks_content = ks_helpers.BuildKsConfig(release, repo, byos, sap, uefi)
    ks_cfg = 'ks.cfg'
    utils.WriteFile(ks_cfg, ks_content)
    # Save the generated kickstart file to the build logs.
    utils.UploadFile(ks_cfg, '%s/ks.cfg' % daisy_logs_path)

    # Write the installer disk. Write extlinux MBR, create partition,
    # copy installer ISO and ISO boot files over. The `sync` after each
    # step flushes changes before the next tool runs.
    logging.info('Writing installer disk.')
    utils.Execute(['parted', '/dev/sdb', 'mklabel', 'msdos'])
    utils.Execute(['sync'])
    utils.Execute(['parted', '/dev/sdb', 'mkpart', 'primary', '1MB', '100%'])
    utils.Execute(['sync'])
    utils.Execute(['parted', '/dev/sdb', 'set', '1', 'boot', 'on'])
    utils.Execute(['sync'])
    utils.Execute(['dd', 'if=/usr/lib/EXTLINUX/mbr.bin', 'of=/dev/sdb'])
    utils.Execute(['sync'])
    utils.Execute(['mkfs.ext2', '-L', 'INSTALLER', '/dev/sdb1'])
    utils.Execute(['sync'])
    utils.Execute(['mkdir', 'iso', 'installer'])
    utils.Execute(['mount', '-o', 'ro,loop', '-t', 'iso9660', iso_file, 'iso'])
    utils.Execute(['mount', '-t', 'ext2', '/dev/sdb1', 'installer'])
    utils.Execute(
        ['rsync', '-Pav', 'iso/images', 'iso/isolinux', 'installer/'])
    utils.Execute(['cp', iso_file, 'installer/'])
    utils.Execute(['cp', ks_cfg, 'installer/'])

    # Modify boot files on installer disk: extlinux reuses the isolinux
    # config, just renamed.
    utils.Execute(['mv', 'installer/isolinux', 'installer/extlinux'])
    utils.Execute([
        'mv', 'installer/extlinux/isolinux.cfg',
        'installer/extlinux/extlinux.conf'
    ])

    # Modify boot config.
    with open('installer/extlinux/extlinux.conf', 'r+') as f:
        oldcfg = f.read()
        # NOTE(review): without re.M the '^' anchor only matches the very
        # first line — assumes the config starts with 'default ...';
        # confirm against the distro's isolinux.cfg.
        cfg = re.sub(r'^default.*', r'default linux', oldcfg, count=1)

        # Change boot args.
        args = ' '.join([
            'text',
            'ks=hd:/dev/sda1:/%s' % ks_cfg, 'console=ttyS0,38400n8',
            'loglevel=debug'
        ])
        # Tell Anaconda not to store its logs in the installed image,
        # unless requested to keep them for debugging.
        if not savelogs:
            args += ' inst.nosave=all'
        cfg = re.sub(r'append initrd=initrd\.img.*', r'\g<0> %s' % args, cfg)

        # Change labels to explicit partitions.
        if release.startswith(('centos7', 'rhel7', 'rhel-7', 'oraclelinux7',
                               'centos8', 'rhel8')):
            cfg = re.sub(r'LABEL=[^ ]+', 'LABEL=INSTALLER', cfg)

        # Print out a the modifications.
        diff = difflib.Differ().compare(oldcfg.splitlines(1),
                                        cfg.splitlines(1))
        logging.info('Modified extlinux.conf:\n%s' % '\n'.join(diff))

        # Rewrite the config in place; truncate in case it shrank.
        f.seek(0)
        f.write(cfg)
        f.truncate()

    # Activate extlinux.
    utils.Execute(['extlinux', '--install', 'installer/extlinux'])
Exemple #10
0
def main():
    """Build a Debian GCE image with fai-diskimage and upload root.tar.gz.

    Downloads the pinned debian-cloud-images config space, overlays the
    GCE-specific FAI classes (and optional staging/unstable apt repos),
    runs fai-diskimage, and uploads the resulting disk tarball.
    """
    # Get Parameters.
    build_date = utils.GetMetadataAttribute('build_date',
                                            raise_on_not_found=True)
    debian_cloud_images_version = utils.GetMetadataAttribute(
        'debian_cloud_images_version', raise_on_not_found=True)
    debian_version = utils.GetMetadataAttribute('debian_version',
                                                raise_on_not_found=True)
    google_cloud_repo = utils.GetMetadataAttribute('google_cloud_repo',
                                                   raise_on_not_found=True)
    outs_path = utils.GetMetadataAttribute('daisy-outs-path',
                                           raise_on_not_found=True)

    logging.info('debian-cloud-images version: %s' %
                 debian_cloud_images_version)
    logging.info('debian version: %s' % debian_version)

    # Force an apt-get update before the next install.
    utils.AptGetInstall.first_run = True
    utils.AptGetInstall(['fai-server', 'fai-setup-storage'])

    # Download and set up debian's debian-cloud-images scripts.
    url_params = {
        'project': 'debian-cloud-images',
        'version': debian_cloud_images_version,
    }
    url_params['filename'] = '%(project)s-%(version)s' % url_params

    url = ('https://salsa.debian.org/cloud-team/'
           '%(project)s/-/archive/%(version)s/%(filename)s.tar.gz' %
           url_params)
    logging.info('Downloading %(project)s at version %(version)s' % url_params)
    urllib.request.urlretrieve(url, 'fci.tar.gz')
    with tarfile.open('fci.tar.gz') as tar:
        tar.extractall()
    logging.info('Downloaded and extracted %s.' % url)

    # Config fai-tool.
    work_dir = url_params['filename']
    fai_classes = [
        'DEBIAN', 'CLOUD', 'GCE', 'GCE_SDK', 'AMD64', 'GRUB_CLOUD_AMD64',
        'LINUX_IMAGE_CLOUD'
    ]
    if debian_version == 'buster':
        fai_classes += ['BUSTER', 'BACKPORTS']
    elif debian_version == "bullseye":
        fai_classes += ['BULLSEYE']
    elif debian_version == 'sid':
        fai_classes += ['SID']
    image_size = '10G'
    disk_name = 'disk.raw'
    # Fix: the previous `os.getcwd() + work_dir` concatenation produced a
    # broken path whenever the CWD was not '/'. Join with separators and
    # keep the trailing slash the later concatenations rely on.
    config_space = os.path.join(os.getcwd(), work_dir, 'config_space', '')
    apt_sources_base = 'files/etc/apt/sources.list.d/'

    # Copy GCE_SPECIFIC fai classes.
    CopyToConfigSpace('/files/fai_config/packages/GCE_SPECIFIC',
                      'package_config/GCE_SPECIFIC', config_space)
    os.mkdir(config_space + apt_sources_base + 'google-cloud.list')
    CopyToConfigSpace('/files/fai_config/sources/GCE_SPECIFIC',
                      apt_sources_base + 'google-cloud.list/GCE_SPECIFIC',
                      config_space)
    # Fix: apt_sources_base already ends with '/', so the extra leading
    # slash produced a doubled separator in the destination path.
    CopyToConfigSpace('/files/fai_config/sources/file_modes',
                      apt_sources_base + 'google-cloud.list/file_modes',
                      config_space)
    CopyToConfigSpace('/files/fai_config/sources/repository.GCE_SPECIFIC',
                      'hooks/repository.GCE_SPECIFIC', config_space)
    fai_classes += ['GCE_SPECIFIC']

    # GCE staging package repo.
    if google_cloud_repo == 'staging' or google_cloud_repo == 'unstable':
        os.mkdir(config_space + apt_sources_base + 'google-cloud-staging.list')
        CopyToConfigSpace(
            '/files/fai_config/sources/GCE_STAGING',
            apt_sources_base + 'google-cloud-staging.list/GCE_STAGING',
            config_space)
        CopyToConfigSpace(
            '/files/fai_config/sources/file_modes',
            apt_sources_base + 'google-cloud-staging.list/file_modes',
            config_space)
        CopyToConfigSpace('/files/fai_config/sources/repository.GCE_STAGING',
                          'hooks/repository.GCE_STAGING', config_space)
        fai_classes += ['GCE_STAGING']

    # GCE unstable package repo.
    if google_cloud_repo == 'unstable':
        os.mkdir(config_space + apt_sources_base +
                 'google-cloud-unstable.list')
        CopyToConfigSpace(
            '/files/fai_config/sources/GCE_UNSTABLE',
            apt_sources_base + 'google-cloud-unstable.list/GCE_UNSTABLE',
            config_space)
        CopyToConfigSpace(
            '/files/fai_config/sources/file_modes',
            apt_sources_base + 'google-cloud-unstable.list/file_modes',
            config_space)
        # Fix: copy-paste bug — this previously copied sources/file_modes
        # into the unstable repository hook; mirror the staging branch and
        # copy the actual repository hook source.
        CopyToConfigSpace('/files/fai_config/sources/repository.GCE_UNSTABLE',
                          'hooks/repository.GCE_UNSTABLE', config_space)
        fai_classes += ['GCE_UNSTABLE']

    # Cleanup class for GCE.
    os.mkdir(config_space + 'scripts/GCE_CLEAN')
    CopyToConfigSpace('/files/fai_config/scripts/10-gce-clean',
                      'scripts/GCE_CLEAN/10-gce-clean', config_space)
    os.chmod(config_space + 'scripts/GCE_CLEAN/10-gce-clean', 0o755)
    fai_classes += ['GCE_CLEAN']

    # Remove failing test method for now.
    os.remove(config_space + 'hooks/tests.CLOUD')

    # Run fai-tool.
    cmd = [
        'fai-diskimage', '--verbose', '--hostname', 'debian', '--class',
        ','.join(fai_classes), '--size', image_size, '--cspace', config_space,
        disk_name
    ]
    logging.info('Starting build in %s with params: %s' %
                 (work_dir, ' '.join(cmd)))
    utils.Execute(cmd, cwd=work_dir, capture_output=True)

    # Pack a gzipped tar file with disk.raw inside.
    disk_tar_gz = 'debian-{}-{}.tar.gz'.format(debian_version, build_date)
    logging.info('Compressing it into tarball %s' % disk_tar_gz)
    tar = tarfile.open(disk_tar_gz, 'w:gz')
    tar.add('%s/disk.raw' % work_dir, arcname='disk.raw')
    tar.close()

    # Upload tar.
    image_dest = os.path.join(outs_path, 'root.tar.gz')
    logging.info('Saving %s to %s' % (disk_tar_gz, image_dest))
    utils.UploadFile(disk_tar_gz, image_dest)
def main():
    """Collect guest-package metadata from a mounted image and upload it.

    Reads build parameters from instance metadata, mounts the built image's
    root partition at /mnt, queries the installed guest-environment package
    versions by running the distribution's package manager chroot'ed into
    the mount, and uploads the resulting JSON metadata file to
    ``metadata_dest``.

    Raises:
        ValueError: if ``distribution`` is not a known value.

    Exits with status 1 if uploading the metadata file fails.
    """
    # Get parameters from instance metadata.
    metadata_dest = utils.GetMetadataAttribute('metadata_dest',
                                               raise_on_not_found=True)
    image_id = utils.GetMetadataAttribute('image_id')
    image_name = utils.GetMetadataAttribute('image_name')
    image_family = utils.GetMetadataAttribute('image_family')
    distribution = utils.GetMetadataAttribute('distribution',
                                              raise_on_not_found=True)
    uefi = utils.GetMetadataAttribute('uefi', 'false').lower() == 'true'

    logging.info('Creating upload metadata of the image and packages.')

    utc_time = datetime.datetime.now(datetime.timezone.utc)
    image_version = utc_time.strftime('%Y%m%d')
    build_date = utc_time.astimezone().isoformat()
    image = {
        'id': image_id,
        'name': image_name,
        'family': image_family,
        'version': image_version,
        'build_date': build_date,
        'packages': [],
    }
    # All the guest environment packages maintained by guest-os team.
    guest_packages = [
        'google-cloud-packages-archive-keyring',
        'google-compute-engine',
        'google-compute-engine-oslogin',
        'google-guest-agent',
        'google-osconfig-agent',
        'gce-disk-expand',
    ]

    # This assumes that:
    # 1. /dev/sdb1 is the EFI system partition.
    # 2. /dev/sdb2 is the root mount for the installed system.
    mount_disk = '/dev/sdb2' if uefi else '/dev/sdb1'
    # check=False: a mount failure surfaces later when the chroot'ed
    # package queries fail, and those are logged per package.
    subprocess.run(['mount', mount_disk, '/mnt'], check=False)
    logging.info('Mount %s device to /mnt', mount_disk)

    if distribution == 'enterprise_linux':
        # chroot prevents access to /dev/random and /dev/urandom (as designed).
        # The rpm required those random bits to initialize GnuTLS otherwise
        # error: Failed to initialize NSS library.
        subprocess.run(['mount', '-o', 'bind', '/dev', '/mnt/dev'],
                       check=False)

    # Build the chroot'ed package-query command for this distribution.
    has_commit_hash = True
    if distribution == 'debian':
        cmd_prefix = [
            'chroot', '/mnt', 'dpkg-query', '-W', '--showformat',
            '${Package}\n${Version}\n${Git}'
        ]
    elif distribution == 'enterprise_linux':
        if 'centos-6' in image_family or 'rhel-6' in image_family:
            # centos-6 and rhel-6 doesn't support vcs tag
            cmd_prefix = [
                'chroot', '/mnt', 'rpm', '-q', '--queryformat',
                '%{NAME}\n%{VERSION}-%{RELEASE}'
            ]
            has_commit_hash = False
        else:
            cmd_prefix = [
                'chroot', '/mnt', 'rpm', '-q', '--queryformat',
                '%{NAME}\n%{VERSION}-%{RELEASE}\n%{VCS}'
            ]
    else:
        logging.error('Unknown Linux distribution.')
        # Fix: the original `return Exception` returned the exception class
        # object (appearing to succeed); raise so the failure is reported.
        raise ValueError('Unknown Linux distribution: %s' % distribution)

    version, commit_hash = '', ''
    for package in guest_packages:
        cmd = cmd_prefix + [package]
        try:
            stdout = subprocess.run(cmd, stdout=subprocess.PIPE,
                                    check=True).stdout.decode()
            logging.info('Package metadata is %s', stdout)
        except subprocess.CalledProcessError as e:
            # Not every guest package is installed on every image; skip it.
            logging.warning('Fail to execute cmd. %s', e)
            continue
        # Query output is newline-separated: name, version[, commit hash].
        if has_commit_hash:
            package, version, commit_hash = stdout.split('\n', 2)
        else:
            package, version = stdout.split('\n', 1)
        package_metadata = {
            'name': package,
            'version': version,
            'commit_hash': commit_hash,
        }
        image['packages'].append(package_metadata)

    # Write image metadata to a file.
    with tempfile.NamedTemporaryFile(mode='w', dir='/tmp', delete=False) as f:
        f.write(json.dumps(image))

    logging.info('Uploading image metadata.')
    try:
        utils.UploadFile(f.name, metadata_dest)
    except ValueError as e:
        logging.exception('ExportFailed: Failed uploading metadata file %s', e)
        sys.exit(1)

    logging.info('ExportSuccess: Export metadata was successful!')
def main():
  """Build a Debian GCE disk image with FAI and upload it as root.tar.gz.

  Fetches a pinned checkout of debian-cloud-images, adapts its FAI config
  space with the GCE-specific classes shipped under /files/fai_config, runs
  fai-diskimage, and uploads the resulting gzipped disk tarball to the
  workflow's daisy-outs-path.

  Raises:
      subprocess.CalledProcessError: if fai-diskimage exits non-zero.
  """
  # Get Parameters.
  build_date = utils.GetMetadataAttribute(
      'build_date', raise_on_not_found=True)
  # Pinned commit of the debian-cloud-images repo providing the FAI config.
  debian_cloud_images_version = '69783f7417aefb332d5d7250ba242adeca444131'
  debian_version = utils.GetMetadataAttribute(
      'debian_version', raise_on_not_found=True)
  outs_path = utils.GetMetadataAttribute('daisy-outs-path',
                                         raise_on_not_found=True)

  logging.info('debian-cloud-images version: %s', debian_cloud_images_version)
  logging.info('debian version: %s', debian_version)

  # Force an apt-get update before next install.
  utils.AptGetInstall.first_run = True
  utils.AptGetInstall(['fai-server', 'fai-setup-storage'])

  # Download and setup debian's debian-cloud-images scripts.
  url_params = {
      'project': 'debian-cloud-images',
      'version': debian_cloud_images_version,
  }
  url_params['filename'] = '%(project)s-%(version)s' % url_params

  url = ('https://salsa.debian.org/cloud-team/'
         '%(project)s/-/archive/%(version)s/%(filename)s.tar.gz' % url_params)

  logging.info('Downloading %(project)s at version %(version)s', url_params)
  urllib.request.urlretrieve(url, 'fci.tar.gz')
  with tarfile.open('fci.tar.gz') as tar:
    # NOTE(review): extractall trusts member paths in the archive; the
    # source here is a commit pinned by hash on salsa.debian.org.
    tar.extractall()
  logging.info('Downloaded and extracted %s.', url)

  work_dir = url_params['filename']
  config_space = os.getcwd() + '/' + work_dir + '/config_space/'

  # We are going to replace this with our variant
  os.remove(config_space + 'class/BULLSEYE.var')

  # Remove failing test method for now.
  os.remove(config_space + 'hooks/tests.CLOUD')

  # Copy our classes to the FAI config space
  mycopytree('/files/fai_config', config_space)

  # Set scripts executable (daisy doesn't preserve this)
  os.chmod(config_space + 'scripts/GCE_CLEAN/10-gce-clean', 0o755)
  os.chmod(config_space + 'scripts/GCE_SPECIFIC/12-sshd', 0o755)

  # Config fai-tool
  # Base classes
  fai_classes = ['DEBIAN', 'CLOUD', 'GCE', 'GCE_SDK', 'LINUX_IMAGE_CLOUD',
                 'GCE_SPECIFIC', 'GCE_CLEAN']

  # Arch-specific classes
  if platform.machine() == 'aarch64':
    fai_classes += ['ARM64', 'GRUB_EFI_ARM64', 'BACKPORTS_LINUX']
  else:
    fai_classes += ['AMD64', 'GRUB_CLOUD_AMD64']

  # Version-specific classes
  if debian_version == 'buster':
    fai_classes += ['BUSTER']
  elif debian_version == 'bullseye':
    fai_classes += ['BULLSEYE']
  elif debian_version == 'sid':
    fai_classes += ['SID']

  image_size = '10G'
  disk_name = 'disk.raw'

  # Run fai-tool.
  cmd = ['fai-diskimage', '--verbose', '--hostname', 'debian', '--class',
         ','.join(fai_classes), '--size', image_size, '--cspace',
         config_space, disk_name]
  logging.info('Starting build in %s with params: %s', work_dir, ' '.join(cmd))
  returncode, output = utils.Execute(
      cmd, cwd=work_dir, capture_output=True, raise_errors=False)

  # Verbose printing to console for debugging.
  for line in output.splitlines():
    print(line)

  if returncode != 0:
    raise subprocess.CalledProcessError(returncode, cmd)

  # Packs a gzipped tar file with disk.raw inside.  The context manager
  # fixes a leak in the original: the archive handle stayed open (leaving a
  # truncated tarball) if tar.add raised.
  disk_tar_gz = 'debian-{}-{}.tar.gz'.format(debian_version, build_date)
  logging.info('Compressing it into tarball %s', disk_tar_gz)
  with tarfile.open(disk_tar_gz, 'w:gz', format=tarfile.GNU_FORMAT) as tar:
    tar.add('%s/%s' % (work_dir, disk_name), arcname=disk_name)

  # Upload tar.
  image_dest = os.path.join(outs_path, 'root.tar.gz')
  logging.info('Saving %s to %s', disk_tar_gz, image_dest)
  utils.UploadFile(disk_tar_gz, image_dest)