コード例 #1
0
def main():
    """Customize the attached disk: mount it, run distro-specific and
    common translate routines, then relabel SELinux contexts offline."""
    utils.AptGetInstall(['libguestfs-tools'])
    device = '/dev/sdb'
    guest = diskutils.MountDisk(device)
    DistroSpecific(guest)
    utils.CommonRoutines(guest)
    diskutils.UnmountDisk(guest)
    # SELinux relabel must happen after unmount, directly on the disk image.
    utils.Execute(['virt-customize', '-a', device, '--selinux-relabel'])
コード例 #2
0
  ['stable' (default), 'unstable', 'staging'].
image_dest: The Cloud Storage destination for the resultant image.
"""

import collections
import json
import logging
import os
import shutil
import urllib
import zipfile

import utils

# Install build dependencies at import time: system packages via apt,
# then Python packages via pip (presumably bootstrap-vz requirements --
# confirm against the bootstrap-vz manifest).
utils.AptGetInstall(
    ['git', 'python-pip', 'qemu-utils', 'parted', 'kpartx', 'debootstrap',
     'python-yaml'])
utils.PipInstall(
    ['termcolor', 'fysom', 'jsonschema', 'docopt', 'functools32'])

import yaml  # noqa: E402,I202

# Directory where bootstrap-vz is placed.
BVZ_DIR = '/bvz'
# Recognized values for the google_cloud_repo metadata attribute
# (see the module docstring above).
REPOS = ['stable', 'unstable', 'staging']


def main():
  # Get Parameters.
  bvz_manifest = utils.GetMetadataAttribute(
      'bootstrap_vz_manifest', raise_on_not_found=True)
  bvz_version = utils.GetMetadataAttribute(
コード例 #3
0
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Translate the EL image on a GCE VM.

Parameters (retrieved from instance metadata):

debian_release: The version of the distro (stretch)
install_gce_packages: True if GCE agent and SDK should be installed
"""

import logging

import utils

utils.AptGetInstall(['python-guestfs'])

import guestfs  # flake8: noqa: E402

# apt source lines for Google Cloud packages; {deb_release} is presumably
# filled in with the target Debian release name via str.format at use time
# (the call site is outside this view -- confirm).
google_cloud = '''
deb http://packages.cloud.google.com/apt cloud-sdk-{deb_release} main
deb http://packages.cloud.google.com/apt google-compute-engine-{deb_release}-stable main
deb http://packages.cloud.google.com/apt google-cloud-packages-archive-keyring-{deb_release} main
'''

interfaces = '''
source-directory /etc/network/interfaces.d
auto lo
iface lo inet loopback
auto eth0
iface eth0 inet dhcp
コード例 #4
0
def main():
    """Mount the attached disk, apply distro-specific and common
    translate routines, and unmount it again."""
    utils.AptGetInstall(['tinyproxy'])
    guest = diskutils.MountDisk('/dev/sdb')
    DistroSpecific(guest)
    utils.CommonRoutines(guest)
    diskutils.UnmountDisk(guest)
コード例 #5
0
def main():
    """Build an extlinux/MBR-based EL installer disk on /dev/sdb.

    Reads build parameters from instance metadata, generates a kickstart
    file, partitions and formats /dev/sdb, copies the installer ISO and
    its boot files onto it, rewrites the boot configuration for an
    unattended serial-console install, and activates extlinux.
    """
    # Get Parameters
    # Boolean flags arrive as metadata strings; compare against 'true'.
    repo = utils.GetMetadataAttribute('google_cloud_repo',
                                      raise_on_not_found=True)
    release = utils.GetMetadataAttribute('el_release', raise_on_not_found=True)
    savelogs = utils.GetMetadataAttribute('el_savelogs',
                                          raise_on_not_found=False) == 'true'
    byos = utils.GetMetadataAttribute('rhel_byos',
                                      raise_on_not_found=False) == 'true'
    sap = utils.GetMetadataAttribute('rhel_sap',
                                     raise_on_not_found=False) == 'true'
    nge = utils.GetMetadataAttribute('new_guest',
                                     raise_on_not_found=False) == 'true'

    logging.info('EL Release: %s' % release)
    logging.info('Google Cloud repo: %s' % repo)
    logging.info('Build working directory: %s' % os.getcwd())

    iso_file = '/files/installer.iso'

    # Necessary libs and tools to build the installer disk.
    utils.AptGetInstall(['extlinux', 'rsync'])

    # Build the kickstart file.
    uefi = False  # this variant always builds a BIOS/MBR installer disk
    ks_content = ks_helpers.BuildKsConfig(release, repo, byos, sap, uefi, nge)
    ks_cfg = 'ks.cfg'
    utils.WriteFile(ks_cfg, ks_content)

    # Write the installer disk. Write extlinux MBR, create partition,
    # copy installer ISO and ISO boot files over.
    logging.info('Writing installer disk.')
    utils.Execute(['parted', '/dev/sdb', 'mklabel', 'msdos'])
    utils.Execute(['sync'])
    utils.Execute(['parted', '/dev/sdb', 'mkpart', 'primary', '1MB', '100%'])
    utils.Execute(['sync'])
    utils.Execute(['parted', '/dev/sdb', 'set', '1', 'boot', 'on'])
    utils.Execute(['sync'])
    utils.Execute(['dd', 'if=/usr/lib/EXTLINUX/mbr.bin', 'of=/dev/sdb'])
    utils.Execute(['sync'])
    utils.Execute(['mkfs.ext2', '-L', 'INSTALLER', '/dev/sdb1'])
    utils.Execute(['sync'])
    utils.Execute(['mkdir', 'iso', 'installer'])
    utils.Execute(['mount', '-o', 'ro,loop', '-t', 'iso9660', iso_file, 'iso'])
    utils.Execute(['mount', '-t', 'ext2', '/dev/sdb1', 'installer'])
    utils.Execute(
        ['rsync', '-Pav', 'iso/images', 'iso/isolinux', 'installer/'])
    utils.Execute(['cp', iso_file, 'installer/'])
    utils.Execute(['cp', ks_cfg, 'installer/'])

    # Modify boot files on installer disk.
    utils.Execute(['mv', 'installer/isolinux', 'installer/extlinux'])
    utils.Execute([
        'mv', 'installer/extlinux/isolinux.cfg',
        'installer/extlinux/extlinux.conf'
    ])

    # Modify boot config.
    with open('installer/extlinux/extlinux.conf', 'r+') as f:
        oldcfg = f.read()
        # NOTE(review): without re.MULTILINE, '^' matches only the start of
        # the whole file -- this works only if the config begins with a
        # 'default' line; confirm against a real isolinux.cfg.
        cfg = re.sub(r'^default.*', r'default linux', oldcfg, count=1)

        # Change boot args.
        # NOTE(review): the kickstart was copied to /dev/sdb1 above but is
        # referenced as /dev/sda1 here -- presumably this disk is attached
        # as sda when the install VM boots; confirm.
        args = ' '.join([
            'text',
            'ks=hd:/dev/sda1:/%s' % ks_cfg, 'console=ttyS0,38400n8',
            'loglevel=debug'
        ])
        # Tell Anaconda not to store its logs in the installed image,
        # unless requested to keep them for debugging.
        if not savelogs:
            args += ' inst.nosave=all'
        cfg = re.sub(r'append initrd=initrd\.img.*', r'\g<0> %s' % args, cfg)

        # Change labels to explicit partitions.
        if release.startswith(('centos7', 'rhel7', 'rhel-7', 'oraclelinux7',
                               'centos8', 'rhel8')):
            cfg = re.sub(r'LABEL=[^ ]+', 'LABEL=INSTALLER', cfg)

        # Print out the modifications.
        diff = difflib.Differ().compare(oldcfg.splitlines(1),
                                        cfg.splitlines(1))
        logging.info('Modified extlinux.conf:\n%s' % '\n'.join(diff))

        f.seek(0)
        f.write(cfg)
        f.truncate()

    # Activate extlinux.
    utils.Execute(['extlinux', '--install', 'installer/extlinux'])
コード例 #6
0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import logging
import re
import time
import uuid

import utils

utils.AptGetInstall(['python-pip'])
utils.Execute(['pip', 'install', '--upgrade', 'google-api-python-client'])

from googleapiclient import discovery
from oauth2client.client import GoogleCredentials

# Globals initialized to None here and presumably populated by a setup
# routine outside this view -- TODO confirm where they are assigned.
TESTEE = None
PROJECT = None
ZONE = None
COMPUTE = None
# Metadata key names for SSH keys ('ssh-keys' vs legacy 'sshKeys');
# NOTE(review): which scope each key applies to is not visible here.
SSH_KEYS = 'ssh-keys'
SSHKEYS = 'sshKeys'
# Enum-like markers distinguishing instance- vs project-level metadata.
INSTANCE_LEVEL = 1
PROJECT_LEVEL = 2

コード例 #7
0
def main():
  """Build a Debian image via debian-cloud-images' FAI tooling and upload it.

  Parameters come from instance metadata. Installs FAI from the
  fai-project.org apt repository, fetches the debian-cloud-images scripts
  at a pinned commit, runs the build, uploads the resulting disk tarball
  to image_dest, and uploads a JSON synopsis of installed packages to the
  daisy outs path.
  """
  # Get Parameters.
  debian_cloud_images_version = utils.GetMetadataAttribute(
      'debian_cloud_images_version', raise_on_not_found=True)
  debian_version = utils.GetMetadataAttribute(
      'debian_version', raise_on_not_found=True)
  # Metadata attributes are strings; any non-empty value (including
  # 'false') is truthy, so compare against 'true' explicitly instead of
  # relying on truthiness (the original always chose the EFI arch).
  uefi = utils.GetMetadataAttribute('uefi', raise_on_not_found=True) == 'true'
  image_dest = utils.GetMetadataAttribute('image_dest',
      raise_on_not_found=True)
  outs_path = utils.GetMetadataAttribute('daisy-outs-path',
      raise_on_not_found=True)

  logging.info('debian-cloud-images version: %s' % debian_cloud_images_version)
  logging.info('debian version: %s' % debian_version)

  # First, install fai-client from fai-project repository
  key_url = 'https://fai-project.org/download/2BF8D9FE074BCDE4.asc'
  # NOTE(review): urllib.urlretrieve is the Python 2 API -- confirm this
  # script runs under Python 2 (Python 3 moved it to urllib.request).
  urllib.urlretrieve(key_url, 'key.asc')
  utils.Execute(['apt-key', 'add', 'key.asc'])
  with open('/etc/apt/sources.list.d/fai-project.list', 'w') as fai_list:
    fai_list.write('deb https://fai-project.org/download stretch koeln')

  # force an apt-get update before next install
  utils.AptGetInstall.first_run = True
  utils.AptGetInstall(['fai-server', 'fai-setup-storage'])

  # Download and setup debian's debian-cloud-images scripts.
  url_params = {
      'project': 'debian-cloud-images',
      'commit': debian_cloud_images_version,
  }
  url_params['filename'] = '%(project)s-%(commit)s' % url_params

  url = "https://salsa.debian.org/cloud-team/" + \
      "%(project)s/-/archive/%(commit)s/%(filename)s.tar.gz" % url_params
  logging.info('Downloading %(project)s at commit %(commit)s' % url_params)
  urllib.urlretrieve(url, 'fci.tar.gz')
  with tarfile.open('fci.tar.gz') as tar:
    tar.extractall()
  logging.info('Downloaded and extracted %s.' % url)

  # Run fai-tool.
  work_dir = url_params['filename']
  fai_bin = 'bin/build'
  arch = 'amd64-efi' if uefi else 'amd64'
  cmd = [fai_bin, debian_version, 'gce', arch, 'disk']
  logging.info('Starting build in %s with params: %s' % (
      work_dir, ' '.join(cmd))
  )
  utils.Execute(cmd, cwd=work_dir, capture_output=True)

  # Packs a gzipped tar file with disk.raw inside
  disk_tar_gz = 'disk.tar.gz'
  logging.info('Compressing it into tarball %s' % disk_tar_gz)
  tar = tarfile.open(disk_tar_gz, "w:gz")
  tar.add('%s/disk.raw' % work_dir, arcname="disk.raw")
  tar.close()

  # Upload tar.
  logging.info('Saving %s to %s' % (disk_tar_gz, image_dest))
  utils.UploadFile(disk_tar_gz, image_dest)

  # Create and upload the synopsis of the image.
  logging.info('Creating image synopsis.')
  synopsis = {}
  packages = collections.OrderedDict()
  _, output = utils.Execute(['dpkg-query', '-W'], capture_output=True)
  for line in output.split('\n')[:-1]:  # Last line is an empty line.
    parts = line.split()
    packages[parts[0]] = parts[1]
  synopsis['installed_packages'] = packages
  with open('/tmp/synopsis.json', 'w') as f:
    f.write(json.dumps(synopsis))
  logging.info('Uploading image synopsis.')
  synopsis_dest = os.path.join(outs_path, 'synopsis.json')
  utils.UploadFile('/tmp/synopsis.json', synopsis_dest)
コード例 #8
0
  ['stable' (default), 'unstable', 'staging'].
image_dest: The Cloud Storage destination for the resultant image.
"""

import collections
import json
import logging
import os
import shutil
import urllib.request
import zipfile

import utils

# Install build dependencies at import time: Debian-packaged Python libs
# via apt, the remainder via pip.
utils.AptGetInstall(
    ['python-pip', 'python-termcolor', 'python-fysom', 'python-jsonschema',
     'python-yaml', 'python-docopt'])
utils.PipInstall(
    ['json_minify'])

import yaml  # noqa: E402,I202

# Directory where bootstrap-vz is placed.
BVZ_DIR = '/bvz'
# Recognized values for the google_cloud_repo metadata attribute
# (see the module docstring above).
REPOS = ['stable', 'unstable', 'staging']


def main():
  # Get Parameters.
  bvz_manifest = utils.GetMetadataAttribute(
      'bootstrap_vz_manifest', raise_on_not_found=True)
  bvz_url = utils.GetMetadataAttribute(
コード例 #9
0
# limitations under the License.
"""Translate the EL image on a GCE VM.

Parameters (retrieved from instance metadata):

el_release: The version of the distro (6 or 7)
install_gce_packages: True if GCE agent and SDK should be installed
use_rhel_gce_license: True if GCE RHUI package should be installed
"""

import logging
import os

import utils

utils.AptGetInstall(['python-guestfs', 'libguestfs-tools'])

import guestfs

# yum repo definition for Google Cloud Compute packages; the %s in
# baseurl is filled with the EL major release (6 or 7 per the module
# docstring) at use time.
repo_compute = '''
[google-cloud-compute]
name=Google Cloud Compute
baseurl=https://packages.cloud.google.com/yum/repos/google-cloud-compute-el%s-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
       https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
'''

repo_sdk = '''
コード例 #10
0
def main():
    """Build a Debian image with FAI's fai-diskimage and upload it.

    Reads build parameters from instance metadata, installs FAI (from
    stretch-backports when the build host runs Debian 9), downloads the
    debian-cloud-images config space, augments it with the GCE_SPECIFIC
    class, runs fai-diskimage, uploads the resulting tarball to
    image_dest, and uploads a JSON synopsis of installed packages.
    """
    # Get Parameters.
    build_date = utils.GetMetadataAttribute('build_date',
                                            raise_on_not_found=True)
    debian_cloud_images_version = utils.GetMetadataAttribute(
        'debian_cloud_images_version', raise_on_not_found=True)
    debian_version = utils.GetMetadataAttribute('debian_version',
                                                raise_on_not_found=True)
    image_dest = utils.GetMetadataAttribute('image_dest',
                                            raise_on_not_found=True)
    outs_path = utils.GetMetadataAttribute('daisy-outs-path',
                                           raise_on_not_found=True)

    logging.info('debian-cloud-images version: %s' %
                 debian_cloud_images_version)
    logging.info('debian version: %s' % debian_version)

    # force an apt-get update before next install
    utils.AptGetInstall.first_run = True

    debian_host_version = utils.Execute(['cat', '/etc/debian_version'],
                                        capture_output=True)
    # the FAI's version in stretch does not satisfy our need, so the version from
    # stretch-backports is needed.
    if debian_host_version[1].startswith('9'):
        utils.AptGetInstall(['fai-server', 'fai-setup-storage'],
                            'stretch-backports')
    else:
        utils.AptGetInstall(['fai-server', 'fai-setup-storage'])

    # Download and setup debian's debian-cloud-images scripts.
    url_params = {
        'project': 'debian-cloud-images',
        'version': debian_cloud_images_version,
    }
    url_params['filename'] = '%(project)s-%(version)s' % url_params

    url = "https://salsa.debian.org/cloud-team/" + \
        "%(project)s/-/archive/%(version)s/%(filename)s.tar.gz" % url_params
    logging.info('Downloading %(project)s at version %(version)s' % url_params)
    urllib.request.urlretrieve(url, 'fci.tar.gz')
    with tarfile.open('fci.tar.gz') as tar:
        tar.extractall()
    logging.info('Downloaded and extracted %s.' % url)

    # Config fai-tool
    work_dir = url_params['filename']
    fai_classes = [
        'DEBIAN', 'CLOUD', 'GCE', 'GCE_SDK', 'AMD64', 'GRUB_CLOUD_AMD64',
        'LINUX_IMAGE_CLOUD'
    ]
    if debian_version == 'stretch':
        fai_classes += ['STRETCH', 'BACKPORTS', 'BACKPORTS_LINUX']
    elif debian_version == 'buster':
        fai_classes += ['BUSTER']
    elif debian_version == 'sid':
        fai_classes += ['SID']
    image_size = '10G'
    disk_name = 'disk.raw'
    # os.getcwd() has no trailing slash (except at the filesystem root),
    # so an explicit separator is required to form a valid path.
    config_space = os.getcwd() + '/' + work_dir + '/config_space/'

    # Copy GCE_SPECIFIC fai class
    utils.Execute([
        'cp', '/files/fai_config/packages/GCE_SPECIFIC',
        config_space + 'package_config/GCE_SPECIFIC'
    ])
    fai_classes += ['GCE_SPECIFIC']

    # Run fai-tool.
    cmd = [
        'fai-diskimage', '--verbose', '--hostname', 'debian', '--class',
        ','.join(fai_classes), '--size', image_size, '--cspace', config_space,
        disk_name
    ]
    logging.info('Starting build in %s with params: %s' %
                 (work_dir, ' '.join(cmd)))
    utils.Execute(cmd, cwd=work_dir, capture_output=True)

    # Packs a gzipped tar file with disk.raw inside.
    # %-formatting requires a tuple for multiple conversions; the original
    # passed a list, which % treats as a single argument and raises
    # "not enough arguments for format string".
    disk_tar_gz = 'debian-%s-%s.tar.gz' % (debian_version, build_date)
    logging.info('Compressing it into tarball %s' % disk_tar_gz)
    tar = tarfile.open(disk_tar_gz, "w:gz")
    tar.add('%s/disk.raw' % work_dir, arcname="disk.raw")
    tar.close()

    # Upload tar.
    logging.info('Saving %s to %s' % (disk_tar_gz, image_dest))
    utils.UploadFile(disk_tar_gz, image_dest)

    # Create and upload the synopsis of the image.
    logging.info('Creating image synopsis.')
    synopsis = {}
    packages = collections.OrderedDict()
    _, output = utils.Execute(['dpkg-query', '-W'], capture_output=True)
    for line in output.split('\n')[:-1]:  # Last line is an empty line.
        parts = line.split()
        packages[parts[0]] = parts[1]
    synopsis['installed_packages'] = packages
    with open('/tmp/synopsis.json', 'w') as f:
        f.write(json.dumps(synopsis))
    logging.info('Uploading image synopsis.')
    synopsis_dest = os.path.join(outs_path, 'synopsis.json')
    utils.UploadFile('/tmp/synopsis.json', synopsis_dest)
コード例 #11
0
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Translate the Ubuntu image on a GCE VM.

Parameters (retrieved from instance metadata):

ubuntu_release: The version of the distro
install_gce_packages: True if GCE agent and SDK should be installed
"""

import logging

import utils

utils.AptGetInstall(['python-guestfs', 'tinyproxy'])

import guestfs

tinyproxy_cfg = '''
User tinyproxy
Group tinyproxy
Port 8888
Timeout 600
LogLevel Info
PidFile "/run/tinyproxy/tinyproxy.pid"
MaxClients 100
MinSpareServers 5
MaxSpareServers 20
StartServers 10
MaxRequestsPerChild 0
コード例 #12
0
def main():
  """Build a UEFI (GPT) EL installer disk on /dev/sdb.

  Generates a kickstart config (also uploaded to the daisy logs path),
  partitions /dev/sdb into an ESP plus an ext2 installer partition,
  copies the installer ISO's EFI boot files and the ISO itself onto it,
  and rewrites grub.cfg for an unattended serial-console install.
  """
  # Get Parameters
  repo = utils.GetMetadataAttribute('google_cloud_repo',
                    raise_on_not_found=True)
  release = utils.GetMetadataAttribute('el_release', raise_on_not_found=True)
  daisy_logs_path = utils.GetMetadataAttribute('daisy-logs-path',
                                               raise_on_not_found=True)
  # Boolean flags arrive as metadata strings; compare against 'true'.
  savelogs = utils.GetMetadataAttribute('el_savelogs') == 'true'
  byos = utils.GetMetadataAttribute('rhel_byos') == 'true'
  sap = utils.GetMetadataAttribute('rhel_sap') == 'true'

  logging.info('EL Release: %s' % release)
  logging.info('Google Cloud repo: %s' % repo)
  logging.info('Build working directory: %s' % os.getcwd())

  iso_file = '/files/installer.iso'

  utils.AptGetInstall(['rsync'])

  # Build the kickstart file.
  ks_content = ks_helpers.BuildKsConfig(release, repo, byos, sap)
  ks_cfg = 'ks.cfg'
  utils.WriteFile(ks_cfg, ks_content)
  # Save the generated kickstart file to the build logs.
  utils.UploadFile(ks_cfg, '%s/ks.cfg' % daisy_logs_path)

  # Write the installer disk. Write GPT label, create partition,
  # copy installer boot files over.
  logging.info('Writing installer disk.')
  utils.Execute(['parted', '/dev/sdb', 'mklabel', 'gpt'])
  utils.Execute(['sync'])
  utils.Execute(['parted', '/dev/sdb', 'mkpart', 'primary', 'fat32', '1MB',
                 '1024MB'])
  utils.Execute(['sync'])
  utils.Execute(['parted', '/dev/sdb', 'mkpart', 'primary', 'ext2', '1024MB',
                 '100%'])
  utils.Execute(['sync'])
  utils.Execute(['parted', '/dev/sdb', 'set', '1', 'boot', 'on'])
  utils.Execute(['sync'])
  utils.Execute(['parted', '/dev/sdb', 'set', '1', 'esp', 'on'])
  utils.Execute(['sync'])
  utils.Execute(['mkfs.vfat', '-F', '32', '/dev/sdb1'])
  utils.Execute(['sync'])
  utils.Execute(['fatlabel', '/dev/sdb1', 'ESP'])
  utils.Execute(['sync'])
  utils.Execute(['mkfs.ext2', '-L', 'INSTALLER', '/dev/sdb2'])
  utils.Execute(['sync'])

  utils.Execute(['mkdir', '-vp', 'iso', 'installer', 'boot'])
  utils.Execute(['mount', '-o', 'ro,loop', '-t', 'iso9660', iso_file, 'iso'])
  utils.Execute(['mount', '-t', 'vfat', '/dev/sdb1', 'boot'])
  utils.Execute(['mount', '-t', 'ext2', '/dev/sdb2', 'installer'])
  utils.Execute(['rsync', '-Pav', 'iso/EFI', 'iso/images', 'boot/'])
  utils.Execute(['cp', iso_file, 'installer/'])
  utils.Execute(['cp', ks_cfg, 'installer/'])

  # Modify boot config.
  with open('boot/EFI/BOOT/grub.cfg', 'r+') as f:
    oldcfg = f.read()
    # Retarget grub's search from the RHEL media label to the ESP label.
    cfg = re.sub(r'-l .RHEL.*', r"""-l 'ESP'""", oldcfg)
    cfg = re.sub(r'timeout=60', 'timeout=1', cfg)
    cfg = re.sub(r'set default=.*', 'set default="0"', cfg)
    # Route grub input/output to the serial console.
    cfg = re.sub(r'load_video\n',
           r'serial --speed=38400 --unit=0 --word=8 --parity=no\n'
           'terminal_input serial\nterminal_output serial\n', cfg)

    # Change boot args.
    args = ' '.join([
      'text', 'ks=hd:LABEL=INSTALLER:/%s' % ks_cfg,
      'console=ttyS0,38400n8', 'inst.gpt', 'loglevel=debug'
    ])

    # Tell Anaconda not to store its logs in the installed image,
    # unless requested to keep them for debugging.
    if not savelogs:
      args += ' inst.nosave=all'
    cfg = re.sub(r'inst\.stage2.*', r'\g<0> %s' % args, cfg)

    # Change labels to explicit partitions.
    cfg = re.sub(r'LABEL=[^ ]+', 'LABEL=INSTALLER', cfg)

    # Print out the modifications.
    diff = difflib.Differ().compare(
        oldcfg.splitlines(1),
        cfg.splitlines(1))
    logging.info('Modified grub.cfg:\n%s' % '\n'.join(diff))

    f.seek(0)
    f.write(cfg)
    f.truncate()

  utils.Execute(['umount', 'installer'])
  utils.Execute(['umount', 'iso'])
  utils.Execute(['umount', 'boot'])
コード例 #13
0
def main():
    """Build a Debian image with fai-diskimage and upload it.

    Downloads the debian-cloud-images config space at a pinned version,
    layers the GCE-specific FAI classes (plus staging/unstable package
    repos when requested via google_cloud_repo), runs fai-diskimage, and
    uploads the resulting root.tar.gz to the daisy outs path.
    """
    # Get Parameters.
    build_date = utils.GetMetadataAttribute('build_date',
                                            raise_on_not_found=True)
    debian_cloud_images_version = utils.GetMetadataAttribute(
        'debian_cloud_images_version', raise_on_not_found=True)
    debian_version = utils.GetMetadataAttribute('debian_version',
                                                raise_on_not_found=True)
    google_cloud_repo = utils.GetMetadataAttribute('google_cloud_repo',
                                                   raise_on_not_found=True)
    outs_path = utils.GetMetadataAttribute('daisy-outs-path',
                                           raise_on_not_found=True)

    logging.info('debian-cloud-images version: %s' %
                 debian_cloud_images_version)
    logging.info('debian version: %s' % debian_version)

    # force an apt-get update before next install
    utils.AptGetInstall.first_run = True
    utils.AptGetInstall(['fai-server', 'fai-setup-storage'])

    # Download and setup debian's debian-cloud-images scripts.
    url_params = {
        'project': 'debian-cloud-images',
        'version': debian_cloud_images_version,
    }
    url_params['filename'] = '%(project)s-%(version)s' % url_params

    url = ('https://salsa.debian.org/cloud-team/'
           '%(project)s/-/archive/%(version)s/%(filename)s.tar.gz' %
           url_params)
    logging.info('Downloading %(project)s at version %(version)s' % url_params)
    urllib.request.urlretrieve(url, 'fci.tar.gz')
    with tarfile.open('fci.tar.gz') as tar:
        tar.extractall()
    logging.info('Downloaded and extracted %s.' % url)

    # Config fai-tool
    work_dir = url_params['filename']
    fai_classes = [
        'DEBIAN', 'CLOUD', 'GCE', 'GCE_SDK', 'AMD64', 'GRUB_CLOUD_AMD64',
        'LINUX_IMAGE_CLOUD'
    ]
    if debian_version == 'buster':
        fai_classes += ['BUSTER', 'BACKPORTS']
    elif debian_version == "bullseye":
        fai_classes += ['BULLSEYE']
    elif debian_version == 'sid':
        fai_classes += ['SID']
    image_size = '10G'
    disk_name = 'disk.raw'
    # os.getcwd() has no trailing slash (except at the filesystem root),
    # so an explicit separator is required to form a valid path.
    config_space = os.getcwd() + '/' + work_dir + '/config_space/'
    apt_sources_base = 'files/etc/apt/sources.list.d/'

    # Copy GCE_SPECIFIC fai classes.
    CopyToConfigSpace('/files/fai_config/packages/GCE_SPECIFIC',
                      'package_config/GCE_SPECIFIC', config_space)
    os.mkdir(config_space + apt_sources_base + 'google-cloud.list')
    CopyToConfigSpace('/files/fai_config/sources/GCE_SPECIFIC',
                      apt_sources_base + 'google-cloud.list/GCE_SPECIFIC',
                      config_space)
    # apt_sources_base already ends with '/'; no extra separator needed.
    CopyToConfigSpace('/files/fai_config/sources/file_modes',
                      apt_sources_base + 'google-cloud.list/file_modes',
                      config_space)
    CopyToConfigSpace('/files/fai_config/sources/repository.GCE_SPECIFIC',
                      'hooks/repository.GCE_SPECIFIC', config_space)
    fai_classes += ['GCE_SPECIFIC']

    # GCE staging package repo.
    if google_cloud_repo == 'staging' or google_cloud_repo == 'unstable':
        os.mkdir(config_space + apt_sources_base + 'google-cloud-staging.list')
        CopyToConfigSpace(
            '/files/fai_config/sources/GCE_STAGING',
            apt_sources_base + 'google-cloud-staging.list/GCE_STAGING',
            config_space)
        CopyToConfigSpace(
            '/files/fai_config/sources/file_modes',
            apt_sources_base + 'google-cloud-staging.list/file_modes',
            config_space)
        CopyToConfigSpace('/files/fai_config/sources/repository.GCE_STAGING',
                          'hooks/repository.GCE_STAGING', config_space)
        fai_classes += ['GCE_STAGING']

    # GCE unstable package repo.
    if google_cloud_repo == 'unstable':
        os.mkdir(config_space + apt_sources_base +
                 'google-cloud-unstable.list')
        CopyToConfigSpace(
            '/files/fai_config/sources/GCE_UNSTABLE',
            apt_sources_base + 'google-cloud-unstable.list/GCE_UNSTABLE',
            config_space)
        CopyToConfigSpace(
            '/files/fai_config/sources/file_modes',
            apt_sources_base + 'google-cloud-unstable.list/file_modes',
            config_space)
        # Fixed copy-paste error: the hook source is repository.GCE_UNSTABLE,
        # not file_modes (mirrors the GCE_SPECIFIC/GCE_STAGING blocks above).
        # TODO(review): confirm the source file exists under
        # /files/fai_config/sources/.
        CopyToConfigSpace('/files/fai_config/sources/repository.GCE_UNSTABLE',
                          'hooks/repository.GCE_UNSTABLE', config_space)
        fai_classes += ['GCE_UNSTABLE']

    # Cleanup class for GCE.
    os.mkdir(config_space + 'scripts/GCE_CLEAN')
    CopyToConfigSpace('/files/fai_config/scripts/10-gce-clean',
                      'scripts/GCE_CLEAN/10-gce-clean', config_space)
    # Restore the executable bit (not preserved by the file copy).
    os.chmod(config_space + 'scripts/GCE_CLEAN/10-gce-clean', 0o755)
    fai_classes += ['GCE_CLEAN']

    # Remove failing test method for now.
    os.remove(config_space + 'hooks/tests.CLOUD')

    # Run fai-tool.
    cmd = [
        'fai-diskimage', '--verbose', '--hostname', 'debian', '--class',
        ','.join(fai_classes), '--size', image_size, '--cspace', config_space,
        disk_name
    ]
    logging.info('Starting build in %s with params: %s' %
                 (work_dir, ' '.join(cmd)))
    utils.Execute(cmd, cwd=work_dir, capture_output=True)

    # Packs a gzipped tar file with disk.raw inside
    disk_tar_gz = 'debian-{}-{}.tar.gz'.format(debian_version, build_date)
    logging.info('Compressing it into tarball %s' % disk_tar_gz)
    tar = tarfile.open(disk_tar_gz, 'w:gz')
    tar.add('%s/disk.raw' % work_dir, arcname='disk.raw')
    tar.close()

    # Upload tar.
    image_dest = os.path.join(outs_path, 'root.tar.gz')
    logging.info('Saving %s to %s' % (disk_tar_gz, image_dest))
    utils.UploadFile(disk_tar_gz, image_dest)
コード例 #14
0
ファイル: build_fai.py プロジェクト: pjh/compute-image-tools
def main():
    """Build a Debian image with fai-diskimage and upload it.

    The FAI config space comes from debian-cloud-images at a pinned
    version; the local GCE classes are copied over it with mycopytree.
    The resulting disk is tarred and uploaded as root.tar.gz to the daisy
    outs path.
    """
    # Get Parameters.
    build_date = utils.GetMetadataAttribute('build_date',
                                            raise_on_not_found=True)
    debian_cloud_images_version = utils.GetMetadataAttribute(
        'debian_cloud_images_version', raise_on_not_found=True)
    debian_version = utils.GetMetadataAttribute('debian_version',
                                                raise_on_not_found=True)
    outs_path = utils.GetMetadataAttribute('daisy-outs-path',
                                           raise_on_not_found=True)

    logging.info('debian-cloud-images version: %s' %
                 debian_cloud_images_version)
    logging.info('debian version: %s' % debian_version)

    # force an apt-get update before next install
    utils.AptGetInstall.first_run = True
    utils.AptGetInstall(['fai-server', 'fai-setup-storage'])

    # Download and setup debian's debian-cloud-images scripts.
    url_params = {
        'project': 'debian-cloud-images',
        'version': debian_cloud_images_version,
    }
    url_params['filename'] = '%(project)s-%(version)s' % url_params

    url = ('https://salsa.debian.org/cloud-team/'
           '%(project)s/-/archive/%(version)s/%(filename)s.tar.gz' %
           url_params)

    logging.info('Downloading %(project)s at version %(version)s', url_params)
    urllib.request.urlretrieve(url, 'fci.tar.gz')
    with tarfile.open('fci.tar.gz') as tar:
        tar.extractall()
    logging.info('Downloaded and extracted %s.', url)

    # Copy our classes to the FAI config space
    work_dir = url_params['filename']
    # os.getcwd() has no trailing slash (except at the filesystem root),
    # so an explicit separator is required to form a valid path.
    config_space = os.getcwd() + '/' + work_dir + '/config_space/'
    mycopytree('/files/fai_config', config_space)

    # Remove failing test method for now.
    os.remove(config_space + 'hooks/tests.CLOUD')

    # Config fai-tool
    fai_classes = [
        'DEBIAN', 'CLOUD', 'GCE', 'GCE_SDK', 'AMD64', 'GRUB_CLOUD_AMD64',
        'LINUX_IMAGE_CLOUD', 'GCE_SPECIFIC', 'GCE_CLEAN'
    ]
    if debian_version == 'buster':
        fai_classes += ['BUSTER', 'BACKPORTS']
    elif debian_version == 'bullseye':
        fai_classes += ['BULLSEYE']
    elif debian_version == 'sid':
        fai_classes += ['SID']

    image_size = '10G'
    disk_name = 'disk.raw'

    # Run fai-tool.
    cmd = [
        'fai-diskimage', '--verbose', '--hostname', 'debian', '--class',
        ','.join(fai_classes), '--size', image_size, '--cspace', config_space,
        disk_name
    ]
    logging.info('Starting build in %s with params: %s', work_dir,
                 ' '.join(cmd))
    utils.Execute(cmd, cwd=work_dir, capture_output=True)

    # Packs a gzipped tar file with disk.raw inside
    disk_tar_gz = 'debian-{}-{}.tar.gz'.format(debian_version, build_date)
    logging.info('Compressing it into tarball %s', disk_tar_gz)
    tar = tarfile.open(disk_tar_gz, 'w:gz')
    tar.add('%s/disk.raw' % work_dir, arcname='disk.raw')
    tar.close()

    # Upload tar.
    image_dest = os.path.join(outs_path, 'root.tar.gz')
    logging.info('Saving %s to %s', disk_tar_gz, image_dest)
    utils.UploadFile(disk_tar_gz, image_dest)
コード例 #15
0
to use.
debian_version: The FAI tool debian version to be requested.
image_dest: The Cloud Storage destination for the resultant image.
"""

import collections
import json
import logging
import os
import tarfile
import urllib

import utils

# The following package is necessary to retrieve from https repositories.
utils.AptGetInstall(['apt-transport-https', 'qemu-utils', 'dosfstools'])


def main():
  # Get Parameters.
  debian_cloud_images_version = utils.GetMetadataAttribute(
      'debian_cloud_images_version', raise_on_not_found=True)
  debian_version = utils.GetMetadataAttribute(
      'debian_version', raise_on_not_found=True)
  uefi = utils.GetMetadataAttribute('uefi', raise_on_not_found=True)
  image_dest = utils.GetMetadataAttribute('image_dest',
      raise_on_not_found=True)
  outs_path = utils.GetMetadataAttribute('daisy-outs-path',
      raise_on_not_found=True)

  logging.info('debian-cloud-images version: %s' % debian_cloud_images_version)
コード例 #16
0
def main():
  """Build a UEFI (GPT) EL installer disk on /dev/sdb.

  Generates a kickstart config, partitions /dev/sdb into an ESP plus an
  ext2 installer partition, copies the installer ISO's EFI boot files and
  the ISO itself onto the disk, rewrites grub.cfg for an unattended
  serial-console install, and writes the Google boot-path file.
  """
  # Get Parameters
  # Boolean flags arrive as metadata strings; compare against 'true'.
  repo = utils.GetMetadataAttribute('google_cloud_repo',
                                    raise_on_not_found=True)
  release = utils.GetMetadataAttribute('el_release', raise_on_not_found=True)
  savelogs = utils.GetMetadataAttribute('el_savelogs',
                                        raise_on_not_found=False)
  savelogs = savelogs == 'true'
  byol = utils.GetMetadataAttribute('rhel_byol', raise_on_not_found=False)
  byol = byol == 'true'
  sap_hana = utils.GetMetadataAttribute('rhel_sap_hana',
                                        raise_on_not_found=False)
  sap_hana = sap_hana == 'true'
  sap_apps = utils.GetMetadataAttribute('rhel_sap_apps',
                                        raise_on_not_found=False)
  sap_apps = sap_apps == 'true'
  sap = utils.GetMetadataAttribute('rhel_sap', raise_on_not_found=False)
  sap = sap == 'true'
  logging.info('EL Release: %s' % release)
  logging.info('Google Cloud repo: %s' % repo)
  logging.info('Build working directory: %s' % os.getcwd())

  # NOTE(review): relative path here, unlike the '/files/installer.iso'
  # used by sibling variants -- presumably relative to the build cwd;
  # confirm.
  iso_file = 'installer.iso'

  # Necessary libs and tools to build the installer disk.
  utils.AptGetInstall(['dosfstools', 'rsync'])

  # Build the kickstart file.
  ks_content = ks_helpers.BuildKsConfig(release, repo, byol, sap, sap_hana,
                                        sap_apps)
  ks_cfg = 'ks.cfg'
  utils.WriteFile(ks_cfg, ks_content)

  # Write the installer disk. Write GPT label, create partition,
  # copy installer boot files over.
  logging.info('Writing installer disk.')
  utils.Execute(['parted', '/dev/sdb', 'mklabel', 'gpt'])
  utils.Execute(['sync'])
  utils.Execute(['parted', '/dev/sdb', 'mkpart', 'primary', 'fat32', '1MB',
                 '201MB'])
  utils.Execute(['sync'])
  utils.Execute(['parted', '/dev/sdb', 'mkpart', 'primary', 'ext2', '201MB',
                 '100%'])
  utils.Execute(['sync'])
  utils.Execute(['parted', '/dev/sdb', 'set', '1', 'boot', 'on'])
  utils.Execute(['sync'])
  utils.Execute(['parted', '/dev/sdb', 'set', '1', 'esp', 'on'])
  utils.Execute(['sync'])
  utils.Execute(['mkfs.vfat', '-F', '32', '/dev/sdb1'])
  utils.Execute(['sync'])
  utils.Execute(['fatlabel', '/dev/sdb1', 'ESP'])
  utils.Execute(['sync'])
  utils.Execute(['mkfs.ext2', '-L', 'INSTALLER', '/dev/sdb2'])
  utils.Execute(['sync'])

  utils.Execute(['mkdir', '-vp', 'iso', 'installer', 'boot'])
  utils.Execute(['mount', '-o', 'ro,loop', '-t', 'iso9660', iso_file, 'iso'])
  utils.Execute(['mount', '-t', 'vfat', '/dev/sdb1', 'boot'])
  utils.Execute(['mount', '-t', 'ext2', '/dev/sdb2', 'installer'])
  utils.Execute(['rsync', '-Pav', 'iso/EFI', 'iso/images', 'boot/'])
  utils.Execute(['cp', iso_file, 'installer/'])
  utils.Execute(['cp', ks_cfg, 'installer/'])

  # Modify boot config.
  with open('boot/EFI/BOOT/grub.cfg', 'r+') as f:
    oldcfg = f.read()
    # Retarget grub's search from the RHEL media label to the ESP label.
    cfg = re.sub(r'-l .RHEL.*', r"""-l 'ESP'""", oldcfg)
    cfg = re.sub(r'timeout=60', 'timeout=1', cfg)
    cfg = re.sub(r'set default=.*', 'set default="0"', cfg)
    # Route grub input/output to the serial console.
    cfg = re.sub(r'load_video\n', r'serial --speed=38400 --unit=0 --word=8 '
                 '--parity=no\nterminal_input serial\nterminal_output '
                 'serial\n', cfg)

    # Change boot args.
    args = ' '.join([
        'text', 'ks=hd:LABEL=INSTALLER:/%s' % ks_cfg,
        'console=ttyS0,38400n8', 'inst.sshd=1', 'inst.gpt'
    ])
    # Tell Anaconda not to store its logs in the installed image,
    # unless requested to keep them for debugging.
    if not savelogs:
      args += ' inst.nosave=all'
    cfg = re.sub(r'inst\.stage2.*', r'\g<0> %s' % args, cfg)

    if release in ['centos7', 'rhel7', 'oraclelinux7']:
      cfg = re.sub(r'LABEL=[^ :]+', 'LABEL=INSTALLER', cfg)

    # Print out the modifications.
    diff = difflib.Differ().compare(oldcfg.splitlines(1), cfg.splitlines(1))
    logging.info('Modified grub.cfg:\n%s' % '\n'.join(diff))

    f.seek(0)
    f.write(cfg)
    f.truncate()

  logging.info("Creating boot path file\n")
  utils.Execute(['mkdir', '-p', 'boot/EFI/Google/gsetup'])
  with open('boot/EFI/Google/gsetup/boot', 'w') as g:
    g.write("\\EFI\\BOOT\\BOOTX64.EFI\n")

  utils.Execute(['umount', 'installer'])
  utils.Execute(['umount', 'iso'])
  utils.Execute(['umount', 'boot'])
コード例 #17
0
def main():
  """Build a Debian GCE disk image with FAI and upload it to Cloud Storage.

  Parameters (retrieved from instance metadata):
    build_date: date stamp embedded in the output tarball name.
    debian_version: Debian release codename ('buster', 'bullseye', 'sid').
    daisy-outs-path: Cloud Storage destination directory for root.tar.gz.

  Raises:
    subprocess.CalledProcessError: if fai-diskimage exits non-zero.
  """
  # Get parameters from instance metadata.
  build_date = utils.GetMetadataAttribute(
      'build_date', raise_on_not_found=True)
  # Pinned commit of Debian's debian-cloud-images repository.
  debian_cloud_images_version = '69783f7417aefb332d5d7250ba242adeca444131'
  debian_version = utils.GetMetadataAttribute(
      'debian_version', raise_on_not_found=True)
  outs_path = utils.GetMetadataAttribute('daisy-outs-path',
                                         raise_on_not_found=True)

  # Lazy %-style args: formatting happens only if the record is emitted.
  logging.info('debian-cloud-images version: %s', debian_cloud_images_version)
  logging.info('debian version: %s', debian_version)

  # Force an apt-get update before the next install.
  utils.AptGetInstall.first_run = True
  utils.AptGetInstall(['fai-server', 'fai-setup-storage'])

  # Download and set up Debian's debian-cloud-images scripts.
  url_params = {
      'project': 'debian-cloud-images',
      'version': debian_cloud_images_version,
  }
  url_params['filename'] = '%(project)s-%(version)s' % url_params

  url = ('https://salsa.debian.org/cloud-team/'
         '%(project)s/-/archive/%(version)s/%(filename)s.tar.gz' % url_params)

  logging.info('Downloading %(project)s at version %(version)s', url_params)
  urllib.request.urlretrieve(url, 'fci.tar.gz')
  with tarfile.open('fci.tar.gz') as tar:
    # NOTE(review): extractall() trusts archive member paths. The tarball is
    # fetched over HTTPS from salsa.debian.org at a pinned commit, but pass a
    # filter (Python 3.12+) if this ever handles less-trusted input.
    tar.extractall()
  logging.info('Downloaded and extracted %s.', url)

  work_dir = url_params['filename']
  # Keep the trailing separator: relative paths are concatenated onto it below.
  config_space = os.path.join(os.getcwd(), work_dir, 'config_space', '')

  # We are going to replace this with our variant.
  os.remove(config_space + 'class/BULLSEYE.var')

  # Remove failing test method for now.
  os.remove(config_space + 'hooks/tests.CLOUD')

  # Copy our classes to the FAI config space.
  mycopytree('/files/fai_config', config_space)

  # Set scripts executable (daisy doesn't preserve this).
  os.chmod(config_space + 'scripts/GCE_CLEAN/10-gce-clean', 0o755)
  os.chmod(config_space + 'scripts/GCE_SPECIFIC/12-sshd', 0o755)

  # Config fai-tool.
  # Base classes common to every build.
  fai_classes = ['DEBIAN', 'CLOUD', 'GCE', 'GCE_SDK', 'LINUX_IMAGE_CLOUD',
                 'GCE_SPECIFIC', 'GCE_CLEAN']

  # Arch-specific classes.
  if platform.machine() == 'aarch64':
    fai_classes += ['ARM64', 'GRUB_EFI_ARM64', 'BACKPORTS_LINUX']
  else:
    fai_classes += ['AMD64', 'GRUB_CLOUD_AMD64']

  # Version-specific classes; unknown versions add no extra class.
  version_classes = {
      'buster': 'BUSTER',
      'bullseye': 'BULLSEYE',
      'sid': 'SID',
  }
  if debian_version in version_classes:
    fai_classes.append(version_classes[debian_version])

  image_size = '10G'
  disk_name = 'disk.raw'

  # Run fai-tool to build the raw disk image.
  cmd = ['fai-diskimage', '--verbose', '--hostname', 'debian', '--class',
         ','.join(fai_classes), '--size', image_size, '--cspace',
         config_space, disk_name]
  logging.info('Starting build in %s with params: %s', work_dir, ' '.join(cmd))
  returncode, output = utils.Execute(
      cmd, cwd=work_dir, capture_output=True, raise_errors=False)

  # Verbose printing to console for debugging.
  for line in output.splitlines():
    print(line)

  if returncode != 0:
    raise subprocess.CalledProcessError(returncode, cmd)

  # Pack a gzipped tar file with disk.raw inside. The context manager closes
  # (and flushes) the archive even if tar.add raises.
  disk_tar_gz = 'debian-{}-{}.tar.gz'.format(debian_version, build_date)
  logging.info('Compressing it into tarball %s', disk_tar_gz)
  with tarfile.open(disk_tar_gz, 'w:gz', format=tarfile.GNU_FORMAT) as tar:
    tar.add('%s/%s' % (work_dir, disk_name), arcname=disk_name)

  # Upload tar.
  image_dest = os.path.join(outs_path, 'root.tar.gz')
  logging.info('Saving %s to %s', disk_tar_gz, image_dest)
  utils.UploadFile(disk_tar_gz, image_dest)