def SignAndroidImage(rootfs_dir, keyset, vboot_path=None):
  """If there is an android image, sign it."""
  system_img = os.path.join(
      rootfs_dir, 'opt/google/containers/android/system.raw.img')
  if not os.path.exists(system_img):
    logging.info('ARC image not found. Not signing Android APKs.')
    return

  arc_version = key_value_store.LoadFile(
      os.path.join(rootfs_dir, 'etc/lsb-release')).get(
          'CHROMEOS_ARC_VERSION', '')
  if not arc_version:
    logging.warning('CHROMEOS_ARC_VERSION not found in lsb-release. '
                    'Not signing Android APKs.')
    return

  extra_env = _PathForVbootSigningScripts(vboot_path)
  logging.info('Found ARC image version %s, resigning APKs', arc_version)
  # Sign the Android APKs using ${keyset.key_dir}/android keys.
  android_keydir = os.path.join(keyset.key_dir, 'android')
  logging.info('Using %s', android_keydir)

  # TODO(lamontjones) migrate sign_android_image.sh.
  cros_build_lib.run(['sign_android_image.sh', rootfs_dir, android_keydir],
                     extra_env=extra_env)
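
# A minimal sketch of the lsb-release lookup above (not part of the original
# module; the values shown are hypothetical). key_value_store.LoadFile parses
# the file into a plain dict of KEY=value pairs, so the ARC version check is
# just a .get() on that dict.
#
#   # /etc/lsb-release (excerpt, illustrative values)
#   #   CHROMEOS_RELEASE_VERSION=12345.0.0
#   #   CHROMEOS_ARC_VERSION=7750398
#   kvs = key_value_store.LoadFile(os.path.join(rootfs_dir, 'etc/lsb-release'))
#   kvs.get('CHROMEOS_ARC_VERSION', '')  # -> '7750398'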
def _ValidateBinhostConf(path, key):
  """Validates the binhost conf file defines only one environment variable.

  This function is effectively a sanity check that ensures unexpected
  configuration is not clobbered by conf overwrites.

  Args:
    path: Path to the file to validate.
    key: Expected binhost key.

  Raises:
    ValueError: If the file defines zero or more than one environment variable.
    KeyError: If the file does not define |key|.
  """
  if not os.path.exists(path):
    # If the conf file does not exist, e.g. with new targets, then whatever.
    return

  kvs = key_value_store.LoadFile(path)

  if not kvs:
    raise ValueError(
        'Found empty .conf file %s when a non-empty one was expected.' % path)
  elif len(kvs) > 1:
    raise ValueError('Conf file %s must define exactly 1 variable. '
                     'Instead found: %r' % (path, kvs))
  elif key not in kvs:
    raise KeyError('Did not find key %s in %s' % (key, path))
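
# A minimal usage sketch (not part of the original module; the key name and
# binhost URL below are hypothetical). It writes a throwaway single-variable
# conf file and runs the validation against it.
def _ExampleValidateBinhostConf():
  import tempfile
  with tempfile.NamedTemporaryFile(mode='w', suffix='.conf',
                                   delete=False) as f:
    f.write('PORTAGE_BINHOST="gs://example-bucket/amd64-host/packages/"\n')
  # Passes: the file defines exactly one variable and it matches |key|.
  _ValidateBinhostConf(f.name, 'PORTAGE_BINHOST')
  # Would raise KeyError: the file exists but does not define this key.
  # _ValidateBinhostConf(f.name, 'SOME_OTHER_KEY')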
def _EnvdGetVar(envd, var):
  """Given a Gentoo env.d file, extract a var from it.

  Args:
    envd: The env.d file to load (may be a glob path).
    var: The var to extract.

  Returns:
    The value of |var|.
  """
  envds = glob.glob(envd)
  assert len(envds) == 1, '%s: should have exactly 1 env.d file' % envd
  envd = envds[0]
  return key_value_store.LoadFile(envd)[var]
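
# A minimal usage sketch (not part of the original module; the file name,
# directory layout, and values are hypothetical). env.d files are plain
# KEY="value" files, so LoadFile can read them directly; the glob must match
# exactly one file or the assert above fires.
def _ExampleEnvdGetVar():
  import tempfile
  envd_dir = tempfile.mkdtemp()
  envd_file = os.path.join(envd_dir, '05gcc-x86_64-cros-linux-gnu')
  with open(envd_file, 'w') as f:
    f.write('PATH="/opt/example/gcc-bin"\nLDPATH="/opt/example/lib"\n')
  return _EnvdGetVar(os.path.join(envd_dir, '05gcc-*'), 'PATH')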
def MoblabVmTest(input_proto, _output_proto, _config):
  """Run Moblab VM tests."""
  chroot = controller_util.ParseChroot(input_proto.chroot)
  image_payload_dir = input_proto.image_payload.path.path
  cache_payload_dirs = [cp.path.path for cp in input_proto.cache_payloads]

  # Autotest and Moblab depend on the builder path, so we must read it from
  # the image.
  image_file = os.path.join(image_payload_dir, constants.TEST_IMAGE_BIN)
  with osutils.TempDir() as mount_dir:
    with image_lib.LoopbackPartitions(image_file,
                                      destination=mount_dir) as lp:
      # The file we want is /etc/lsb-release, which lives in the ROOT-A
      # disk partition.
      partition_paths = lp.Mount([constants.PART_ROOT_A])
      assert len(partition_paths) == 1, (
          'expected one partition path, got: %r' % partition_paths)
      partition_path = partition_paths[0]
      lsb_release_file = os.path.join(
          partition_path, constants.LSB_RELEASE_PATH.strip('/'))
      lsb_release_kvs = key_value_store.LoadFile(lsb_release_file)
      builder = lsb_release_kvs.get(cros_set_lsb_release.LSB_KEY_BUILDER_PATH)

  if not builder:
    cros_build_lib.Die('Image did not contain key %s in %s',
                       cros_set_lsb_release.LSB_KEY_BUILDER_PATH,
                       constants.LSB_RELEASE_PATH)

  # Now we can run the tests.
  with chroot.tempdir() as workspace_dir, chroot.tempdir() as results_dir:
    # Convert the results directory to an absolute chroot directory.
    chroot_results_dir = '/%s' % os.path.relpath(results_dir, chroot.path)

    vms = test.CreateMoblabVm(workspace_dir, chroot.path, image_payload_dir)
    cache_dir = test.PrepareMoblabVmImageCache(vms, builder,
                                               cache_payload_dirs)
    test.RunMoblabVmTest(chroot, vms, builder, cache_dir, chroot_results_dir)
    test.ValidateMoblabVmTest(results_dir)
def PerformStage(self):
  config = self._run.config
  build_root = self._build_root

  # Workspace builders use a different buildroot for overlays.
  if config.workspace_branch and self._run.options.workspace:
    build_root = self._run.options.workspace

  logging.info('Build re-executions have finished. Chromite source '
               'will not be modified for remainder of run.')
  logging.info("config['important']=%s", config['important'])
  logging.PrintBuildbotStepText(
      "config['important']=%s" % config['important'])

  # Flat list of all child config boards. Since child configs
  # are not allowed to have children, it is not necessary to search
  # deeper than one generation.
  child_configs = GetChildConfigListMetadata(
      child_configs=config['child_configs'], config_status_map=None)

  sdk_verinfo = key_value_store.LoadFile(
      os.path.join(build_root, constants.SDK_VERSION_FILE),
      ignore_missing=True)

  verinfo = self._run.GetVersionInfo()
  platform_tag = getattr(self._run.attrs, 'release_tag')
  if not platform_tag:
    platform_tag = verinfo.VersionString()

  version = {
      'full': self._run.GetVersion(),
      'milestone': verinfo.chrome_branch,
      'platform': platform_tag,
  }

  metadata = {
      # Version of the metadata format.
      'metadata-version': '2',
      'boards': config['boards'],
      'child-configs': child_configs,
      'build_type': config['build_type'],
      'important': config['important'],

      # Data for the toolchain used.
      'sdk-version': sdk_verinfo.get('SDK_LATEST_VERSION', '<unknown>'),
      'toolchain-url': sdk_verinfo.get('TC_PATH', '<unknown>'),
  }

  if len(config['boards']) == 1:
    metadata['toolchain-tuple'] = toolchain.GetToolchainTupleForBoard(
        config['boards'][0], buildroot=build_root)

  logging.info('Metadata being written: %s', metadata)
  self._run.attrs.metadata.UpdateWithDict(metadata)

  toolchains = set()
  toolchain_tuples = []
  primary_toolchains = []
  for board in config['boards']:
    toolchain_tuple = toolchain.GetToolchainTupleForBoard(
        board, buildroot=build_root)
    toolchains |= set(toolchain_tuple)
    toolchain_tuples.append(','.join(toolchain_tuple))
    if toolchain_tuple:
      primary_toolchains.append(toolchain_tuple[0])

  # Update 'version' separately to avoid overwriting the existing
  # entries in it (e.g. PFQ builders may have written the Chrome
  # version to uprev).
  logging.info("Metadata 'version' being written: %s", version)
  self._run.attrs.metadata.UpdateKeyDictWithDict('version', version)

  tags = {
      'boards': config['boards'],
      'child_config_names': [cc['name'] for cc in child_configs],
      'build_type': config['build_type'],
      'important': config['important'],

      # Data for the toolchain used.
      'sdk_version': sdk_verinfo.get('SDK_LATEST_VERSION', '<unknown>'),
      'toolchain_url': sdk_verinfo.get('TC_PATH', '<unknown>'),
      'toolchains': list(toolchains),
      'toolchain_tuples': toolchain_tuples,
      'primary_toolchains': primary_toolchains,
  }
  full_version = self._run.attrs.metadata.GetValue('version')
  tags.update({'version_%s' % v: full_version[v] for v in full_version})
  self._run.attrs.metadata.UpdateKeyDictWithDict(constants.METADATA_TAGS,
                                                 tags)

  # Ensure that all boards and child config boards have a per-board
  # metadata subdict.
  for b in config['boards']:
    self._run.attrs.metadata.UpdateBoardDictWithDict(b, {})

  for cc in child_configs:
    for b in cc['boards']:
      self._run.attrs.metadata.UpdateBoardDictWithDict(b, {})

  # Upload build metadata (and write it to database if necessary)
  self.UploadMetadata(filename=constants.PARTIAL_METADATA_JSON)

  # Write child-per-build and board-per-build rows to database
  build_identifier, db = self._run.GetCIDBHandle()
  build_id = build_identifier.cidb_id
  if db:
    # TODO(akeshet): replace this with a GetValue call once crbug.com/406522
    # is resolved.
    per_board_dict = self._run.attrs.metadata.GetDict()['board-metadata']
    for board, board_metadata in per_board_dict.items():
      self.buildstore.InsertBoardPerBuild(build_id, board)
      if board_metadata:
        self.buildstore.InsertBoardPerBuild(build_id, board, board_metadata)

  # Abort previous hw test suites. This happens after reexecution as it
  # requires chromite/third_party/swarming.client, which is not available
  # until after reexecution.
  self._AbortPreviousHWTestSuites()
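
# A minimal sketch of the SDK version conf consumed by the sdk_verinfo read in
# PerformStage above (not part of the original module; the values are
# hypothetical, only the variable names come from the code). Because the file
# is loaded with ignore_missing=True, a missing conf yields an empty dict and
# the .get() calls fall back to '<unknown>'.
#
#   # constants.SDK_VERSION_FILE (excerpt, illustrative values)
#   #   SDK_LATEST_VERSION="2021.01.26.094015"
#   #   TC_PATH="2021/01/%(target)s-2021.01.26.094015.tar.xz"
#   sdk_verinfo.get('SDK_LATEST_VERSION', '<unknown>')  # -> '2021.01.26.094015'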
def _RunAndCompare(self, test_input, multiline):
  result = key_value_store.LoadFile(test_input, multiline=multiline)
  self.assertEqual(self.expected, result)
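
# A minimal usage sketch (not part of the original test class; the fixture
# contents, |self.expected| value, and WriteFile call are assumptions). A
# concrete test writes a small key=value file, records the dict LoadFile
# should produce, and invokes the helper for the parsing mode under test.
#
#   osutils.WriteFile(test_input, 'A=1\nB=2\n')
#   self.expected = {'A': '1', 'B': '2'}
#   self._RunAndCompare(test_input, multiline=False)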
def main(argv):
  # Turn on strict sudo checks.
  cros_build_lib.STRICT_SUDO = True
  conf = key_value_store.LoadFile(
      os.path.join(constants.SOURCE_ROOT, constants.SDK_VERSION_FILE),
      ignore_missing=True)
  sdk_latest_version = conf.get('SDK_LATEST_VERSION', '<unknown>')
  bootstrap_frozen_version = conf.get('BOOTSTRAP_FROZEN_VERSION', '<unknown>')

  # Use latest SDK for bootstrapping if requested. Use a frozen version of SDK
  # for bootstrapping if BOOTSTRAP_FROZEN_VERSION is set.
  bootstrap_latest_version = (
      sdk_latest_version
      if bootstrap_frozen_version == '<unknown>'
      else bootstrap_frozen_version)
  parser, commands = _CreateParser(sdk_latest_version,
                                   bootstrap_latest_version)
  options = parser.parse_args(argv)
  chroot_command = options.commands

  # Some sanity checks first, before we ask for sudo credentials.
  cros_build_lib.AssertOutsideChroot()

  host = os.uname()[4]
  if host != 'x86_64':
    cros_build_lib.Die(
        "cros_sdk is currently only supported on x86_64; you're running"
        ' %s. Please find an x86_64 machine.' % (host,))

  # Merge the outside PATH setting if we re-execed ourselves.
  if 'CHROMEOS_SUDO_PATH' in os.environ:
    os.environ['PATH'] = '%s:%s' % (os.environ.pop('CHROMEOS_SUDO_PATH'),
                                    os.environ['PATH'])

  _ReportMissing(osutils.FindMissingBinaries(NEEDED_TOOLS))
  if options.proxy_sim:
    _ReportMissing(osutils.FindMissingBinaries(PROXY_NEEDED_TOOLS))
  missing_image_tools = osutils.FindMissingBinaries(IMAGE_NEEDED_TOOLS)

  if (sdk_latest_version == '<unknown>' or
      bootstrap_latest_version == '<unknown>'):
    cros_build_lib.Die(
        'No SDK version was found. '
        'Are you in a Chromium source tree instead of Chromium OS?\n\n'
        'Please change to a directory inside your Chromium OS source tree\n'
        'and retry. If you need to set up a Chromium OS source tree, see\n'
        '  https://dev.chromium.org/chromium-os/developer-guide')

  any_snapshot_operation = (options.snapshot_create or
                            options.snapshot_restore or
                            options.snapshot_delete or
                            options.snapshot_list)
  if any_snapshot_operation and not options.use_image:
    cros_build_lib.Die('Snapshot operations are not compatible with '
                       '--nouse-image.')

  if (options.snapshot_delete and
      options.snapshot_delete == options.snapshot_restore):
    parser.error('Cannot --snapshot_delete the same snapshot you are '
                 'restoring with --snapshot_restore.')

  _ReExecuteIfNeeded([sys.argv[0]] + argv)

  lock_path = os.path.dirname(options.chroot)
  lock_path = os.path.join(
      lock_path, '.%s_lock' % os.path.basename(options.chroot).lstrip('.'))

  # Expand out the aliases...
  if options.replace:
    options.delete = options.create = True
  if options.bootstrap:
    options.create = True

  # If a command is not given, default to enter.
  # pylint: disable=protected-access
  # This _group_actions access sucks, but upstream decided to not include an
  # alternative to optparse's option_list, and this is what they recommend.
  options.enter |= not any(
      getattr(options, x.dest) for x in commands._group_actions)
  # pylint: enable=protected-access
  options.enter |= bool(chroot_command)

  if (options.delete and not options.create and
      (options.enter or any_snapshot_operation)):
    parser.error('Trying to enter or snapshot the chroot when --delete '
                 'was specified makes no sense.')

  if (options.unmount and
      (options.create or options.enter or any_snapshot_operation)):
    parser.error('--unmount cannot be specified with other chroot actions.')

  if options.working_dir is not None and not os.path.isabs(
      options.working_dir):
    options.working_dir = path_util.ToChrootPath(options.working_dir)

  # Discern if we need to create the chroot.
  chroot_exists = cros_sdk_lib.IsChrootReady(options.chroot)
  if (options.use_image and not chroot_exists and not options.delete and
      not options.unmount and not missing_image_tools and
      os.path.exists(_ImageFileForChroot(options.chroot))):
    # Try to re-mount an existing image in case the user has rebooted.
    with cgroups.SimpleContainChildren('cros_sdk'):
      with locking.FileLock(lock_path, 'chroot lock') as lock:
        logging.debug('Checking if existing chroot image can be mounted.')
        lock.write_lock()
        cros_sdk_lib.MountChroot(options.chroot, create=False)
        chroot_exists = cros_sdk_lib.IsChrootReady(options.chroot)
        if chroot_exists:
          logging.notice('Mounted existing image %s on chroot',
                         _ImageFileForChroot(options.chroot))

  # Finally, flip create if necessary.
  if options.enter or options.snapshot_create:
    options.create |= not chroot_exists

  # Make sure we will download if we plan to create.
  options.download |= options.create

  # Anything that needs to manipulate the main chroot mount or communicate
  # with LVM needs to be done here before we enter the new namespaces.

  # If deleting, do it regardless of the use_image flag so that a
  # previously-created loopback chroot can also be cleaned up.
  # TODO(bmgordon): See if the DeleteChroot call below can be removed in
  # favor of this block.
  chroot_deleted = False
  if options.delete:
    with cgroups.SimpleContainChildren('cros_sdk'):
      # Set a timeout of 300 seconds when getting the lock.
      with locking.FileLock(lock_path, 'chroot lock',
                            blocking_timeout=300) as lock:
        try:
          lock.write_lock()
        except timeout_util.TimeoutError as e:
          logging.error('Acquiring write_lock on %s failed: %s', lock_path, e)
          if not options.force:
            cros_build_lib.Die('Exiting; use --force to continue w/o lock.')
          else:
            logging.warning(
                'cros_sdk was invoked with force option, continuing.')
        if missing_image_tools:
          logging.notice('Unmounting chroot.')
          osutils.UmountTree(options.chroot)
        else:
          logging.notice('Deleting chroot.')
          cros_sdk_lib.CleanupChrootMount(options.chroot, delete=True)
          chroot_deleted = True

  # If cleanup was requested, we have to do it while we're still in the
  # original namespace. Since cleaning up the mount will interfere with any
  # other commands, we exit here. The check above should have made sure that
  # no other action was requested, anyway.
  if options.unmount:
    # Set a timeout of 300 seconds when getting the lock.
    with locking.FileLock(lock_path, 'chroot lock',
                          blocking_timeout=300) as lock:
      try:
        lock.write_lock()
      except timeout_util.TimeoutError as e:
        logging.error('Acquiring write_lock on %s failed: %s', lock_path, e)
        logging.warning(
            'Continuing with CleanupChroot(%s), which will umount the tree.',
            options.chroot)
      # We can call CleanupChroot (which calls cros_sdk_lib.CleanupChrootMount)
      # even if we don't get the lock because it will attempt to unmount the
      # tree and will print diagnostic information from 'fuser', 'lsof', and
      # 'ps'.
      CleanupChroot(options.chroot)
      sys.exit(0)

  # Make sure the main chroot mount is visible. Contents will be filled in
  # below if needed.
  if options.create and options.use_image:
    if missing_image_tools:
      raise SystemExit("""The tool(s) %s were not found.
Please make sure the lvm2 and thin-provisioning-tools packages
are installed on your host.
Example(ubuntu):
  sudo apt-get install lvm2 thin-provisioning-tools

If you want to run without lvm2, pass --nouse-image (chroot snapshots
will be unavailable).""" % ', '.join(missing_image_tools))

    logging.debug('Making sure chroot image is mounted.')
    with cgroups.SimpleContainChildren('cros_sdk'):
      with locking.FileLock(lock_path, 'chroot lock') as lock:
        lock.write_lock()
        if not cros_sdk_lib.MountChroot(options.chroot, create=True):
          cros_build_lib.Die('Unable to mount %s on chroot',
                             _ImageFileForChroot(options.chroot))
        logging.notice('Mounted %s on chroot',
                       _ImageFileForChroot(options.chroot))

  # Snapshot operations will always need the VG/LV, but other actions won't.
  if any_snapshot_operation:
    with cgroups.SimpleContainChildren('cros_sdk'):
      with locking.FileLock(lock_path, 'chroot lock') as lock:
        chroot_vg, chroot_lv = cros_sdk_lib.FindChrootMountSource(
            options.chroot)
        if not chroot_vg or not chroot_lv:
          cros_build_lib.Die('Unable to find VG/LV for chroot %s',
                             options.chroot)

        # Delete snapshot before creating a new one. This allows the user to
        # throw out old state, create a new snapshot, and enter the chroot in
        # a single call to cros_sdk. Since restore involves deleting, also do
        # it before creating.
        if options.snapshot_restore:
          lock.write_lock()
          valid_snapshots = ListChrootSnapshots(chroot_vg, chroot_lv)
          if options.snapshot_restore not in valid_snapshots:
            cros_build_lib.Die(
                '%s is not a valid snapshot to restore to. '
                'Valid snapshots: %s', options.snapshot_restore,
                ', '.join(valid_snapshots))
          osutils.UmountTree(options.chroot)
          if not RestoreChrootSnapshot(options.snapshot_restore, chroot_vg,
                                       chroot_lv):
            cros_build_lib.Die('Unable to restore chroot to snapshot.')
          if not cros_sdk_lib.MountChroot(options.chroot, create=False):
            cros_build_lib.Die('Unable to mount restored snapshot onto '
                               'chroot.')

        # Use a read lock for snapshot delete and create even though they
        # modify the filesystem, because they don't modify the mounted chroot
        # itself. The underlying LVM commands take their own locks, so
        # conflicting concurrent operations here may crash cros_sdk, but won't
        # corrupt the chroot image. This tradeoff seems worth it to allow
        # snapshot operations on chroots that have a process inside.
        if options.snapshot_delete:
          lock.read_lock()
          DeleteChrootSnapshot(options.snapshot_delete, chroot_vg, chroot_lv)

        if options.snapshot_create:
          lock.read_lock()
          if not CreateChrootSnapshot(options.snapshot_create, chroot_vg,
                                      chroot_lv):
            cros_build_lib.Die('Unable to create snapshot.')

  img_path = _ImageFileForChroot(options.chroot)
  if (options.use_image and os.path.exists(options.chroot) and
      os.path.exists(img_path)):
    img_stat = os.stat(img_path)
    img_used_bytes = img_stat.st_blocks * 512

    mount_stat = os.statvfs(options.chroot)
    mount_used_bytes = mount_stat.f_frsize * (
        mount_stat.f_blocks - mount_stat.f_bfree)

    extra_gbs = (img_used_bytes - mount_used_bytes) // 2**30
    if extra_gbs > MAX_UNUSED_IMAGE_GBS:
      logging.notice('%s is using %s GiB more than needed. Running fstrim.',
                     img_path, extra_gbs)
      cmd = ['fstrim', options.chroot]
      try:
        cros_build_lib.dbg_run(cmd)
      except cros_build_lib.RunCommandError as e:
        logging.warning(
            'Running fstrim failed. Consider running fstrim on '
            'your chroot manually.\n%s', e)

  # Enter a new set of namespaces. Everything after here cannot directly
  # affect the host's mounts or alter LVM volumes.
  namespaces.SimpleUnshare()
  if options.ns_pid:
    first_pid = namespaces.CreatePidNs()
  else:
    first_pid = None

  if options.snapshot_list:
    for snap in ListChrootSnapshots(chroot_vg, chroot_lv):
      print(snap)
    sys.exit(0)

  if not options.sdk_version:
    sdk_version = (bootstrap_latest_version
                   if options.bootstrap else sdk_latest_version)
  else:
    sdk_version = options.sdk_version
  if options.buildbot_log_version:
    logging.PrintBuildbotStepText(sdk_version)

  # Based on selections, determine the tarball to fetch.
  if options.download:
    if options.sdk_url:
      urls = [options.sdk_url]
    else:
      urls = GetArchStageTarballs(sdk_version)

  with cgroups.SimpleContainChildren('cros_sdk', pid=first_pid):
    with locking.FileLock(lock_path, 'chroot lock') as lock:
      if options.proxy_sim:
        _ProxySimSetup(options)

      if (options.delete and not chroot_deleted and
          (os.path.exists(options.chroot) or
           os.path.exists(_ImageFileForChroot(options.chroot)))):
        lock.write_lock()
        DeleteChroot(options.chroot)

      sdk_cache = os.path.join(options.cache_dir, 'sdks')
      distfiles_cache = os.path.join(options.cache_dir, 'distfiles')
      osutils.SafeMakedirsNonRoot(options.cache_dir)

      for target in (sdk_cache, distfiles_cache):
        src = os.path.join(constants.SOURCE_ROOT, os.path.basename(target))
        if not os.path.exists(src):
          osutils.SafeMakedirsNonRoot(target)
          continue
        lock.write_lock(
            'Upgrade to %r needed but chroot is locked; please exit '
            'all instances so this upgrade can finish.' % src)
        if not os.path.exists(src):
          # Note that while waiting for the write lock, src may've vanished;
          # it's a rare race during the upgrade process that's a byproduct
          # of us avoiding taking a write lock to do the src check. If we
          # took a write lock for that check, it would effectively limit
          # all cros_sdk for a chroot to a single instance.
          osutils.SafeMakedirsNonRoot(target)
        elif not os.path.exists(target):
          # Upgrade occurred, but a reversion, or something whacky
          # occurred writing to the old location. Wipe and continue.
          os.rename(src, target)
        else:
          # Upgrade occurred once already, but either a reversion or
          # some before/after separate cros_sdk usage is at play.
          # Wipe and continue.
          osutils.RmDir(src)

      if options.download:
        lock.write_lock()
        sdk_tarball = FetchRemoteTarballs(
            sdk_cache, urls, 'stage3' if options.bootstrap else 'SDK')

      if options.create:
        lock.write_lock()
        # Recheck if the chroot is set up here before creating to make sure
        # we account for whatever the various delete/unmount/remount steps
        # above have done.
        if cros_sdk_lib.IsChrootReady(options.chroot):
          logging.debug('Chroot already exists. Skipping creation.')
        else:
          CreateChroot(options.chroot, sdk_tarball, options.cache_dir,
                       nousepkg=(options.bootstrap or options.nousepkg))

      if options.enter:
        lock.read_lock()
        EnterChroot(options.chroot, options.cache_dir, options.chrome_root,
                    options.chrome_root_mount, options.goma_dir,
                    options.goma_client_json, options.working_dir,
                    chroot_command)