def ForkWatchdog(self):
  """Fork a watchdog child whose job is to guarantee cleanup runs.

  The parent takes the write lock, forks, and returns immediately.  The
  child detaches into its own session and its own cgroup (so it survives
  attempts to kill the parent's process group), ignores SIGINT, and then
  blocks re-taking the write lock — which it lost across the fork via
  lockf/fork semantics.  If re-taking the lock fails, the child hard-exits
  because the parent's cleanup state is unknowable at that point.

  Raises:
    RuntimeError: If invoked more than once for this instance.
  """
  if self._forked:
    raise RuntimeError("ForkWatchdog was invoked twice for %s" % (self,))
  # Take the write lock before forking; the child loses its claim on it
  # across the fork (lockf semantics) and will block re-acquiring it below.
  self._lock.write_lock()
  pid = os.fork()
  self._forked = True
  if pid:
    # Parent; nothing further to do here.
    return
  # Get ourselves a new process group; note that we do not reparent
  # to init.  Similarly, escape from any cgroups that we are in.  This
  # ensures that our cleanup section will run, even if our parents
  # are trying to kill us.
  os.setsid()
  self._cgroups = cgroups.SimpleContainChildren('cleanup', nesting=False)
  self._cgroups.__enter__()
  # Since we share stdin/stdout/whatever, suppress sigint should we somehow
  # become the foreground process in the session group.
  # pylint: disable=W0212
  signal.signal(signal.SIGINT, signal.SIG_IGN)
  # Child code.  We lose the lock via lockf/fork semantics.
  self._is_child = True
  try:
    self._lock.write_lock()
  except Exception, e:
    print >> sys.stderr, ("EnforcedCleanupSection %s excepted(%r) attempting "
                          "to take the write lock; hard exiting." % (self, e))
    # We have no way of knowing the state of the parent if this locking
    # fails- failure means a code bug.  Specifically, we don't know if
    # cleanup code was run, thus just flat out bail.
    os._exit(1)
def PerformStage(self):
  """Run every configured GCE test, then archive the results.

  Each test writes into its own subdirectory of a freshly created test
  root.  Results are archived unconditionally in the finally clause, even
  when a test raises.
  """
  # Directories used later to archive test artifacts.
  results_root = commands.CreateTestRoot(self._build_root)
  # An explicitly provided basename wins over the attempt-derived default.
  basename = self._test_basename or (
      _GCE_TEST_RESULTS % dict(attempt=self._attempt))
  try:
    if not self._gce_tests:
      self._gce_tests = self._run.config.gce_tests
    for test in self._gce_tests:
      logging.info('Running GCE test %s.', test.test_type)
      # Suite-type tests are keyed by suite name; everything else by type.
      subdir = (test.test_suite
                if test.test_type == constants.GCE_SUITE_TEST_TYPE
                else test.test_type)
      per_test_dir = os.path.join(results_root, subdir)
      with cgroups.SimpleContainChildren('GCETest'):
        with timeout_util.Timeout(
            self.TEST_TIMEOUT,
            reason_message=' Reached GCETestStage test run timeout.'):
          self._RunTest(test, per_test_dir)
  except Exception:
    # pylint: disable=logging-not-lazy
    logging.error(
        _ERROR_MSG % dict(test_name='GCETests', test_results=basename))
    raise
  finally:
    self._ArchiveTestResults(results_root, basename)
def PerformStage(self):
  """Run all configured VM tests, tolerating failures of warn-only tests.

  Tests run sequentially; a failure in a warn_only test is recorded and
  forgiven, while any other failure propagates immediately.  If only
  optional tests failed, the stage is downgraded to a warning via
  TestWarning.  Results are archived unconditionally in the finally clause.
  """
  # These directories are used later to archive test artifacts.
  if not self._run.options.vmtests:
    return
  test_results_root = commands.CreateTestRoot(self._build_root)
  test_basename = _VM_TEST_RESULTS % dict(attempt=self._attempt)
  # An explicitly provided basename overrides the attempt-derived default.
  if self._test_basename:
    test_basename = self._test_basename
  try:
    if not self._vm_tests:
      self._vm_tests = self._run.config.vm_tests
    failed_tests = []
    for vm_test in self._vm_tests:
      logging.info('Running VM test %s.', vm_test.test_type)
      # Suite-type tests get a results dir named for the suite; all other
      # tests are keyed by test type.
      if vm_test.test_type == constants.VM_SUITE_TEST_TYPE:
        per_test_results_dir = os.path.join(
            test_results_root, vm_test.test_suite)
      else:
        per_test_results_dir = os.path.join(
            test_results_root, vm_test.test_type)
      try:
        with cgroups.SimpleContainChildren('VMTest'):
          # Leading space: timeout_util appends the reason directly to its
          # error message.
          r = ' Reached VMTestStage test run timeout.'
          with timeout_util.Timeout(vm_test.timeout, reason_message=r):
            self._RunTest(vm_test, per_test_results_dir)
      except Exception:
        failed_tests.append(vm_test)
        if vm_test.warn_only:
          logging.warning('Optional test failed. Forgiving the failure.')
        else:
          raise
    if failed_tests:
      # If any of the tests failed but not raise an exception, mark
      # the stage as warning.
      self._stage_exception_handler = self._HandleExceptionAsWarning
      raise failures_lib.TestWarning(
          'VMTestStage succeeded, but some optional tests failed.')
  except Exception as e:
    # TestWarning is our own downgrade signal; only log/archive debug VM
    # files for genuine failures.
    if not isinstance(e, failures_lib.TestWarning):
      # pylint: disable=logging-not-lazy
      logging.error(
          _ERROR_MSG % dict(test_name='VMTests', test_results=test_basename))
      self._ArchiveVMFiles(test_results_root)
    raise
  finally:
    if self._run.config.vm_test_report_to_dashboards:
      self._ReportResultsToDashboards(test_results_root)
    self._ArchiveTestResults(test_results_root, test_basename)
def PerformStage(self):
  """Run the GCE VM test type and archive its results.

  Archiving happens unconditionally in the finally clause, even when the
  test run raises.
  """
  # Directories used later to archive test artifacts.
  results_dir = commands.CreateTestRoot(self._build_root)
  basename = constants.GCE_TEST_RESULTS % dict(attempt=self._attempt)
  try:
    logging.info('Running GCE tests...')
    # Leading space: timeout_util appends the reason to its error message.
    timeout_reason = ' Reached GCETestStage test run timeout.'
    with cgroups.SimpleContainChildren('GCETest'):
      with timeout_util.Timeout(self.TEST_TIMEOUT,
                                reason_message=timeout_reason):
        self._RunTest(constants.GCE_VM_TEST_TYPE, results_dir)
  except Exception:
    logging.error(_GCE_TEST_ERROR_MSG % dict(gce_test_results=basename))
    raise
  finally:
    self._ArchiveTestResults(results_dir, basename)
def PerformStage(self):
  """Run each configured VM test type in sequence, then archive results.

  On any failure the VM debug files are archived before the exception
  propagates; test results are archived unconditionally.
  """
  # Directories used later to archive test artifacts.
  results_dir = commands.CreateTestRoot(self._build_root)
  basename = constants.VM_TEST_RESULTS % dict(attempt=self._attempt)
  try:
    for test in self._run.config.vm_tests:
      logging.info('Running VM test %s.', test.test_type)
      with cgroups.SimpleContainChildren('VMTest'):
        # Leading space: timeout_util appends the reason to its message.
        with timeout_util.Timeout(
            test.timeout,
            reason_message=' Reached VMTestStage test run timeout.'):
          self._RunTest(test.test_type, results_dir)
  except Exception:
    logging.error(_VM_TEST_ERROR_MSG % dict(vm_test_results=basename))
    self._ArchiveVMFiles(results_dir)
    raise
  finally:
    self._ArchiveTestResults(results_dir, basename)
def _RunAllSuites(self, suites, base_chroot_results_dir):
  """Runs multiple test suites sequentially.

  Args:
    suites: List of TastVMTestConfig objects describing suites to run.
    base_chroot_results_dir: Base results directory relative to chroot.

  Raises:
    failures_lib.TestFailure if an internal error is encountered.
  """
  with cgroups.SimpleContainChildren('TastVMTest'):
    for cfg in suites:
      logging.info('Running Tast VM test suite %s (%s)',
                   cfg.suite_name, ' '.join(cfg.test_exprs))
      suite_results_dir = os.path.join(base_chroot_results_dir,
                                       cfg.suite_name)
      # We apparently always prefix reasons with spaces because timeout_util
      # appends them directly to error messages.
      with timeout_util.Timeout(
          cfg.timeout,
          reason_message=' Reached TastVMTestStage test run timeout.'):
        self._RunSuite(cfg.test_exprs, suite_results_dir, cfg.timeout)
def main(argv):
  """Entry point for the chroot management tool.

  Parses command aliases (--replace, --bootstrap), infers defaults
  (--enter when no command given), then performs delete/download/create/
  enter under a per-chroot file lock inside a containing cgroup.
  """
  usage = """usage: %prog [options] [VAR1=val1 .. VARn=valn -- args] This script is used for manipulating local chroot environments; creating, deleting, downloading, etc. If given --enter (or no args), it defaults to an interactive bash shell within the chroot. If given args those are passed to the chroot environment, and executed."""
  conf = cros_build_lib.LoadKeyValueFile(
      os.path.join(constants.SOURCE_ROOT, constants.SDK_VERSION_FILE),
      ignore_missing=True)
  sdk_latest_version = conf.get('COREOS_SDK_VERSION', '<unknown>')
  parser = commandline.OptionParser(usage=usage, caching=True)

  commands = parser.add_option_group("Commands")
  commands.add_option('--enter', action='store_true', default=False,
                      help='Enter the SDK chroot. Implies --create.')
  commands.add_option(
      '--create', action='store_true', default=False,
      help='Create the chroot only if it does not already exist. '
           'Implies --download.')
  commands.add_option(
      '--bootstrap', action='store_true', default=False,
      help='Build everything from scratch, including the sdk. '
           'Use this only if you need to validate a change '
           'that affects SDK creation itself (toolchain and '
           'build are typically the only folk who need this). '
           'Note this will quite heavily slow down the build. '
           'This option implies --create --nousepkg.')
  commands.add_option(
      '-r', '--replace', action='store_true', default=False,
      help='Replace an existing SDK chroot. Basically an alias '
           'for --delete --create.')
  commands.add_option('--delete', action='store_true', default=False,
                      help='Delete the current SDK chroot if it exists.')
  commands.add_option('--download', action='store_true', default=False,
                      help='Download the sdk.')

  # Global options:
  default_chroot = os.path.join(constants.SOURCE_ROOT,
                                constants.DEFAULT_CHROOT_DIR)
  parser.add_option(
      '--chroot', dest='chroot', default=default_chroot, type='path',
      help=('SDK chroot dir name [%s]' % constants.DEFAULT_CHROOT_DIR))
  parser.add_option('--chrome_root', default=None, type='path',
                    help='Mount this chrome root into the SDK chroot')
  parser.add_option('--chrome_root_mount', default=None, type='path',
                    help='Mount chrome into this path inside SDK chroot')
  parser.add_option(
      '--nousepkg', action='store_true', default=False,
      help='Do not use binary packages when creating a chroot.')
  parser.add_option('--nogetbinpkg', action='store_true', default=False,
                    help='Do not fetch remote binary packages.')
  parser.add_option(
      '-u', '--url', dest='sdk_url', default=None,
      help=('''Use sdk tarball located at this url. Use file:// for local files.'''))
  parser.add_option('--sdk-version', default=sdk_latest_version,
                    help='Use this sdk version. Current is %default.')
  options, chroot_command = parser.parse_args(argv)

  # Some sanity checks first, before we ask for sudo credentials.
  cros_build_lib.AssertOutsideChroot()

  host = os.uname()[4]
  if host != 'x86_64':
    parser.error(
        "cros_sdk is currently only supported on x86_64; you're running"
        " %s. Please find a x86_64 machine." % (host, ))

  missing = osutils.FindMissingBinaries(NEEDED_TOOLS)
  if missing:
    parser.error((
        'The tool(s) %s were not found.\n'
        'Please install the appropriate package in your host.\n'
        'Example(ubuntu):\n'
        ' sudo apt-get install <packagename>' % (', '.join(missing))))

  # Re-exec under sudo (and any needed wrappers) if we aren't already.
  _ReExecuteIfNeeded([sys.argv[0]] + argv)

  # Expand out the aliases...
  if options.replace:
    options.delete = options.create = True
  if options.bootstrap:
    options.create = True

  # If a command is not given, default to enter.
  options.enter |= not any(
      getattr(options, x.dest) for x in commands.option_list)
  options.enter |= bool(chroot_command)

  if options.enter and options.delete and not options.create:
    parser.error("Trying to enter the chroot when --delete "
                 "was specified makes no sense.")

  # Finally, discern if we need to create the chroot.
  chroot_exists = os.path.exists(options.chroot)
  if options.create or options.enter:
    # Only create if it's being wiped, or if it doesn't exist.
    if not options.delete and chroot_exists:
      options.create = False
    else:
      options.download = True

  # Finally, flip create if necessary.
  if options.enter:
    options.create |= not chroot_exists

  # Based on selections, fetch the tarball.
  if options.sdk_url:
    urls = [options.sdk_url]
  else:
    urls = GetArchStageTarballs(options.sdk_version)

  # Lock file lives next to the chroot so concurrent cros_sdk invocations
  # against the same chroot serialize correctly.
  lock_path = os.path.dirname(options.chroot)
  lock_path = os.path.join(lock_path,
                           '.%s_lock' % os.path.basename(options.chroot))
  with cgroups.SimpleContainChildren('cros_sdk'):
    with locking.FileLock(lock_path, 'chroot lock') as lock:
      if options.delete and os.path.exists(options.chroot):
        lock.write_lock()
        DeleteChroot(options.chroot)

      sdk_cache = os.path.join(options.cache_dir, 'sdks')
      distfiles_cache = os.path.join(options.cache_dir, 'distfiles')
      osutils.SafeMakedirs(options.cache_dir)

      # Migrate legacy cache dirs from the source root into the cache dir;
      # requires the write lock since other instances may be using them.
      for target in (sdk_cache, distfiles_cache):
        src = os.path.join(constants.SOURCE_ROOT, os.path.basename(target))
        if not os.path.exists(src):
          osutils.SafeMakedirs(target)
          continue
        lock.write_lock(
            "Upgrade to %r needed but chroot is locked; please exit "
            "all instances so this upgrade can finish." % src)
        if not os.path.exists(src):
          # Note that while waiting for the write lock, src may've vanished;
          # it's a rare race during the upgrade process that's a byproduct
          # of us avoiding taking a write lock to do the src check.  If we
          # took a write lock for that check, it would effectively limit
          # all cros_sdk for a chroot to a single instance.
          osutils.SafeMakedirs(target)
        elif not os.path.exists(target):
          # Upgrade occurred, but a reversion, or something whacky
          # occurred writing to the old location.  Wipe and continue.
          os.rename(src, target)
        else:
          # Upgrade occurred once already, but either a reversion or
          # some before/after separate cros_sdk usage is at play.
          # Wipe and continue.
          osutils.RmDir(src)

      if options.download:
        lock.write_lock()
        sdk_tarball = FetchRemoteTarballs(sdk_cache, urls)

      if options.create:
        lock.write_lock()
        # NOTE(review): sdk_tarball is only bound when options.download is
        # set; the alias expansion above is presumed to guarantee download
        # whenever create is set — confirm.
        CreateChroot(options.chroot, sdk_tarball, options.cache_dir,
                     nousepkg=(options.bootstrap or options.nousepkg),
                     nogetbinpkg=options.nogetbinpkg)

      if options.enter:
        # Entering only needs a read lock: multiple concurrent enters are
        # fine as long as no one is mutating the chroot.
        lock.read_lock()
        EnterChroot(options.chroot, options.cache_dir, options.chrome_root,
                    options.chrome_root_mount, chroot_command)
def testCreateGroups(self):
  """Stress cros_sdk cgroup creation by launching many instances at once."""
  with sudo.SudoKeepAlive():
    # Twenty invocations spread over ten worker processes exercise the
    # group-creation paths concurrently to flush out race conditions.
    with cgroups.SimpleContainChildren('example', sigterm_timeout=5):
      task_args = [[]] * 20
      parallel.RunTasksInProcessPool(self._CrosSdk, task_args, processes=10)
def main(argv):
  """Entry point for cros_sdk.

  Parses options, expands command aliases, optionally sets up a PID
  namespace, then performs delete/download/create/enter under a
  per-chroot file lock inside a containing cgroup.
  """
  conf = cros_build_lib.LoadKeyValueFile(
      os.path.join(constants.SOURCE_ROOT, constants.SDK_VERSION_FILE),
      ignore_missing=True)
  sdk_latest_version = conf.get('SDK_LATEST_VERSION', '<unknown>')
  bootstrap_latest_version = conf.get('BOOTSTRAP_LATEST_VERSION', '<unknown>')
  parser, commands = _CreateParser(sdk_latest_version, bootstrap_latest_version)
  options = parser.parse_args(argv)
  chroot_command = options.commands

  # Some sanity checks first, before we ask for sudo credentials.
  cros_build_lib.AssertOutsideChroot()

  host = os.uname()[4]
  if host != 'x86_64':
    parser.error(
        "cros_sdk is currently only supported on x86_64; you're running"
        " %s. Please find a x86_64 machine." % (host,))

  _ReportMissing(osutils.FindMissingBinaries(NEEDED_TOOLS))
  if options.proxy_sim:
    _ReportMissing(osutils.FindMissingBinaries(PROXY_NEEDED_TOOLS))

  # Re-exec under sudo if needed, then optionally move into a fresh PID
  # namespace so children can be reaped reliably.
  _ReExecuteIfNeeded([sys.argv[0]] + argv)
  if options.ns_pid:
    first_pid = namespaces.CreatePidNs()
  else:
    first_pid = None

  # Expand out the aliases...
  if options.replace:
    options.delete = options.create = True
  if options.bootstrap:
    options.create = True

  # If a command is not given, default to enter.
  # pylint: disable=protected-access
  # This _group_actions access sucks, but upstream decided to not include an
  # alternative to optparse's option_list, and this is what they recommend.
  options.enter |= not any(getattr(options, x.dest)
                           for x in commands._group_actions)
  # pylint: enable=protected-access
  options.enter |= bool(chroot_command)

  if options.enter and options.delete and not options.create:
    parser.error("Trying to enter the chroot when --delete "
                 "was specified makes no sense.")

  # Finally, discern if we need to create the chroot.
  chroot_exists = os.path.exists(options.chroot)
  if options.create or options.enter:
    # Only create if it's being wiped, or if it doesn't exist.
    if not options.delete and chroot_exists:
      options.create = False
    else:
      options.download = True

  # Finally, flip create if necessary.
  if options.enter:
    options.create |= not chroot_exists

  if not options.sdk_version:
    sdk_version = (bootstrap_latest_version if options.bootstrap
                   else sdk_latest_version)
  else:
    sdk_version = options.sdk_version
  if options.buildbot_log_version:
    logging.PrintBuildbotStepText(sdk_version)

  # Based on selections, determine the tarball to fetch.
  if options.sdk_url:
    urls = [options.sdk_url]
  elif options.bootstrap:
    urls = GetStage3Urls(sdk_version)
  else:
    urls = GetArchStageTarballs(sdk_version)

  # Get URLs for the toolchains overlay, if one is to be used.
  toolchains_overlay_urls = None
  if not options.bootstrap:
    toolchains = None
    if options.toolchains:
      toolchains = options.toolchains.split(',')
    elif options.board:
      toolchains = toolchain.GetToolchainsForBoard(options.board).keys()
    if toolchains:
      toolchains_overlay_urls = GetToolchainsOverlayUrls(sdk_version,
                                                         toolchains)

  # Lock file lives next to the chroot; leading dots are stripped from the
  # chroot basename so the lock name stays well-formed.
  lock_path = os.path.dirname(options.chroot)
  lock_path = os.path.join(
      lock_path, '.%s_lock' % os.path.basename(options.chroot).lstrip('.'))
  with cgroups.SimpleContainChildren('cros_sdk', pid=first_pid):
    with locking.FileLock(lock_path, 'chroot lock') as lock:
      toolchains_overlay_tarball = None

      if options.proxy_sim:
        _ProxySimSetup(options)

      if options.delete and os.path.exists(options.chroot):
        lock.write_lock()
        DeleteChroot(options.chroot)

      sdk_cache = os.path.join(options.cache_dir, 'sdks')
      distfiles_cache = os.path.join(options.cache_dir, 'distfiles')
      osutils.SafeMakedirsNonRoot(options.cache_dir)

      # Migrate legacy cache dirs from the source root into the cache dir;
      # requires the write lock since other instances may be using them.
      for target in (sdk_cache, distfiles_cache):
        src = os.path.join(constants.SOURCE_ROOT, os.path.basename(target))
        if not os.path.exists(src):
          osutils.SafeMakedirsNonRoot(target)
          continue
        lock.write_lock(
            "Upgrade to %r needed but chroot is locked; please exit "
            "all instances so this upgrade can finish." % src)
        if not os.path.exists(src):
          # Note that while waiting for the write lock, src may've vanished;
          # it's a rare race during the upgrade process that's a byproduct
          # of us avoiding taking a write lock to do the src check.  If we
          # took a write lock for that check, it would effectively limit
          # all cros_sdk for a chroot to a single instance.
          osutils.SafeMakedirsNonRoot(target)
        elif not os.path.exists(target):
          # Upgrade occurred, but a reversion, or something whacky
          # occurred writing to the old location.  Wipe and continue.
          os.rename(src, target)
        else:
          # Upgrade occurred once already, but either a reversion or
          # some before/after separate cros_sdk usage is at play.
          # Wipe and continue.
          osutils.RmDir(src)

      if options.download:
        lock.write_lock()
        sdk_tarball = FetchRemoteTarballs(
            sdk_cache, urls, 'stage3' if options.bootstrap else 'SDK')
        if toolchains_overlay_urls:
          toolchains_overlay_tarball = FetchRemoteTarballs(
              sdk_cache, toolchains_overlay_urls, 'SDK toolchains overlay',
              allow_none=True)

      if options.create:
        lock.write_lock()
        # NOTE(review): sdk_tarball is only bound when options.download is
        # set; the alias expansion above is presumed to guarantee download
        # whenever create is set — confirm.
        CreateChroot(options.chroot, sdk_tarball, toolchains_overlay_tarball,
                     options.cache_dir,
                     nousepkg=(options.bootstrap or options.nousepkg))

      if options.enter:
        # Entering only needs a read lock: concurrent enters are fine as
        # long as nothing is mutating the chroot.
        lock.read_lock()
        EnterChroot(options.chroot, options.cache_dir, options.chrome_root,
                    options.chrome_root_mount, options.workspace,
                    chroot_command)
def main(argv):
  """Entry point for cros_sdk (loopback-image / LVM-snapshot capable).

  Ordering here is load-bearing: everything that touches the host's
  mounts or LVM (mount checks, delete, unmount, snapshot ops, fstrim)
  must happen BEFORE namespaces.SimpleUnshare(); the final
  download/create/enter sequence runs afterwards inside the new
  namespaces, under the per-chroot file lock.
  """
  # Turn on strict sudo checks.
  cros_build_lib.STRICT_SUDO = True
  conf = key_value_store.LoadFile(
      os.path.join(constants.SOURCE_ROOT, constants.SDK_VERSION_FILE),
      ignore_missing=True)
  sdk_latest_version = conf.get('SDK_LATEST_VERSION', '<unknown>')
  bootstrap_frozen_version = conf.get('BOOTSTRAP_FROZEN_VERSION', '<unknown>')

  # Use latest SDK for bootstrapping if requested. Use a frozen version of SDK
  # for bootstrapping if BOOTSTRAP_FROZEN_VERSION is set.
  bootstrap_latest_version = (
      sdk_latest_version
      if bootstrap_frozen_version == '<unknown>' else bootstrap_frozen_version)
  parser, commands = _CreateParser(sdk_latest_version, bootstrap_latest_version)
  options = parser.parse_args(argv)
  chroot_command = options.commands

  # Some sanity checks first, before we ask for sudo credentials.
  cros_build_lib.AssertOutsideChroot()

  host = os.uname()[4]
  if host != 'x86_64':
    cros_build_lib.Die(
        "cros_sdk is currently only supported on x86_64; you're running"
        ' %s. Please find a x86_64 machine.' % (host, ))

  # Merge the outside PATH setting if we re-execed ourselves.
  if 'CHROMEOS_SUDO_PATH' in os.environ:
    os.environ['PATH'] = '%s:%s' % (os.environ.pop('CHROMEOS_SUDO_PATH'),
                                    os.environ['PATH'])

  _ReportMissing(osutils.FindMissingBinaries(NEEDED_TOOLS))
  if options.proxy_sim:
    _ReportMissing(osutils.FindMissingBinaries(PROXY_NEEDED_TOOLS))
  # Image tools (lvm2 etc.) are optional; their absence only disables
  # image-backed chroots and snapshots.
  missing_image_tools = osutils.FindMissingBinaries(IMAGE_NEEDED_TOOLS)

  if (sdk_latest_version == '<unknown>' or
      bootstrap_latest_version == '<unknown>'):
    cros_build_lib.Die(
        'No SDK version was found. '
        'Are you in a Chromium source tree instead of Chromium OS?\n\n'
        'Please change to a directory inside your Chromium OS source tree\n'
        'and retry. If you need to setup a Chromium OS source tree, see\n'
        ' https://dev.chromium.org/chromium-os/developer-guide')

  any_snapshot_operation = (options.snapshot_create or
                            options.snapshot_restore or
                            options.snapshot_delete or options.snapshot_list)
  if any_snapshot_operation and not options.use_image:
    cros_build_lib.Die('Snapshot operations are not compatible with '
                       '--nouse-image.')

  if (options.snapshot_delete and
      options.snapshot_delete == options.snapshot_restore):
    parser.error('Cannot --snapshot_delete the same snapshot you are '
                 'restoring with --snapshot_restore.')

  # Re-exec under sudo (and any needed wrappers) if we aren't already.
  _ReExecuteIfNeeded([sys.argv[0]] + argv)

  # Lock file lives next to the chroot; leading dots are stripped from the
  # chroot basename so the lock name stays well-formed.
  lock_path = os.path.dirname(options.chroot)
  lock_path = os.path.join(
      lock_path, '.%s_lock' % os.path.basename(options.chroot).lstrip('.'))

  # Expand out the aliases...
  if options.replace:
    options.delete = options.create = True
  if options.bootstrap:
    options.create = True

  # If a command is not given, default to enter.
  # pylint: disable=protected-access
  # This _group_actions access sucks, but upstream decided to not include an
  # alternative to optparse's option_list, and this is what they recommend.
  options.enter |= not any(
      getattr(options, x.dest) for x in commands._group_actions)
  # pylint: enable=protected-access
  options.enter |= bool(chroot_command)

  if (options.delete and not options.create and
      (options.enter or any_snapshot_operation)):
    parser.error('Trying to enter or snapshot the chroot when --delete '
                 'was specified makes no sense.')

  if (options.unmount and
      (options.create or options.enter or any_snapshot_operation)):
    parser.error('--unmount cannot be specified with other chroot actions.')

  if options.working_dir is not None and not os.path.isabs(
      options.working_dir):
    options.working_dir = path_util.ToChrootPath(options.working_dir)

  # Discern if we need to create the chroot.
  chroot_exists = cros_sdk_lib.IsChrootReady(options.chroot)
  if (options.use_image and not chroot_exists and not options.delete and
      not options.unmount and not missing_image_tools and
      os.path.exists(_ImageFileForChroot(options.chroot))):
    # Try to re-mount an existing image in case the user has rebooted.
    with cgroups.SimpleContainChildren('cros_sdk'):
      with locking.FileLock(lock_path, 'chroot lock') as lock:
        logging.debug('Checking if existing chroot image can be mounted.')
        lock.write_lock()
        cros_sdk_lib.MountChroot(options.chroot, create=False)
        chroot_exists = cros_sdk_lib.IsChrootReady(options.chroot)
        if chroot_exists:
          logging.notice('Mounted existing image %s on chroot',
                         _ImageFileForChroot(options.chroot))

  # Finally, flip create if necessary.
  if options.enter or options.snapshot_create:
    options.create |= not chroot_exists

  # Make sure we will download if we plan to create.
  options.download |= options.create

  # Anything that needs to manipulate the main chroot mount or communicate with
  # LVM needs to be done here before we enter the new namespaces.

  # If deleting, do it regardless of the use_image flag so that a
  # previously-created loopback chroot can also be cleaned up.
  # TODO(bmgordon): See if the DeleteChroot call below can be removed in
  # favor of this block.
  chroot_deleted = False
  if options.delete:
    with cgroups.SimpleContainChildren('cros_sdk'):
      # Set a timeout of 300 seconds when getting the lock.
      with locking.FileLock(lock_path, 'chroot lock',
                            blocking_timeout=300) as lock:
        try:
          lock.write_lock()
        except timeout_util.TimeoutError as e:
          logging.error('Acquiring write_lock on %s failed: %s', lock_path, e)
          if not options.force:
            cros_build_lib.Die('Exiting; use --force to continue w/o lock.')
          else:
            logging.warning(
                'cros_sdk was invoked with force option, continuing.')
        if missing_image_tools:
          # Without lvm2 tools we can only unmount; full cleanup needs them.
          logging.notice('Unmounting chroot.')
          osutils.UmountTree(options.chroot)
        else:
          logging.notice('Deleting chroot.')
          cros_sdk_lib.CleanupChrootMount(options.chroot, delete=True)
          chroot_deleted = True

  # If cleanup was requested, we have to do it while we're still in the original
  # namespace. Since cleaning up the mount will interfere with any other
  # commands, we exit here. The check above should have made sure that no other
  # action was requested, anyway.
  if options.unmount:
    # Set a timeout of 300 seconds when getting the lock.
    with locking.FileLock(lock_path, 'chroot lock',
                          blocking_timeout=300) as lock:
      try:
        lock.write_lock()
      except timeout_util.TimeoutError as e:
        logging.error('Acquiring write_lock on %s failed: %s', lock_path, e)
        logging.warning(
            'Continuing with CleanupChroot(%s), which will umount the tree.',
            options.chroot)
      # We can call CleanupChroot (which calls cros_sdk_lib.CleanupChrootMount)
      # even if we don't get the lock because it will attempt to unmount the
      # tree and will print diagnostic information from 'fuser', 'lsof', and
      # 'ps'.
      CleanupChroot(options.chroot)
      sys.exit(0)

  # Make sure the main chroot mount is visible. Contents will be filled in
  # below if needed.
  if options.create and options.use_image:
    if missing_image_tools:
      raise SystemExit("""The tool(s) %s were not found. Please make sure the lvm2 and thin-provisioning-tools packages are installed on your host. Example(ubuntu): sudo apt-get install lvm2 thin-provisioning-tools If you want to run without lvm2, pass --nouse-image (chroot snapshots will be unavailable).""" % ', '.join(missing_image_tools))

    logging.debug('Making sure chroot image is mounted.')
    with cgroups.SimpleContainChildren('cros_sdk'):
      with locking.FileLock(lock_path, 'chroot lock') as lock:
        lock.write_lock()
        if not cros_sdk_lib.MountChroot(options.chroot, create=True):
          cros_build_lib.Die('Unable to mount %s on chroot',
                             _ImageFileForChroot(options.chroot))
        logging.notice('Mounted %s on chroot',
                       _ImageFileForChroot(options.chroot))

  # Snapshot operations will always need the VG/LV, but other actions won't.
  if any_snapshot_operation:
    with cgroups.SimpleContainChildren('cros_sdk'):
      with locking.FileLock(lock_path, 'chroot lock') as lock:
        chroot_vg, chroot_lv = cros_sdk_lib.FindChrootMountSource(
            options.chroot)
        if not chroot_vg or not chroot_lv:
          cros_build_lib.Die('Unable to find VG/LV for chroot %s',
                             options.chroot)

        # Delete snapshot before creating a new one. This allows the user to
        # throw out old state, create a new snapshot, and enter the chroot in a
        # single call to cros_sdk. Since restore involves deleting, also do it
        # before creating.
        if options.snapshot_restore:
          lock.write_lock()
          valid_snapshots = ListChrootSnapshots(chroot_vg, chroot_lv)
          if options.snapshot_restore not in valid_snapshots:
            cros_build_lib.Die('%s is not a valid snapshot to restore to. '
                               'Valid snapshots: %s', options.snapshot_restore,
                               ', '.join(valid_snapshots))
          osutils.UmountTree(options.chroot)
          if not RestoreChrootSnapshot(options.snapshot_restore, chroot_vg,
                                       chroot_lv):
            cros_build_lib.Die('Unable to restore chroot to snapshot.')
          if not cros_sdk_lib.MountChroot(options.chroot, create=False):
            cros_build_lib.Die('Unable to mount restored snapshot onto chroot.')

        # Use a read lock for snapshot delete and create even though they modify
        # the filesystem, because they don't modify the mounted chroot itself.
        # The underlying LVM commands take their own locks, so conflicting
        # concurrent operations here may crash cros_sdk, but won't corrupt the
        # chroot image. This tradeoff seems worth it to allow snapshot
        # operations on chroots that have a process inside.
        if options.snapshot_delete:
          lock.read_lock()
          DeleteChrootSnapshot(options.snapshot_delete, chroot_vg, chroot_lv)

        if options.snapshot_create:
          lock.read_lock()
          if not CreateChrootSnapshot(options.snapshot_create, chroot_vg,
                                      chroot_lv):
            cros_build_lib.Die('Unable to create snapshot.')

  # If the image file is using substantially more space than the mounted
  # filesystem actually needs, fstrim it back down.
  img_path = _ImageFileForChroot(options.chroot)
  if (options.use_image and os.path.exists(options.chroot) and
      os.path.exists(img_path)):
    img_stat = os.stat(img_path)
    # st_blocks is counted in 512-byte units.
    img_used_bytes = img_stat.st_blocks * 512

    mount_stat = os.statvfs(options.chroot)
    mount_used_bytes = mount_stat.f_frsize * (
        mount_stat.f_blocks - mount_stat.f_bfree)

    extra_gbs = (img_used_bytes - mount_used_bytes) // 2**30
    if extra_gbs > MAX_UNUSED_IMAGE_GBS:
      logging.notice('%s is using %s GiB more than needed. Running '
                     'fstrim.', img_path, extra_gbs)
      cmd = ['fstrim', options.chroot]
      try:
        cros_build_lib.dbg_run(cmd)
      except cros_build_lib.RunCommandError as e:
        logging.warning(
            'Running fstrim failed. Consider running fstrim on '
            'your chroot manually.\n%s', e)

  # Enter a new set of namespaces. Everything after here cannot directly affect
  # the hosts's mounts or alter LVM volumes.
  namespaces.SimpleUnshare()
  if options.ns_pid:
    first_pid = namespaces.CreatePidNs()
  else:
    first_pid = None

  if options.snapshot_list:
    # chroot_vg/chroot_lv were bound in the any_snapshot_operation block
    # above, which snapshot_list implies.
    for snap in ListChrootSnapshots(chroot_vg, chroot_lv):
      print(snap)
    sys.exit(0)

  if not options.sdk_version:
    sdk_version = (
        bootstrap_latest_version if options.bootstrap else sdk_latest_version)
  else:
    sdk_version = options.sdk_version
  if options.buildbot_log_version:
    logging.PrintBuildbotStepText(sdk_version)

  # Based on selections, determine the tarball to fetch.
  if options.download:
    if options.sdk_url:
      urls = [options.sdk_url]
    else:
      urls = GetArchStageTarballs(sdk_version)

  with cgroups.SimpleContainChildren('cros_sdk', pid=first_pid):
    with locking.FileLock(lock_path, 'chroot lock') as lock:
      if options.proxy_sim:
        _ProxySimSetup(options)

      if (options.delete and not chroot_deleted and
          (os.path.exists(options.chroot) or
           os.path.exists(_ImageFileForChroot(options.chroot)))):
        lock.write_lock()
        DeleteChroot(options.chroot)

      sdk_cache = os.path.join(options.cache_dir, 'sdks')
      distfiles_cache = os.path.join(options.cache_dir, 'distfiles')
      osutils.SafeMakedirsNonRoot(options.cache_dir)

      # Migrate legacy cache dirs from the source root into the cache dir;
      # requires the write lock since other instances may be using them.
      for target in (sdk_cache, distfiles_cache):
        src = os.path.join(constants.SOURCE_ROOT, os.path.basename(target))
        if not os.path.exists(src):
          osutils.SafeMakedirsNonRoot(target)
          continue
        lock.write_lock(
            'Upgrade to %r needed but chroot is locked; please exit '
            'all instances so this upgrade can finish.' % src)
        if not os.path.exists(src):
          # Note that while waiting for the write lock, src may've vanished;
          # it's a rare race during the upgrade process that's a byproduct
          # of us avoiding taking a write lock to do the src check.  If we
          # took a write lock for that check, it would effectively limit
          # all cros_sdk for a chroot to a single instance.
          osutils.SafeMakedirsNonRoot(target)
        elif not os.path.exists(target):
          # Upgrade occurred, but a reversion, or something whacky
          # occurred writing to the old location.  Wipe and continue.
          os.rename(src, target)
        else:
          # Upgrade occurred once already, but either a reversion or
          # some before/after separate cros_sdk usage is at play.
          # Wipe and continue.
          osutils.RmDir(src)

      if options.download:
        lock.write_lock()
        sdk_tarball = FetchRemoteTarballs(
            sdk_cache, urls, 'stage3' if options.bootstrap else 'SDK')

      if options.create:
        lock.write_lock()
        # Recheck if the chroot is set up here before creating to make sure we
        # account for whatever the various delete/unmount/remount steps above
        # have done.
        if cros_sdk_lib.IsChrootReady(options.chroot):
          logging.debug('Chroot already exists. Skipping creation.')
        else:
          CreateChroot(options.chroot, sdk_tarball, options.cache_dir,
                       nousepkg=(options.bootstrap or options.nousepkg))

      if options.enter:
        # Entering only needs a read lock: concurrent enters are fine as
        # long as nothing is mutating the chroot.
        lock.read_lock()
        EnterChroot(options.chroot, options.cache_dir, options.chrome_root,
                    options.chrome_root_mount, options.goma_dir,
                    options.goma_client_json, options.working_dir,
                    chroot_command)