def _ChrootCheck(self, service_options, method_options):
  """Check the chroot options, and execute assertion or note reexec as needed.

  The method-level option takes precedence over the service-level option
  when both are set.

  Args:
    service_options (google.protobuf.Message): The service options.
    method_options (google.protobuf.Message): The method options.

  Returns:
    bool - True iff it needs to be reexeced inside the chroot.

  Raises:
    cros_build_lib.DieSystemExit when the chroot setting cannot be satisfied.
  """
  chroot_assert = build_api_pb2.NO_ASSERTION
  if method_options.HasField('method_chroot_assert'):
    # Prefer the method option when set.
    chroot_assert = method_options.method_chroot_assert
  elif service_options.HasField('service_chroot_assert'):
    # Fall back to the service option.
    chroot_assert = service_options.service_chroot_assert

  if chroot_assert == build_api_pb2.INSIDE:
    # Reexec is only needed when we are not already inside the chroot.
    return not cros_build_lib.IsInsideChroot()
  elif chroot_assert == build_api_pb2.OUTSIDE:
    # If it must be run outside we have to already be outside.
    cros_build_lib.AssertOutsideChroot()

  # NO_ASSERTION, or a satisfied OUTSIDE assertion: no reexec needed.
  # Bug fix: previously the NO_ASSERTION case fell off the end and
  # returned None, violating the documented bool contract.
  return False
def Create(arguments):
  """Create or replace the chroot.

  Args:
    arguments (CreateArguments): The various arguments to create a chroot.

  Returns:
    int - The version of the resulting chroot.
  """
  cros_build_lib.AssertOutsideChroot()

  cmd = [os.path.join(constants.CHROMITE_BIN_DIR, 'cros_sdk')]
  cmd.extend(arguments.GetArgList())
  cros_build_lib.run(cmd)

  version = GetChrootVersion(arguments.chroot_path)
  if not arguments.replace:
    # Force replace scenarios. Only needed when we're not already replacing
    # it. The first matching reason wins; all three share the same recovery
    # path (set replace and retry once), previously triplicated.
    replace_reason = None
    if not version:
      # We can't get a version for a chroot that exists, so something must
      # have gone wrong during creation.
      replace_reason = 'Replacing broken chroot.'
    elif not cros_sdk_lib.IsChrootVersionValid(arguments.chroot_path):
      # The version is not valid, i.e. ahead of the chroot version hooks.
      replace_reason = 'Replacing chroot ahead of current checkout.'
    elif not cros_sdk_lib.IsChrootDirValid(arguments.chroot_path):
      # The permissions or owner are not correct.
      replace_reason = 'Replacing chroot with invalid permissions.'

    if replace_reason:
      logging.notice(replace_reason)
      arguments.replace = True
      # Recursion depth is bounded: the retry runs with replace=True, so
      # this branch is skipped on the second pass.
      return Create(arguments)

  return GetChrootVersion(arguments.chroot_path)
def main(argv):
  """Entry point for the cros_sdk tool: create/delete/enter an SDK chroot."""
  usage = """usage: %prog [options] [VAR1=val1 .. VARn=valn -- args]

This script is used for manipulating local chroot environments; creating,
deleting, downloading, etc.  If given --enter (or no args), it defaults
to an interactive bash shell within the chroot.

If given args those are passed to the chroot environment, and executed."""
  # NOTE(review): the exact line breaks of this usage string could not be
  # recovered from the mangled source -- verify against upstream.
  conf = cros_build_lib.LoadKeyValueFile(os.path.join(
      constants.SOURCE_ROOT, constants.SDK_VERSION_FILE),
      ignore_missing=True)
  sdk_latest_version = conf.get('COREOS_SDK_VERSION', '<unknown>')
  parser = commandline.OptionParser(usage=usage, caching=True)

  # Commands: mutually-combinable actions; absence of all of them defaults
  # to --enter (see alias expansion below).
  commands = parser.add_option_group("Commands")
  commands.add_option('--enter', action='store_true', default=False,
                      help='Enter the SDK chroot. Implies --create.')
  commands.add_option(
      '--create', action='store_true', default=False,
      help='Create the chroot only if it does not already exist. '
      'Implies --download.')
  commands.add_option(
      '--bootstrap', action='store_true', default=False,
      help='Build everything from scratch, including the sdk. '
      'Use this only if you need to validate a change '
      'that affects SDK creation itself (toolchain and '
      'build are typically the only folk who need this). '
      'Note this will quite heavily slow down the build. '
      'This option implies --create --nousepkg.')
  commands.add_option(
      '-r', '--replace', action='store_true', default=False,
      help='Replace an existing SDK chroot. Basically an alias '
      'for --delete --create.')
  commands.add_option('--delete', action='store_true', default=False,
                      help='Delete the current SDK chroot if it exists.')
  commands.add_option('--download', action='store_true', default=False,
                      help='Download the sdk.')

  # Global options:
  default_chroot = os.path.join(constants.SOURCE_ROOT,
                                constants.DEFAULT_CHROOT_DIR)
  parser.add_option('--chroot', dest='chroot', default=default_chroot,
                    type='path',
                    help=('SDK chroot dir name [%s]' %
                          constants.DEFAULT_CHROOT_DIR))
  parser.add_option('--chrome_root', default=None, type='path',
                    help='Mount this chrome root into the SDK chroot')
  parser.add_option('--chrome_root_mount', default=None, type='path',
                    help='Mount chrome into this path inside SDK chroot')
  parser.add_option(
      '--nousepkg', action='store_true', default=False,
      help='Do not use binary packages when creating a chroot.')
  parser.add_option('--nogetbinpkg', action='store_true', default=False,
                    help='Do not fetch remote binary packages.')
  parser.add_option('-u', '--url', dest='sdk_url', default=None,
                    help=('''Use sdk tarball located at this url.
                          Use file:// for local files.'''))
  parser.add_option('--sdk-version', default=sdk_latest_version,
                    help='Use this sdk version. Current is %default.')
  options, chroot_command = parser.parse_args(argv)

  # Some sanity checks first, before we ask for sudo credentials.
  cros_build_lib.AssertOutsideChroot()

  # os.uname()[4] is the machine hardware name (e.g. 'x86_64').
  host = os.uname()[4]
  if host != 'x86_64':
    parser.error(
        "cros_sdk is currently only supported on x86_64; you're running"
        " %s. Please find a x86_64 machine." % (host, ))

  missing = osutils.FindMissingBinaries(NEEDED_TOOLS)
  if missing:
    parser.error(
        ('The tool(s) %s were not found.\n'
         'Please install the appropriate package in your host.\n'
         'Example(ubuntu):\n'
         '  sudo apt-get install <packagename>' % (', '.join(missing))))

  # Re-run ourselves under sudo/required environment if needed.
  _ReExecuteIfNeeded([sys.argv[0]] + argv)

  # Expand out the aliases...
  if options.replace:
    options.delete = options.create = True
  if options.bootstrap:
    options.create = True

  # If a command is not given, default to enter.
  options.enter |= not any(
      getattr(options, x.dest) for x in commands.option_list)
  options.enter |= bool(chroot_command)

  if options.enter and options.delete and not options.create:
    parser.error("Trying to enter the chroot when --delete "
                 "was specified makes no sense.")

  # Finally, discern if we need to create the chroot.
  chroot_exists = os.path.exists(options.chroot)
  if options.create or options.enter:
    # Only create if it's being wiped, or if it doesn't exist.
    if not options.delete and chroot_exists:
      options.create = False
    else:
      options.download = True

  # Finally, flip create if necessary.
  if options.enter:
    options.create |= not chroot_exists

  # Based on selections, fetch the tarball.
  if options.sdk_url:
    urls = [options.sdk_url]
  else:
    urls = GetArchStageTarballs(options.sdk_version)

  # Lock file lives next to the chroot dir, named after it
  # (e.g. /path/.chroot_lock), so concurrent cros_sdk runs serialize.
  lock_path = os.path.dirname(options.chroot)
  lock_path = os.path.join(lock_path,
                           '.%s_lock' % os.path.basename(options.chroot))
  with cgroups.SimpleContainChildren('cros_sdk'):
    with locking.FileLock(lock_path, 'chroot lock') as lock:
      if options.delete and os.path.exists(options.chroot):
        lock.write_lock()
        DeleteChroot(options.chroot)

      sdk_cache = os.path.join(options.cache_dir, 'sdks')
      distfiles_cache = os.path.join(options.cache_dir, 'distfiles')
      osutils.SafeMakedirs(options.cache_dir)

      # One-time migration: move legacy cache dirs from the source root
      # into the cache dir, upgrading the lock only when work is needed.
      for target in (sdk_cache, distfiles_cache):
        src = os.path.join(constants.SOURCE_ROOT, os.path.basename(target))
        if not os.path.exists(src):
          osutils.SafeMakedirs(target)
          continue
        lock.write_lock(
            "Upgrade to %r needed but chroot is locked; please exit "
            "all instances so this upgrade can finish." % src)
        if not os.path.exists(src):
          # Note that while waiting for the write lock, src may've vanished;
          # it's a rare race during the upgrade process that's a byproduct
          # of us avoiding taking a write lock to do the src check. If we
          # took a write lock for that check, it would effectively limit
          # all cros_sdk for a chroot to a single instance.
          osutils.SafeMakedirs(target)
        elif not os.path.exists(target):
          # Upgrade occurred, but a reversion, or something whacky
          # occurred writing to the old location. Move it over and continue.
          os.rename(src, target)
        else:
          # Upgrade occurred once already, but either a reversion or
          # some before/after separate cros_sdk usage is at play.
          # Wipe and continue.
          osutils.RmDir(src)

      if options.download:
        lock.write_lock()
        sdk_tarball = FetchRemoteTarballs(sdk_cache, urls)

      if options.create:
        lock.write_lock()
        # NOTE(review): sdk_tarball is only bound when options.download is
        # set; the alias expansion above appears to guarantee create implies
        # download -- verify, otherwise this is a NameError.
        CreateChroot(options.chroot, sdk_tarball, options.cache_dir,
                     nousepkg=(options.bootstrap or options.nousepkg),
                     nogetbinpkg=options.nogetbinpkg)

      if options.enter:
        # Multiple readers may enter the chroot concurrently.
        lock.read_lock()
        EnterChroot(options.chroot, options.cache_dir, options.chrome_root,
                    options.chrome_root_mount, chroot_command)
def Run(self):
    """Perform the cros clean command."""
    # If no option is set, default to "--safe"
    if not (self.options.safe or self.options.clobber or self.options.board
            or self.options.chroot or self.options.cache
            or self.options.deploy or self.options.flash
            or self.options.images or self.options.autotest
            or self.options.incrementals or self.options.chroot_tmp
            or self.options.sysroots):
      self.options.safe = True

    # --clobber is a superset of --safe plus wiping the chroot and autotest.
    if self.options.clobber:
      self.options.chroot = True
      self.options.autotest = True
      self.options.safe = True

    # --safe expands to the set of cleanups that cannot lose user work.
    if self.options.safe:
      self.options.cache = True
      self.options.chromite = True
      self.options.chroot_tmp = True
      self.options.deploy = True
      self.options.flash = True
      self.options.images = True
      self.options.incrementals = True
      self.options.logs = True
      self.options.workdirs = True

    # No option mutation past this point.
    self.options.Freeze()

    chroot_dir = self.options.sdk_path

    cros_build_lib.AssertOutsideChroot()

    def Clean(path):
      """Helper wrapper for the dry-run checks"""
      if self.options.dry_run:
        logging.notice('would have cleaned: %s', path)
      else:
        osutils.RmDir(path, ignore_missing=True, sudo=True)

    def Empty(path):
      """Helper wrapper for the dry-run checks"""
      if self.options.dry_run:
        logging.notice('would have emptied: %s', path)
      else:
        osutils.EmptyDir(path, ignore_missing=True, sudo=True)

    def CleanNoBindMount(path):
      # This test is a convenience for developers that bind mount these dirs.
      if not os.path.ismount(path):
        Clean(path)
      else:
        logging.debug('Ignoring bind mounted dir: %s', path)

    # Delete this first since many of the caches below live in the chroot.
    if self.options.chroot:
      logging.debug('Remove the chroot.')
      if self.options.dry_run:
        logging.notice('would have cleaned: %s', chroot_dir)
      else:
        # Delegate to cros_sdk so unmounting/teardown is done properly.
        cros_build_lib.run(['cros_sdk', '--delete'])

    boards = self.options.board or []
    if self.options.sysroots:
      # Clean every board sysroot present in the chroot, not just the
      # explicitly named ones.
      try:
        boards = os.listdir(os.path.join(chroot_dir, 'build'))
      except OSError as e:
        # Missing build/ dir just means nothing to clean.
        if e.errno != errno.ENOENT:
          raise
    for b in boards:
      logging.debug('Clean up the %s sysroot.', b)
      Clean(os.path.join(chroot_dir, 'build', b))

    if self.options.chroot_tmp:
      logging.debug('Empty chroot tmp directory.')
      Empty(os.path.join(chroot_dir, 'tmp'))

    if self.options.cache:
      logging.debug('Clean the common cache.')
      CleanNoBindMount(self.options.cache_dir)

      # Recreate dirs that cros_sdk does when entering.
      # TODO: When sdk_lib/enter_chroot.sh is moved to chromite, we should
      # unify with those code paths.
      if not self.options.dry_run:
        for subdir in ('ccache', 'host', 'target'):
          osutils.SafeMakedirs(
              os.path.join(self.options.cache_dir, 'distfiles', subdir))
        # 0o2775: setgid + group-writable, matching what enter_chroot expects.
        os.chmod(os.path.join(self.options.cache_dir, 'distfiles', 'ccache'),
                 0o2775)

    if self.options.chromite:
      logging.debug('Clean chromite workdirs.')
      Clean(os.path.join(constants.CHROMITE_DIR, 'venv', 'venv'))
      Clean(os.path.join(constants.CHROMITE_DIR, 'venv', '.venv_lock'))

    if self.options.deploy:
      logging.debug('Clean up the cros deploy cache.')
      for subdir in ('custom-packages', 'gmerge-packages'):
        for d in glob.glob(os.path.join(chroot_dir, 'build', '*', subdir)):
          Clean(d)

    if self.options.flash:
      logging.debug('Clean up the cros flash cache.')
      Clean(flash.DEVSERVER_STATIC_DIR)

    if self.options.images:
      logging.debug('Clean the images cache.')
      cache_dir = os.path.join(constants.SOURCE_ROOT, 'src', 'build')
      CleanNoBindMount(cache_dir)

    if self.options.incrementals:
      logging.debug('Clean package incremental objects.')
      Clean(os.path.join(chroot_dir, 'var', 'cache', 'portage'))
      for d in glob.glob(os.path.join(chroot_dir, 'build', '*', 'var',
                                      'cache', 'portage')):
        Clean(d)

    if self.options.logs:
      logging.debug('Clean log files.')
      Clean(os.path.join(chroot_dir, 'var', 'log'))
      for d in glob.glob(os.path.join(chroot_dir, 'build', '*', 'tmp',
                                      'portage', 'logs')):
        Clean(d)

    if self.options.workdirs:
      logging.debug('Clean package workdirs.')
      Clean(os.path.join(chroot_dir, 'var', 'tmp', 'portage'))
      Clean(os.path.join(constants.CHROMITE_DIR, 'venv', 'venv'))
      for d in glob.glob(os.path.join(chroot_dir, 'build', '*', 'tmp',
                                      'portage')):
        Clean(d)

    if self.options.autotest:
      logging.debug('Clean build_externals.')
      packages_dir = os.path.join(constants.SOURCE_ROOT, 'src', 'third_party',
                                  'autotest', 'files', 'site-packages')
      Clean(packages_dir)
def Run(self):
    """Perform the cros clean command."""
    # If no option is set, default to "--safe"
    if not (self.options.safe or self.options.clobber or self.options.board
            or self.options.chroot or self.options.cache
            or self.options.deploy or self.options.flash
            or self.options.images or self.options.autotest
            or self.options.incrementals):
      self.options.safe = True

    # --clobber implies --safe plus wiping the chroot and autotest packages.
    if self.options.clobber:
      self.options.chroot = True
      self.options.autotest = True
      self.options.safe = True

    # --safe expands to the cleanups that cannot lose user work.
    if self.options.safe:
      self.options.cache = True
      self.options.chromite = True
      self.options.deploy = True
      self.options.flash = True
      self.options.images = True
      self.options.incrementals = True
      self.options.logs = True
      self.options.workdirs = True

    # No option mutation past this point.
    self.options.Freeze()

    chroot_dir = os.path.join(constants.SOURCE_ROOT,
                              constants.DEFAULT_CHROOT_DIR)

    cros_build_lib.AssertOutsideChroot()

    def Clean(path):
      """Helper wrapper for the dry-run checks"""
      if self.options.dry_run:
        logging.notice('would have cleaned: %s', path)
      else:
        osutils.RmDir(path, ignore_missing=True, sudo=True)

    def CleanNoBindMount(path):
      # This test is a convenience for developers that bind mount these dirs.
      if not os.path.ismount(path):
        Clean(path)
      else:
        # Bug fix: previously logged `self.options.path`, which does not
        # exist on the options object and raised AttributeError whenever a
        # bind mount was encountered. Log the skipped path instead.
        logging.debug('Ignoring bind mounted dir: %s', path)

    # Delete this first since many of the caches below live in the chroot.
    if self.options.chroot:
      logging.debug('Remove the chroot.')
      if self.options.dry_run:
        logging.notice('would have cleaned: %s', chroot_dir)
      else:
        # Delegate to cros_sdk so unmounting/teardown is done properly.
        cros_build_lib.RunCommand(['cros_sdk', '--delete'])

    if self.options.board:
      for b in self.options.board:
        logging.debug('Clean up the %s build root.', b)
        Clean(os.path.join(chroot_dir, 'build', b))

    if self.options.cache:
      logging.debug('Clean the common cache')
      CleanNoBindMount(self.options.cache_dir)

    if self.options.chromite:
      logging.debug('Clean chromite workdirs')
      Clean(os.path.join(constants.CHROMITE_DIR, 'venv', 'venv'))
      Clean(os.path.join(constants.CHROMITE_DIR, 'venv', '.venv_lock'))

    if self.options.deploy:
      logging.debug('Clean up the cros deploy cache.')
      for subdir in ('custom-packages', 'gmerge-packages'):
        for d in glob.glob(os.path.join(chroot_dir, 'build', '*', subdir)):
          Clean(d)

    if self.options.flash:
      logging.debug('Clean up the cros flash cache.')
      Clean(flash.DEVSERVER_STATIC_DIR)

    if self.options.images:
      logging.debug('Clean the images cache.')
      cache_dir = os.path.join(constants.SOURCE_ROOT, 'src', 'build')
      CleanNoBindMount(cache_dir)

    if self.options.incrementals:
      logging.debug('Clean package incremental objects')
      Clean(os.path.join(chroot_dir, 'var', 'cache', 'portage'))
      for d in glob.glob(os.path.join(chroot_dir, 'build', '*', 'var',
                                      'cache', 'portage')):
        Clean(d)

    if self.options.logs:
      logging.debug('Clean log files')
      Clean(os.path.join(chroot_dir, 'var', 'log'))
      for d in glob.glob(os.path.join(chroot_dir, 'build', '*', 'tmp',
                                      'portage', 'logs')):
        Clean(d)

    if self.options.workdirs:
      logging.debug('Clean package workdirs')
      Clean(os.path.join(chroot_dir, 'var', 'tmp', 'portage'))
      Clean(os.path.join(constants.CHROMITE_DIR, 'venv', 'venv'))
      for d in glob.glob(os.path.join(chroot_dir, 'build', '*', 'tmp',
                                      'portage')):
        Clean(d)

    if self.options.autotest:
      logging.debug('Clean build_externals')
      packages_dir = os.path.join(constants.SOURCE_ROOT, 'src', 'third_party',
                                  'autotest', 'files', 'site-packages')
      Clean(packages_dir)
def main(argv):
  """Entry point for cros_sdk: create/delete/enter/bootstrap the SDK chroot."""
  conf = cros_build_lib.LoadKeyValueFile(
      os.path.join(constants.SOURCE_ROOT, constants.SDK_VERSION_FILE),
      ignore_missing=True)
  sdk_latest_version = conf.get('SDK_LATEST_VERSION', '<unknown>')
  bootstrap_latest_version = conf.get('BOOTSTRAP_LATEST_VERSION', '<unknown>')
  parser, commands = _CreateParser(sdk_latest_version,
                                   bootstrap_latest_version)
  options = parser.parse_args(argv)
  chroot_command = options.commands

  # Some sanity checks first, before we ask for sudo credentials.
  cros_build_lib.AssertOutsideChroot()

  # os.uname()[4] is the machine hardware name (e.g. 'x86_64').
  host = os.uname()[4]
  if host != 'x86_64':
    parser.error(
        "cros_sdk is currently only supported on x86_64; you're running"
        " %s. Please find a x86_64 machine." % (host,))

  _ReportMissing(osutils.FindMissingBinaries(NEEDED_TOOLS))
  if options.proxy_sim:
    _ReportMissing(osutils.FindMissingBinaries(PROXY_NEEDED_TOOLS))

  # Re-run ourselves under sudo/required environment if needed.
  _ReExecuteIfNeeded([sys.argv[0]] + argv)
  if options.ns_pid:
    # Fork into a fresh PID namespace; remember the first child pid so the
    # cgroup container below tracks the right process tree.
    first_pid = namespaces.CreatePidNs()
  else:
    first_pid = None

  # Expand out the aliases...
  if options.replace:
    options.delete = options.create = True
  if options.bootstrap:
    options.create = True

  # If a command is not given, default to enter.
  # pylint: disable=protected-access
  # This _group_actions access sucks, but upstream decided to not include an
  # alternative to optparse's option_list, and this is what they recommend.
  options.enter |= not any(
      getattr(options, x.dest) for x in commands._group_actions)
  # pylint: enable=protected-access
  options.enter |= bool(chroot_command)

  if options.enter and options.delete and not options.create:
    parser.error("Trying to enter the chroot when --delete "
                 "was specified makes no sense.")

  # Finally, discern if we need to create the chroot.
  chroot_exists = os.path.exists(options.chroot)
  if options.create or options.enter:
    # Only create if it's being wiped, or if it doesn't exist.
    if not options.delete and chroot_exists:
      options.create = False
    else:
      options.download = True

  # Finally, flip create if necessary.
  if options.enter:
    options.create |= not chroot_exists

  # Resolve which SDK version to use: explicit flag wins, otherwise the
  # bootstrap or latest version from the version file.
  if not options.sdk_version:
    sdk_version = (bootstrap_latest_version if options.bootstrap
                   else sdk_latest_version)
  else:
    sdk_version = options.sdk_version
  if options.buildbot_log_version:
    logging.PrintBuildbotStepText(sdk_version)

  # Based on selections, determine the tarball to fetch.
  if options.sdk_url:
    urls = [options.sdk_url]
  elif options.bootstrap:
    urls = GetStage3Urls(sdk_version)
  else:
    urls = GetArchStageTarballs(sdk_version)

  # Get URLs for the toolchains overlay, if one is to be used.
  toolchains_overlay_urls = None
  if not options.bootstrap:
    toolchains = None
    if options.toolchains:
      toolchains = options.toolchains.split(',')
    elif options.board:
      toolchains = toolchain.GetToolchainsForBoard(options.board).keys()
    if toolchains:
      toolchains_overlay_urls = GetToolchainsOverlayUrls(sdk_version,
                                                         toolchains)

  # Lock file lives next to the chroot dir, named after it; lstrip('.')
  # avoids double-dotted names for hidden chroot dirs.
  lock_path = os.path.dirname(options.chroot)
  lock_path = os.path.join(
      lock_path, '.%s_lock' % os.path.basename(options.chroot).lstrip('.'))
  with cgroups.SimpleContainChildren('cros_sdk', pid=first_pid):
    with locking.FileLock(lock_path, 'chroot lock') as lock:
      toolchains_overlay_tarball = None

      if options.proxy_sim:
        _ProxySimSetup(options)

      if options.delete and os.path.exists(options.chroot):
        lock.write_lock()
        DeleteChroot(options.chroot)

      sdk_cache = os.path.join(options.cache_dir, 'sdks')
      distfiles_cache = os.path.join(options.cache_dir, 'distfiles')
      osutils.SafeMakedirsNonRoot(options.cache_dir)

      # One-time migration: move legacy cache dirs from the source root
      # into the cache dir, upgrading the lock only when work is needed.
      for target in (sdk_cache, distfiles_cache):
        src = os.path.join(constants.SOURCE_ROOT, os.path.basename(target))
        if not os.path.exists(src):
          osutils.SafeMakedirsNonRoot(target)
          continue
        lock.write_lock(
            "Upgrade to %r needed but chroot is locked; please exit "
            "all instances so this upgrade can finish." % src)
        if not os.path.exists(src):
          # Note that while waiting for the write lock, src may've vanished;
          # it's a rare race during the upgrade process that's a byproduct
          # of us avoiding taking a write lock to do the src check. If we
          # took a write lock for that check, it would effectively limit
          # all cros_sdk for a chroot to a single instance.
          osutils.SafeMakedirsNonRoot(target)
        elif not os.path.exists(target):
          # Upgrade occurred, but a reversion, or something whacky
          # occurred writing to the old location. Move it over and continue.
          os.rename(src, target)
        else:
          # Upgrade occurred once already, but either a reversion or
          # some before/after separate cros_sdk usage is at play.
          # Wipe and continue.
          osutils.RmDir(src)

      if options.download:
        lock.write_lock()
        sdk_tarball = FetchRemoteTarballs(
            sdk_cache, urls, 'stage3' if options.bootstrap else 'SDK')
        if toolchains_overlay_urls:
          toolchains_overlay_tarball = FetchRemoteTarballs(
              sdk_cache, toolchains_overlay_urls, 'SDK toolchains overlay',
              allow_none=True)

      if options.create:
        lock.write_lock()
        # NOTE(review): sdk_tarball is only bound when options.download is
        # set; the alias expansion above appears to guarantee create implies
        # download -- verify, otherwise this is a NameError.
        CreateChroot(options.chroot, sdk_tarball,
                     toolchains_overlay_tarball, options.cache_dir,
                     nousepkg=(options.bootstrap or options.nousepkg))

      if options.enter:
        # Multiple readers may enter the chroot concurrently.
        lock.read_lock()
        EnterChroot(options.chroot, options.cache_dir, options.chrome_root,
                    options.chrome_root_mount, options.workspace,
                    chroot_command)
def main(argv):
  """Entry point for cbuildbot: validate options, set up the environment
  (logging tee, sudo keepalive, cgroups, timeouts), then run build stages.
  """
  # We get false positives with the options object.
  # pylint: disable=attribute-defined-outside-init

  # Turn on strict sudo checks.
  cros_build_lib.STRICT_SUDO = True

  # Set umask to 022 so files created by buildbot are readable.
  os.umask(0o22)

  parser = _CreateParser()
  options = ParseCommandLine(parser, argv)

  # Fetch our site_config now, because we need it to do anything else.
  site_config = config_lib.GetConfig()

  _PostParseCheck(parser, options, site_config)

  cros_build_lib.AssertOutsideChroot()

  if options.enable_buildbot_tags:
    logging.EnableBuildbotMarkers()

  if (options.buildbot and not options.debug and
      options.build_config_name != constants.BRANCH_UTIL_CONFIG and
      not cros_build_lib.HostIsCIBuilder()):
    # --buildbot can only be used on a real builder, unless it's debug, or
    # 'branch-util'.
    cros_build_lib.Die('This host is not a supported build machine.')

  # Only one config arg is allowed in this mode, which was confirmed earlier.
  build_config = site_config[options.build_config_name]

  # TODO: Re-enable this block when reference_repo support handles this
  #       properly. (see chromium:330775)
  # if options.reference_repo is None:
  #   repo_path = os.path.join(options.sourceroot, '.repo')
  #   # If we're being run from a repo checkout, reuse the repo's git pool to
  #   # cut down on sync time.
  #   if os.path.exists(repo_path):
  #     options.reference_repo = options.sourceroot

  if options.reference_repo:
    if not os.path.exists(options.reference_repo):
      parser.error('Reference path %s does not exist'
                   % (options.reference_repo,))
    elif not os.path.exists(os.path.join(options.reference_repo, '.repo')):
      parser.error('Reference path %s does not look to be the base of a '
                   'repo checkout; no .repo exists in the root.'
                   % (options.reference_repo,))

  if (options.buildbot or options.remote_trybot) and not options.resume:
    if not options.cgroups:
      parser.error(
          'Options --buildbot/--remote-trybot and --nocgroups cannot '
          'be used together. Cgroup support is required for '
          'buildbot/remote-trybot mode.')
    if not cgroups.Cgroup.IsSupported():
      parser.error(
          'Option --buildbot/--remote-trybot was given, but this '
          'system does not support cgroups. Failing.')

    missing = osutils.FindMissingBinaries(_BUILDBOT_REQUIRED_BINARIES)
    if missing:
      parser.error(
          'Option --buildbot/--remote-trybot requires the following '
          "binaries which couldn't be found in $PATH: %s"
          % (', '.join(missing)))

  if options.reference_repo:
    options.reference_repo = os.path.abspath(options.reference_repo)

  # Sanity check of buildroot- specifically that it's not pointing into the
  # midst of an existing repo since git-repo doesn't support nesting.
  if (not repository.IsARepoRoot(options.buildroot) and
      git.FindRepoDir(options.buildroot)):
    cros_build_lib.Die(
        'Configured buildroot %s is a subdir of an existing repo checkout.'
        % options.buildroot)

  if not options.log_dir:
    options.log_dir = os.path.join(options.buildroot, _DEFAULT_LOG_DIR)

  log_file = None
  if options.tee:
    log_file = os.path.join(options.log_dir, _BUILDBOT_LOG_FILE)
    osutils.SafeMakedirs(options.log_dir)
    _BackupPreviousLog(log_file)

  with cros_build_lib.ContextManagerStack() as stack:
    options.preserve_paths = set()
    if log_file is not None:
      # We don't want the critical section to try to clean up the tee process,
      # so we run Tee (forked off) outside of it. This prevents a deadlock
      # because the Tee process only exits when its pipe is closed, and the
      # critical section accidentally holds on to that file handle.
      stack.Add(tee.Tee, log_file)
      options.preserve_paths.add(_DEFAULT_LOG_DIR)

    critical_section = stack.Add(cleanup.EnforcedCleanupSection)
    stack.Add(sudo.SudoKeepAlive)

    if not options.resume:
      # If we're in resume mode, use our parents tempdir rather than
      # nesting another layer.
      stack.Add(osutils.TempDir, prefix='cbuildbot-tmp', set_global=True)
      logging.debug('Cbuildbot tempdir is %r.', os.environ.get('TMP'))

    if options.cgroups:
      stack.Add(cgroups.SimpleContainChildren, 'cbuildbot')

    # Mark everything between EnforcedCleanupSection and here as having to
    # be rolled back via the contextmanager cleanup handlers. This
    # ensures that sudo bits cannot outlive cbuildbot, that anything
    # cgroups would kill gets killed, etc.
    stack.Add(critical_section.ForkWatchdog)

    if options.mock_tree_status is not None:
      stack.Add(_ObjectMethodPatcher, tree_status, '_GetStatus',
                return_value=options.mock_tree_status)

    if options.mock_slave_status is not None:
      # NOTE: pickle.load on a user-supplied file can execute arbitrary
      # code; this is a developer-only mock input, not untrusted data.
      with open(options.mock_slave_status, 'r') as f:
        mock_statuses = pickle.load(f)
        # Bug fix: iteritems() is Python-2-only and raises AttributeError on
        # Python 3; items() behaves identically here on both versions.
        for key, value in mock_statuses.items():
          mock_statuses[key] = builder_status_lib.BuilderStatus(**value)
      stack.Add(_ObjectMethodPatcher,
                completion_stages.MasterSlaveSyncCompletionStage,
                '_FetchSlaveStatuses',
                return_value=mock_statuses)

    stack.Add(_SetupConnections, options, build_config)
    retry_stats.SetupStats()

    timeout_display_message = None
    # For master-slave builds: Update slave's timeout using master's published
    # deadline.
    if options.buildbot and options.master_build_id is not None:
      slave_timeout = None
      if cidb.CIDBConnectionFactory.IsCIDBSetup():
        cidb_handle = cidb.CIDBConnectionFactory.GetCIDBConnectionForBuilder()
        if cidb_handle:
          slave_timeout = cidb_handle.GetTimeToDeadline(
              options.master_build_id)

      if slave_timeout is not None:
        # We artificially set a minimum slave_timeout because '0' is handled
        # specially, and because we don't want to timeout while trying to set
        # things up.
        slave_timeout = max(slave_timeout, 20)
        if options.timeout == 0 or slave_timeout < options.timeout:
          logging.info(
              'Updating slave build timeout to %d seconds enforced '
              'by the master', slave_timeout)
          options.timeout = slave_timeout
          timeout_display_message = (
              'This build has reached the timeout deadline set by the master. '
              'Either this stage or a previous one took too long (see stage '
              'timing historical summary in ReportStage) or the build failed '
              'to start on time.')
      else:
        logging.warning(
            'Could not get master deadline for master-slave build. '
            'Can not set slave timeout.')

    if options.timeout > 0:
      stack.Add(timeout_util.FatalTimeout, options.timeout,
                timeout_display_message)
    try:
      _RunBuildStagesWrapper(options, site_config, build_config)
    except failures_lib.ExitEarlyException as ex:
      # This build finished successfully. Do not re-raise ExitEarlyException.
      logging.info('One stage exited early: %s', ex)
def main(argv):
  """Entry point for cros_sdk: create, delete, enter, or snapshot the chroot.

  Parses arguments, validates the host environment, expands option aliases
  (e.g. --replace, --bootstrap), performs all mount/LVM work that must happen
  in the host's namespaces, then unshares namespaces and performs the
  download/delete/create/enter steps under a file lock.

  Args:
    argv: Command line arguments (excluding the program name).
  """
  # Turn on strict sudo checks.
  cros_build_lib.STRICT_SUDO = True
  # Read SDK version pins from the checkout; tolerate a missing file so we can
  # emit a friendly error below instead of crashing.
  conf = key_value_store.LoadFile(os.path.join(constants.SOURCE_ROOT,
                                               constants.SDK_VERSION_FILE),
                                  ignore_missing=True)
  sdk_latest_version = conf.get('SDK_LATEST_VERSION', '<unknown>')
  bootstrap_frozen_version = conf.get('BOOTSTRAP_FROZEN_VERSION', '<unknown>')

  # Use latest SDK for bootstrapping if requested. Use a frozen version of SDK
  # for bootstrapping if BOOTSTRAP_FROZEN_VERSION is set.
  bootstrap_latest_version = (sdk_latest_version
                              if bootstrap_frozen_version == '<unknown>' else
                              bootstrap_frozen_version)
  parser, commands = _CreateParser(sdk_latest_version,
                                   bootstrap_latest_version)
  options = parser.parse_args(argv)
  chroot_command = options.commands

  # Some sanity checks first, before we ask for sudo credentials.
  cros_build_lib.AssertOutsideChroot()

  # uname()[4] is the machine hardware name (e.g. 'x86_64').
  host = os.uname()[4]
  if host != 'x86_64':
    cros_build_lib.Die(
        "cros_sdk is currently only supported on x86_64; you're running"
        ' %s. Please find a x86_64 machine.' % (host, ))

  # Merge the outside PATH setting if we re-execed ourselves.
  if 'CHROMEOS_SUDO_PATH' in os.environ:
    os.environ['PATH'] = '%s:%s' % (os.environ.pop('CHROMEOS_SUDO_PATH'),
                                    os.environ['PATH'])

  _ReportMissing(osutils.FindMissingBinaries(NEEDED_TOOLS))
  if options.proxy_sim:
    _ReportMissing(osutils.FindMissingBinaries(PROXY_NEEDED_TOOLS))
  # Image tools (lvm2 etc.) are optional; remember what's missing so the
  # loopback-image paths below can degrade gracefully or error explicitly.
  missing_image_tools = osutils.FindMissingBinaries(IMAGE_NEEDED_TOOLS)

  if (sdk_latest_version == '<unknown>'
      or bootstrap_latest_version == '<unknown>'):
    cros_build_lib.Die(
        'No SDK version was found. '
        'Are you in a Chromium source tree instead of Chromium OS?\n\n'
        'Please change to a directory inside your Chromium OS source tree\n'
        'and retry. If you need to setup a Chromium OS source tree, see\n'
        ' https://dev.chromium.org/chromium-os/developer-guide')

  any_snapshot_operation = (options.snapshot_create
                            or options.snapshot_restore
                            or options.snapshot_delete
                            or options.snapshot_list)
  # Snapshots are implemented via LVM on the chroot image, so they require
  # --use-image.
  if any_snapshot_operation and not options.use_image:
    cros_build_lib.Die('Snapshot operations are not compatible with '
                       '--nouse-image.')

  if (options.snapshot_delete
      and options.snapshot_delete == options.snapshot_restore):
    parser.error('Cannot --snapshot_delete the same snapshot you are '
                 'restoring with --snapshot_restore.')

  _ReExecuteIfNeeded([sys.argv[0]] + argv)

  # The lock lives next to the chroot dir, named '.<chroot-basename>_lock'.
  lock_path = os.path.dirname(options.chroot)
  lock_path = os.path.join(
      lock_path, '.%s_lock' % os.path.basename(options.chroot).lstrip('.'))

  # Expand out the aliases...
  if options.replace:
    options.delete = options.create = True
  if options.bootstrap:
    options.create = True

  # If a command is not given, default to enter.
  # pylint: disable=protected-access
  # This _group_actions access sucks, but upstream decided to not include an
  # alternative to optparse's option_list, and this is what they recommend.
  options.enter |= not any(
      getattr(options, x.dest) for x in commands._group_actions)
  # pylint: enable=protected-access
  options.enter |= bool(chroot_command)

  if (options.delete and not options.create
      and (options.enter or any_snapshot_operation)):
    parser.error('Trying to enter or snapshot the chroot when --delete '
                 'was specified makes no sense.')

  if (options.unmount
      and (options.create or options.enter or any_snapshot_operation)):
    parser.error('--unmount cannot be specified with other chroot actions.')

  if options.working_dir is not None and not os.path.isabs(
      options.working_dir):
    options.working_dir = path_util.ToChrootPath(options.working_dir)

  # Discern if we need to create the chroot.
  chroot_exists = cros_sdk_lib.IsChrootReady(options.chroot)
  if (options.use_image and not chroot_exists and not options.delete
      and not options.unmount and not missing_image_tools
      and os.path.exists(_ImageFileForChroot(options.chroot))):
    # Try to re-mount an existing image in case the user has rebooted.
    with cgroups.SimpleContainChildren('cros_sdk'):
      with locking.FileLock(lock_path, 'chroot lock') as lock:
        logging.debug('Checking if existing chroot image can be mounted.')
        lock.write_lock()
        cros_sdk_lib.MountChroot(options.chroot, create=False)
        chroot_exists = cros_sdk_lib.IsChrootReady(options.chroot)
        if chroot_exists:
          logging.notice('Mounted existing image %s on chroot',
                         _ImageFileForChroot(options.chroot))

  # Finally, flip create if necessary.
  if options.enter or options.snapshot_create:
    options.create |= not chroot_exists

  # Make sure we will download if we plan to create.
  options.download |= options.create

  # Anything that needs to manipulate the main chroot mount or communicate
  # with LVM needs to be done here before we enter the new namespaces.

  # If deleting, do it regardless of the use_image flag so that a
  # previously-created loopback chroot can also be cleaned up.
  # TODO(bmgordon): See if the DeleteChroot call below can be removed in
  # favor of this block.
  chroot_deleted = False
  if options.delete:
    with cgroups.SimpleContainChildren('cros_sdk'):
      # Set a timeout of 300 seconds when getting the lock.
      with locking.FileLock(lock_path, 'chroot lock',
                            blocking_timeout=300) as lock:
        try:
          lock.write_lock()
        except timeout_util.TimeoutError as e:
          logging.error('Acquiring write_lock on %s failed: %s', lock_path, e)
          if not options.force:
            cros_build_lib.Die('Exiting; use --force to continue w/o lock.')
          else:
            logging.warning(
                'cros_sdk was invoked with force option, continuing.')
        # With image tools missing we can only unmount the tree; otherwise
        # tear down the mount and delete the backing data as well.
        if missing_image_tools:
          logging.notice('Unmounting chroot.')
          osutils.UmountTree(options.chroot)
        else:
          logging.notice('Deleting chroot.')
          cros_sdk_lib.CleanupChrootMount(options.chroot, delete=True)
          chroot_deleted = True

  # If cleanup was requested, we have to do it while we're still in the
  # original namespace. Since cleaning up the mount will interfere with any
  # other commands, we exit here. The check above should have made sure that
  # no other action was requested, anyway.
  if options.unmount:
    # Set a timeout of 300 seconds when getting the lock.
    with locking.FileLock(lock_path, 'chroot lock',
                          blocking_timeout=300) as lock:
      try:
        lock.write_lock()
      except timeout_util.TimeoutError as e:
        logging.error('Acquiring write_lock on %s failed: %s', lock_path, e)
        logging.warning(
            'Continuing with CleanupChroot(%s), which will umount the tree.',
            options.chroot)
      # We can call CleanupChroot (which calls cros_sdk_lib.CleanupChrootMount)
      # even if we don't get the lock because it will attempt to unmount the
      # tree and will print diagnostic information from 'fuser', 'lsof', and
      # 'ps'.
      CleanupChroot(options.chroot)
      sys.exit(0)

  # Make sure the main chroot mount is visible. Contents will be filled in
  # below if needed.
  if options.create and options.use_image:
    if missing_image_tools:
      raise SystemExit("""The tool(s) %s were not found.
Please make sure the lvm2 and thin-provisioning-tools packages
are installed on your host.
Example(ubuntu):
  sudo apt-get install lvm2 thin-provisioning-tools

If you want to run without lvm2, pass --nouse-image (chroot
snapshots will be unavailable).""" % ', '.join(missing_image_tools))

    logging.debug('Making sure chroot image is mounted.')
    with cgroups.SimpleContainChildren('cros_sdk'):
      with locking.FileLock(lock_path, 'chroot lock') as lock:
        lock.write_lock()
        if not cros_sdk_lib.MountChroot(options.chroot, create=True):
          cros_build_lib.Die('Unable to mount %s on chroot',
                             _ImageFileForChroot(options.chroot))
        logging.notice('Mounted %s on chroot',
                       _ImageFileForChroot(options.chroot))

  # Snapshot operations will always need the VG/LV, but other actions won't.
  if any_snapshot_operation:
    with cgroups.SimpleContainChildren('cros_sdk'):
      with locking.FileLock(lock_path, 'chroot lock') as lock:
        chroot_vg, chroot_lv = cros_sdk_lib.FindChrootMountSource(
            options.chroot)
        if not chroot_vg or not chroot_lv:
          cros_build_lib.Die('Unable to find VG/LV for chroot %s',
                             options.chroot)

        # Delete snapshot before creating a new one. This allows the user to
        # throw out old state, create a new snapshot, and enter the chroot in
        # a single call to cros_sdk. Since restore involves deleting, also do
        # it before creating.
        if options.snapshot_restore:
          lock.write_lock()
          valid_snapshots = ListChrootSnapshots(chroot_vg, chroot_lv)
          if options.snapshot_restore not in valid_snapshots:
            cros_build_lib.Die(
                '%s is not a valid snapshot to restore to. '
                'Valid snapshots: %s', options.snapshot_restore,
                ', '.join(valid_snapshots))
          # The chroot must be unmounted before the LV can be swapped out.
          osutils.UmountTree(options.chroot)
          if not RestoreChrootSnapshot(options.snapshot_restore, chroot_vg,
                                       chroot_lv):
            cros_build_lib.Die('Unable to restore chroot to snapshot.')
          if not cros_sdk_lib.MountChroot(options.chroot, create=False):
            cros_build_lib.Die(
                'Unable to mount restored snapshot onto chroot.')

        # Use a read lock for snapshot delete and create even though they
        # modify the filesystem, because they don't modify the mounted chroot
        # itself. The underlying LVM commands take their own locks, so
        # conflicting concurrent operations here may crash cros_sdk, but
        # won't corrupt the chroot image. This tradeoff seems worth it to
        # allow snapshot operations on chroots that have a process inside.
        if options.snapshot_delete:
          lock.read_lock()
          DeleteChrootSnapshot(options.snapshot_delete, chroot_vg, chroot_lv)

        if options.snapshot_create:
          lock.read_lock()
          if not CreateChrootSnapshot(options.snapshot_create, chroot_vg,
                                      chroot_lv):
            cros_build_lib.Die('Unable to create snapshot.')

  # If the image file has grown far beyond what the mounted filesystem is
  # actually using, run fstrim to give the unused space back to the host.
  img_path = _ImageFileForChroot(options.chroot)
  if (options.use_image and os.path.exists(options.chroot)
      and os.path.exists(img_path)):
    img_stat = os.stat(img_path)
    # st_blocks is in 512-byte units regardless of the fs block size.
    img_used_bytes = img_stat.st_blocks * 512

    mount_stat = os.statvfs(options.chroot)
    mount_used_bytes = mount_stat.f_frsize * (mount_stat.f_blocks -
                                              mount_stat.f_bfree)

    extra_gbs = (img_used_bytes - mount_used_bytes) // 2**30
    if extra_gbs > MAX_UNUSED_IMAGE_GBS:
      logging.notice('%s is using %s GiB more than needed. Running '
                     'fstrim.', img_path, extra_gbs)
      cmd = ['fstrim', options.chroot]
      try:
        cros_build_lib.dbg_run(cmd)
      except cros_build_lib.RunCommandError as e:
        # Best-effort: a failed trim only wastes disk space.
        logging.warning(
            'Running fstrim failed. Consider running fstrim on '
            'your chroot manually.\n%s', e)

  # Enter a new set of namespaces. Everything after here cannot directly
  # affect the host's mounts or alter LVM volumes.
  namespaces.SimpleUnshare()
  if options.ns_pid:
    first_pid = namespaces.CreatePidNs()
  else:
    first_pid = None

  if options.snapshot_list:
    # chroot_vg/chroot_lv were bound above: snapshot_list implies
    # any_snapshot_operation.
    for snap in ListChrootSnapshots(chroot_vg, chroot_lv):
      print(snap)
    sys.exit(0)

  if not options.sdk_version:
    sdk_version = (bootstrap_latest_version
                   if options.bootstrap else sdk_latest_version)
  else:
    sdk_version = options.sdk_version
  if options.buildbot_log_version:
    logging.PrintBuildbotStepText(sdk_version)

  # Based on selections, determine the tarball to fetch.
  if options.download:
    if options.sdk_url:
      urls = [options.sdk_url]
    else:
      urls = GetArchStageTarballs(sdk_version)

  with cgroups.SimpleContainChildren('cros_sdk', pid=first_pid):
    with locking.FileLock(lock_path, 'chroot lock') as lock:
      if options.proxy_sim:
        _ProxySimSetup(options)

      # Legacy delete path, skipped when the namespace-phase delete above
      # already removed the chroot (chroot_deleted).
      if (options.delete and not chroot_deleted
          and (os.path.exists(options.chroot)
               or os.path.exists(_ImageFileForChroot(options.chroot)))):
        lock.write_lock()
        DeleteChroot(options.chroot)

      sdk_cache = os.path.join(options.cache_dir, 'sdks')
      distfiles_cache = os.path.join(options.cache_dir, 'distfiles')
      osutils.SafeMakedirsNonRoot(options.cache_dir)

      # Migrate legacy cache dirs from the source root into cache_dir.
      for target in (sdk_cache, distfiles_cache):
        src = os.path.join(constants.SOURCE_ROOT, os.path.basename(target))
        if not os.path.exists(src):
          osutils.SafeMakedirsNonRoot(target)
          continue
        lock.write_lock(
            'Upgrade to %r needed but chroot is locked; please exit '
            'all instances so this upgrade can finish.' % src)
        if not os.path.exists(src):
          # Note that while waiting for the write lock, src may've vanished;
          # it's a rare race during the upgrade process that's a byproduct
          # of us avoiding taking a write lock to do the src check. If we
          # took a write lock for that check, it would effectively limit
          # all cros_sdk for a chroot to a single instance.
          osutils.SafeMakedirsNonRoot(target)
        elif not os.path.exists(target):
          # Upgrade occurred, but a reversion, or something whacky
          # occurred writing to the old location. Wipe and continue.
          os.rename(src, target)
        else:
          # Upgrade occurred once already, but either a reversion or
          # some before/after separate cros_sdk usage is at play.
          # Wipe and continue.
          osutils.RmDir(src)

      if options.download:
        lock.write_lock()
        sdk_tarball = FetchRemoteTarballs(
            sdk_cache, urls, 'stage3' if options.bootstrap else 'SDK')

      if options.create:
        lock.write_lock()
        # Recheck if the chroot is set up here before creating to make sure
        # we account for whatever the various delete/unmount/remount steps
        # above have done.
        if cros_sdk_lib.IsChrootReady(options.chroot):
          logging.debug('Chroot already exists. Skipping creation.')
        else:
          # sdk_tarball is always bound here: options.download was forced on
          # by 'options.download |= options.create' above.
          CreateChroot(options.chroot, sdk_tarball, options.cache_dir,
                       nousepkg=(options.bootstrap or options.nousepkg))

      if options.enter:
        lock.read_lock()
        EnterChroot(options.chroot, options.cache_dir, options.chrome_root,
                    options.chrome_root_mount, options.goma_dir,
                    options.goma_client_json, options.working_dir,
                    chroot_command)