def PerformStage(self):
  chroot_dir = os.path.join(self._build_root, constants.DEFAULT_CHROOT_DIR)
  sdk_dir = os.path.join(chroot_dir, 'build/amd64-host')
  tmp_dir = os.path.join(chroot_dir, 'tmp')
  osutils.SafeMakedirs(tmp_dir, mode=0o777, sudo=True)
  overlay_output_dir = os.path.join(chroot_dir, constants.SDK_OVERLAYS_OUTPUT)
  osutils.RmDir(overlay_output_dir, ignore_missing=True, sudo=True)
  osutils.SafeMakedirs(overlay_output_dir, mode=0o777, sudo=True)
  overlay_tarball_template = os.path.join(
      overlay_output_dir, TOOLCHAINS_OVERLAY_TARBALL_TEMPLATE)

  # Generate an overlay tarball for each unique toolchain combination. We
  # restrict ourselves to (a) board configs that are available to the builder
  # (naturally), and (b) toolchains that are part of the 'sdk' set.
  sdk_toolchains = set(toolchain.GetToolchainsForBoard('sdk'))
  generated = set()
  for board in self._run.site_config.GetBoards():
    try:
      toolchains = set(toolchain.GetToolchainsForBoard(board).iterkeys())
    except portage_util.MissingOverlayException:
      # The board overlay may not exist, e.g. on external builders.
      continue

    toolchains_str = '-'.join(sorted(toolchains))
    if not toolchains.issubset(sdk_toolchains) or toolchains_str in generated:
      continue

    with osutils.TempDir(prefix='toolchains-overlay-%s.' % toolchains_str,
                         base_dir=tmp_dir, sudo_rm=True) as overlay_dir:
      # NOTE: We let MountOverlayContext remove the mount point created by
      # the TempDir context below, because it has built-in retries for rmdir
      # EBUSY errors that are due to unmount lag.
      with osutils.TempDir(prefix='amd64-host-%s.' % toolchains_str,
                           base_dir=tmp_dir, delete=False) as merged_dir:
        with osutils.MountOverlayContext(sdk_dir, overlay_dir, merged_dir,
                                         cleanup=True):
          sysroot = merged_dir[len(chroot_dir):]
          cmd = ['cros_setup_toolchains', '--targets=boards',
                 '--include-boards=%s' % board,
                 '--sysroot=%s' % sysroot]
          commands.RunBuildScript(self._build_root, cmd, chromite_cmd=True,
                                  enter_chroot=True, sudo=True,
                                  extra_env=self._portage_extra_env)

      # NOTE: Make sure that the overlay directory is owned root:root and has
      # 0o755 perms; apparently, these things are preserved through
      # tarring/untarring and might cause havoc if overlooked.
      os.chmod(overlay_dir, 0o755)
      cros_build_lib.SudoRunCommand(['chown', 'root:root', overlay_dir])
      CreateTarball(overlay_dir,
                    overlay_tarball_template % {'toolchains': toolchains_str})

    generated.add(toolchains_str)
def testSkipCleanupGlobal(self):
  """Test that we reset global tempdir as expected even with skip."""
  with osutils.TempDir(prefix=self.PREFIX, set_global=True) as tempdir:
    tempdir_before = osutils.GetGlobalTempDir()
    tempdir_obj = osutils.TempDir(prefix=self.PREFIX, set_global=True,
                                  delete=False)
    tempdir_inside = osutils.GetGlobalTempDir()
    tempdir_obj.Cleanup()
    tempdir_after = osutils.GetGlobalTempDir()

  # We shouldn't leak the outer directory.
  self.assertNotExists(tempdir)
  self.assertEqual(tempdir_before, tempdir_after)
  # This is a strict substring check.
  self.assertLess(tempdir_before, tempdir_inside)
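# Illustrative sketch (not from the original sources) of the osutils.TempDir
# behaviors the test above exercises: the context-manager form removes the
# directory on exit, set_global=True points osutils.GetGlobalTempDir() at a
# path under it for the duration, and delete=False defers removal until an
# explicit Cleanup(). Only arguments and calls shown in the snippets here are
# used; the function name is made up.
import os

from chromite.lib import osutils


def _tempdir_usage_sketch():
  with osutils.TempDir(prefix='example.', set_global=True) as tempdir:
    # While this block is active, osutils.GetGlobalTempDir() resolves to a
    # path under |tempdir| (see the substring assertion in the test above).
    osutils.WriteFile(os.path.join(tempdir, 'scratch.txt'), 'data')

  # Manual lifetime management: removal happens only at Cleanup().
  tempdir_obj = osutils.TempDir(prefix='example.', delete=False)
  try:
    pass  # Work with the pinned directory here.
  finally:
    tempdir_obj.Cleanup()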
def Run(self):
  """Run cros uprevchrome.

  Raises:
    Exception if the PFQ build_id is not valid.
    Exception if UprevChrome raises exceptions.
  """
  self.options.Freeze()

  # Delay import so sqlalchemy isn't pulled in until we need it.
  from chromite.lib import cidb

  cidb_creds = self.options.cred_dir
  if cidb_creds is None:
    try:
      cidb_creds = cros_cidbcreds.CheckAndGetCIDBCreds()
    except:
      logging.error('Failed to download CIDB creds from gs.\n'
                    'Can try obtaining your credentials at '
                    'go/cros-cidb-admin and manually passing it in '
                    'with --cred-dir.')
      raise

  db = cidb.CIDBConnection(cidb_creds)

  build_number = self.ValidatePFQBuild(self.options.pfq_build, db)

  with osutils.TempDir(prefix='uprevchrome_',
                       delete=self.options.wipe) as work_dir:
    self.UprevChrome(work_dir, self.options.pfq_build, build_number)
    logging.info('Used working directory: %s', work_dir)
def Install(self, cpv, url):
  """Install the debug symbols for |cpv|.

  This will install the debug symbols tarball in PKGDIR so that it can be
  used later.

  Args:
    cpv: the cpv of the package to build. This assumes that the cpv is
      installed in the sysroot.
    url: url of the debug symbols archive. This could be a Google Storage url
      or a local path.
  """
  archive = os.path.join(self._vartree.settings['PKGDIR'],
                         cpv + DEBUG_SYMS_EXT)
  # GsContext does not understand file:// scheme so we need to extract the
  # path ourselves.
  parsed_url = urllib.parse.urlsplit(url)
  if not parsed_url.scheme or parsed_url.scheme == 'file':
    url = parsed_url.path

  if not os.path.isfile(archive):
    self._gs_context.Copy(url, archive, debug_level=logging.DEBUG)

  with osutils.TempDir(sudo_rm=True) as tempdir:
    cros_build_lib.sudo_run(
        ['tar', '-I', 'bzip2 -q', '-xf', archive, '-C', tempdir], quiet=True)

    with open(self._vartree.getpath(cpv, filename='CONTENTS'),
              'a') as content_file:
      # Merge the content of the temporary dir into the sysroot.
      # pylint: disable=protected-access
      link = self._vartree.dbapi._dblink(cpv)
      link.mergeme(tempdir, self._sysroot, content_file, None, '', {}, None)
def RemoveKnownHost(host, known_hosts_path=KNOWN_HOSTS_PATH):
  """Removes |host| from a known_hosts file.

  `ssh-keygen -R` doesn't work on bind mounted files as they can only be
  updated in place.  Since we bind mount the default known_hosts file when
  entering the chroot, this function provides an alternate way to remove hosts
  from the file.

  Args:
    host: The host name to remove from the known_hosts file.
    known_hosts_path: Path to the known_hosts file to change.  Defaults to the
      standard SSH known_hosts file path.

  Raises:
    cros_build_lib.RunCommandError if ssh-keygen fails.
  """
  # `ssh-keygen -R` creates a backup file to retain the old 'known_hosts'
  # content and never deletes it.  Using TempDir here to make sure both the
  # temp files created by us and by `ssh-keygen -R` are deleted afterwards.
  with osutils.TempDir(prefix='remote-access-') as tempdir:
    temp_file = os.path.join(tempdir, 'temp_known_hosts')
    try:
      # Using shutil.copy2 to preserve the file ownership and permissions.
      shutil.copy2(known_hosts_path, temp_file)
    except IOError:
      # If |known_hosts_path| doesn't exist neither does |host| so we're done.
      return
    cros_build_lib.RunCommand(['ssh-keygen', '-R', host, '-f', temp_file],
                              quiet=True)
    shutil.copy2(temp_file, known_hosts_path)
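# Hypothetical caller (host names and function name made up) for
# RemoveKnownHost above, e.g. a cleanup step run after reflashing test
# devices so their regenerated SSH keys don't trip host-key checks.
def _forget_test_device_keys():
  for host in ('chromeos-test-device.local', '192.168.231.100'):
    RemoveKnownHost(host)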
def PerformStage(self):
  self.Initialize()

  with osutils.TempDir() as tempdir:
    # Save off the last manifest.
    fresh_sync = True
    if os.path.exists(self.repo.directory) and not self._run.options.clobber:
      old_filename = os.path.join(tempdir, 'old.xml')
      try:
        old_contents = self.repo.ExportManifest()
      except cros_build_lib.RunCommandError as e:
        logging.warning(str(e))
      else:
        osutils.WriteFile(old_filename, old_contents)
        fresh_sync = False

    # Sync.
    self.ManifestCheckout(self.GetNextManifest())

    # Print the blamelist.
    if fresh_sync:
      logging.PrintBuildbotStepText('(From scratch)')
    elif self._run.options.buildbot:
      lkgm_manager.GenerateBlameList(self.repo, old_filename)

    # Incremental builds request an additional build before patching changes.
    if self._run.config.build_before_patching:
      pre_build_passed = self.RunPrePatchBuild()
      if not pre_build_passed:
        logging.PrintBuildbotStepText('Pre-patch build failed.')
def BuildPackages(target, sysroot, run_configs):
  """Build and install packages into a sysroot.

  Args:
    target (build_target_lib.BuildTarget): The target whose packages are being
      installed.
    sysroot (sysroot_lib.Sysroot): The sysroot where the packages are being
      installed.
    run_configs (BuildPackagesRunConfig): The run configs.
  """
  cros_build_lib.AssertInsideChroot()

  cmd = [os.path.join(constants.CROSUTILS_DIR, 'build_packages'),
         '--board', target.name, '--board_root', sysroot.path]
  cmd += run_configs.GetBuildPackagesArgs()

  extra_env = run_configs.GetEnv()
  extra_env['USE_NEW_PARALLEL_EMERGE'] = '1'
  with osutils.TempDir() as tempdir:
    extra_env[constants.CROS_METRICS_DIR_ENVVAR] = tempdir

    try:
      # REVIEW: discuss which dimensions to flatten into the metric
      # name other than target.name...
      with metrics.timer('service.sysroot.BuildPackages.RunCommand'):
        cros_build_lib.run(cmd, extra_env=extra_env)
    except cros_build_lib.RunCommandError as e:
      failed_pkgs = portage_util.ParseDieHookStatusFile(tempdir)
      raise sysroot_lib.PackageInstallError(
          str(e), e.result, exception=e, packages=failed_pkgs)
def testCanSubmitChangeDisallowedByParentConfigByDefault(self):
  """Test CanSubmitChangeInPreCq when sub-config allows it, but not root."""
  mock_checkout = mock.Mock()
  with osutils.TempDir(set_global=True) as tempdir:
    root_dir = os.path.join(tempdir, 'overlays')
    mock_checkout.GetPath.return_value = root_dir
    root_ini = os.path.join(root_dir, 'COMMIT-QUEUE.ini')
    osutils.WriteFile(
        root_ini,
        '[GENERAL]\n'
        'union-pre-cq-sub-configs: yes\n',
        makedirs=True)
    f_1 = self._CreateOverlayPaths(
        root_dir, 'overlay-lumpy',
        '[GENERAL]\nsubmit-in-pre-cq: yes\n')
    f_2 = self._CreateOverlayPaths(
        root_dir, 'overlay-link', '[GENERAL]\n')
    diff_dict = {f: 'M' for f in (f_1, f_2)}
    change = self._patch_factory.MockPatch()
    self.PatchObject(cros_patch.GerritPatch, 'GetDiffStatus',
                     return_value=diff_dict)
    parser = self.CreateCQConfigParser(
        change=change, common_config_file=root_ini, checkout=mock_checkout)
    self.assertFalse(parser.CanSubmitChangeInPreCQ())
def Cached(cls, cache_dir, *args, **kwargs):
  """Reuses previously fetched GSUtil, performing the fetch if necessary.

  Arguments:
    cache_dir: The toplevel cache dir.
    *args, **kwargs: Arguments that are passed through to the GSContext()
      constructor.

  Returns:
    An initialized GSContext() object.
  """
  common_path = os.path.join(cache_dir, constants.COMMON_CACHE)
  tar_cache = cache.TarballCache(common_path)
  key = (cls.GSUTIL_TAR,)

  # The common cache will not be LRU, removing the need to hold a read
  # lock on the cached gsutil.
  ref = tar_cache.Lookup(key)
  if ref.Exists():
    logging.debug('Reusing cached gsutil.')
  else:
    logging.debug('Fetching gsutil.')
    with osutils.TempDir(base_dir=tar_cache.staging_dir) as tempdir:
      gsutil_tar = os.path.join(tempdir, cls.GSUTIL_TAR)
      cros_build_lib.RunCurl([cls.GSUTIL_URL, '-o', gsutil_tar],
                             debug_level=logging.DEBUG)
      ref.SetDefault(gsutil_tar)

  gsutil_bin = os.path.join(ref.path, 'gsutil', 'gsutil')
  return cls(*args, gsutil_bin=gsutil_bin, **kwargs)
def UploadDummyArtifact(self, path, faft_hack=False):
  """Upload artifacts to the dummy build results."""
  logging.info('UploadDummyArtifact: %s', path)
  with osutils.TempDir(prefix='dummy') as tempdir:
    artifact_path = os.path.join(
        tempdir, '%s/%s' % (self._current_board, os.path.basename(path)))

    logging.info('Rename: %s -> %s', path, artifact_path)
    os.mkdir(os.path.join(tempdir, self._current_board))
    shutil.copyfile(path, artifact_path)

    logging.info('Main artifact from: %s', artifact_path)

    if faft_hack:
      # We put the firmware artifact in a directory named by board so that
      # immutable FAFT infrastructure can find it. We should remove this.
      self.UploadArtifact(
          artifact_path, archive=True, prefix=self._current_board)
    else:
      self.UploadArtifact(artifact_path, archive=True)

  gs_context = gs.GSContext(dry_run=self._run.options.debug_forced)
  for url in self.GetDummyArchiveUrls():
    logging.info('Uploading dummy artifact to %s...', url)
    with timeout_util.Timeout(20 * 60):
      logging.info('Dummy artifact from: %s', path)
      gs_context.CopyInto(path, url, parallel=True, recursive=True)
def CreateFactoryZip(self):
  """Create/publish the firmware build artifact for the current board."""
  logging.info('Create factory_image.zip')

  # TODO: Move this image creation logic back into WorkspaceBuildImages.

  factory_install_symlink = None
  if 'factory_install' in self._run.config['images']:
    alias = commands.BuildFactoryInstallImage(
        self._build_root,
        self._current_board,
        extra_env=self._portage_extra_env)
    factory_install_symlink = self.GetImageDirSymlink(alias, self._build_root)
    if self._run.config['factory_install_netboot']:
      commands.MakeNetboot(
          self._build_root, self._current_board, factory_install_symlink)

  # Build and upload factory zip if needed.
  assert self._run.config['factory_toolkit']

  with osutils.TempDir(prefix='factory_zip') as zip_dir:
    filename = commands.BuildFactoryZip(
        self._build_root,
        self._current_board,
        zip_dir,
        factory_install_symlink,
        self.dummy_version)
    self.UploadDummyArtifact(os.path.join(zip_dir, filename))
def SimpleChromeWorkflowTest(sysroot_path, build_target_name, chrome_root,
                             goma):
  """Execute SimpleChrome workflow tests.

  Args:
    sysroot_path (str): The sysroot path for testing Chrome.
    build_target_name (str): Board build target.
    chrome_root (str): Path to Chrome source root.
    goma (goma_util.Goma): Goma object (or None).
  """
  board_dir = 'out_%s' % build_target_name

  out_board_dir = os.path.join(chrome_root, board_dir, 'Release')
  use_goma = goma is not None
  extra_args = []

  with osutils.TempDir(prefix='chrome-sdk-cache') as tempdir:
    sdk_cmd = _InitSimpleChromeSDK(tempdir, build_target_name, sysroot_path,
                                   chrome_root, use_goma)

    if goma:
      extra_args.extend(['--nostart-goma', '--gomadir', goma.linux_goma_dir])

    _BuildChrome(sdk_cmd, chrome_root, out_board_dir, goma)
    _TestDeployChrome(sdk_cmd, out_board_dir)
    _VMTestChrome(build_target_name, sdk_cmd)
def testSafeSymlink(self):
  """Test that we can create symlinks."""
  with osutils.TempDir(sudo_rm=True) as tempdir:
    file_a = os.path.join(tempdir, 'a')
    osutils.WriteFile(file_a, 'a')

    file_b = os.path.join(tempdir, 'b')
    osutils.WriteFile(file_b, 'b')

    user_dir = os.path.join(tempdir, 'bar')
    user_link = os.path.join(user_dir, 'link')
    osutils.SafeMakedirs(user_dir)

    root_dir = os.path.join(tempdir, 'foo')
    root_link = os.path.join(root_dir, 'link')
    osutils.SafeMakedirs(root_dir, sudo=True)

    # We can create and override links owned by a non-root user.
    osutils.SafeSymlink(file_a, user_link)
    self.assertEqual('a', osutils.ReadFile(user_link))

    osutils.SafeSymlink(file_b, user_link)
    self.assertEqual('b', osutils.ReadFile(user_link))

    # We can create and override links owned by root.
    osutils.SafeSymlink(file_a, root_link, sudo=True)
    self.assertEqual('a', osutils.ReadFile(root_link))

    osutils.SafeSymlink(file_b, root_link, sudo=True)
    self.assertEqual('b', osutils.ReadFile(root_link))
def VmTest(input_proto, _output_proto, _config):
  """Run VM tests."""
  build_target_name = input_proto.build_target.name
  vm_path = input_proto.vm_path.path

  test_harness = input_proto.test_harness

  vm_tests = input_proto.vm_tests

  cmd = [
      'cros_run_test', '--debug', '--no-display', '--copy-on-write',
      '--board', build_target_name, '--image-path', vm_path,
      '--%s' % test_pb2.VmTestRequest.TestHarness.Name(test_harness).lower()
  ]
  cmd.extend(vm_test.pattern for vm_test in vm_tests)

  if input_proto.ssh_options.port:
    cmd.extend(['--ssh-port', str(input_proto.ssh_options.port)])

  if input_proto.ssh_options.private_key_path:
    cmd.extend(
        ['--private-key', input_proto.ssh_options.private_key_path.path])

  # TODO(evanhernandez): Find a nice way to pass test_that-args through
  # the build API. Or obviate them.
  if test_harness == test_pb2.VmTestRequest.AUTOTEST:
    cmd.append('--test_that-args=--whitelist-chrome-crashes')

  with osutils.TempDir(prefix='vm-test-results.') as results_dir:
    cmd.extend(['--results-dir', results_dir])
    cros_build_lib.run(cmd, kill_timeout=10 * 60)
def main(argv):
  options = _ParseCommandLine(argv)
  _PostParseCheck(options)

  # Set cros_build_lib debug level to hide RunCommand spew.
  if options.verbose:
    logging.getLogger().setLevel(logging.DEBUG)
  else:
    logging.getLogger().setLevel(logging.INFO)

  with stats.UploadContext() as queue:
    cmd_stats = stats.Stats.SafeInit(cmd_line=argv, cmd_base='deploy_chrome')
    if cmd_stats:
      queue.put([cmd_stats, stats.StatsUploader.URL, 1])

    with osutils.TempDir(set_global=True) as tempdir:
      staging_dir = options.staging_dir
      if not staging_dir:
        staging_dir = os.path.join(tempdir, 'chrome')

      deploy = DeployChrome(options, tempdir, staging_dir)
      try:
        deploy.Perform()
      except failures_lib.StepFailure as ex:
        raise SystemExit(str(ex).strip())
      deploy.Cleanup()
def _Insert(self, key, tarball_path):
  """Insert a tarball and its extracted contents into the cache."""
  with osutils.TempDir(base_dir=self.staging_dir) as tempdir:
    extract_path = os.path.join(tempdir, 'extract')
    os.mkdir(extract_path)
    Untar(tarball_path, extract_path)
    DiskCache._Insert(self, key, extract_path)
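# Minimal sketch (cache-dir layout, key, and function name are assumptions)
# of how the _Insert() above is typically reached: a Lookup() miss is
# populated via SetDefault(), which stages the tarball through the
# TempDir-backed insert shown above. Only APIs that appear in the Cached()
# snippet earlier (TarballCache, Lookup, Exists, SetDefault, ref.path) are
# used.
import os

from chromite.lib import cache


def _fetch_into_cache(cache_dir, tarball_path):
  tar_cache = cache.TarballCache(os.path.join(cache_dir, 'common'))
  ref = tar_cache.Lookup((os.path.basename(tarball_path),))
  if not ref.Exists():
    # Populating the reference untars the tarball via _Insert() above.
    ref.SetDefault(tarball_path)
  return ref.path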
def BuildAutotestTarballs(self):
  """Build the autotest tarballs."""
  with osutils.TempDir(prefix='cbuildbot-autotest') as tempdir:
    with self.ArtifactUploader(strict=True) as queue:
      cwd = os.path.abspath(
          os.path.join(self._build_root, 'chroot', 'build',
                       self._current_board, constants.AUTOTEST_BUILD_PATH,
                       '..'))

      control_files_tarball = commands.BuildAutotestControlFilesTarball(
          self._build_root, cwd, tempdir)
      queue.put([control_files_tarball])

      packages_tarball = commands.BuildAutotestPackagesTarball(
          self._build_root, cwd, tempdir)
      queue.put([packages_tarball])

      # Tar up the test suites.
      test_suites_tarball = commands.BuildAutotestTestSuitesTarball(
          self._build_root, cwd, tempdir)
      queue.put([test_suites_tarball])

      # Build the server side package.
      server_tarball = commands.BuildAutotestServerPackageTarball(
          self._build_root, cwd, tempdir)
      queue.put([server_tarball])
def _CopyToDeviceInParallel(self, src, dest):
  """Chop the source file into chunks and send them to the destination in parallel.

  Transfer chunks of the file in parallel and reassemble them at the
  destination if the file size is larger than the chunk size. Fall back to
  scp mode otherwise.

  Args:
    src: Local path as a string.
    dest: rsync/scp path of the form <host>:/<path> as a string.
  """
  src_filename = os.path.basename(src)
  chunk_prefix = src_filename + '_'
  with osutils.TempDir() as tempdir:
    chunk_path = os.path.join(tempdir, chunk_prefix)
    try:
      cmd = ['split', '-b', str(CHUNK_SIZE), src, chunk_path]
      cros_build_lib.run(cmd)
      input_list = [[chunk_file, dest, 'scp']
                    for chunk_file in glob.glob(chunk_path + '*')]
      parallel.RunTasksInProcessPool(self.CopyToDevice, input_list,
                                     processes=DEGREE_OF_PARALLELISM)
      logging.info('Assembling these chunks now...')
      chunks = '%s/%s*' % (dest, chunk_prefix)
      final_dest = '%s/%s' % (dest, src_filename)
      assemble_cmd = ['cat', chunks, '>', final_dest]
      self.run(assemble_cmd)
      cleanup_cmd = ['rm', '-f', chunks]
      self.run(cleanup_cmd)
    except IOError:
      logging.error('Could not complete the payload transfer...')
      raise
    logging.info('Successfully copied %s to %s in chunks in parallel',
                 src, dest)
def testGetUnionPreCQSubConfigsFlag(self):
  """Test ShouldUnionPreCQFromSubConfigs."""
  with osutils.TempDir(set_global=True) as tempdir:
    path = os.path.join(tempdir, 'foo.ini')
    osutils.WriteFile(path, '[GENERAL]\nunion-pre-cq-sub-configs: yes\n')
    parser = self.CreateCQConfigParser(common_config_file=path)
    self.assertTrue(parser.GetUnionPreCQSubConfigsFlag())
def run_case(content, expected):
  with osutils.TempDir() as temp:
    ebuild = os.path.join(temp, 'overlay', 'app-misc', 'foo-0.0.1-r1.ebuild')
    osutils.WriteFile(ebuild, content, makedirs=True)
    self.assertEqual(expected, portage_util.EBuild(ebuild).has_test)
def testGetOptionFromConfigFileOnGoodConfigFile(self):
  """Test if we can handle a good config file."""
  with osutils.TempDir(set_global=True) as tempdir:
    path = os.path.join(tempdir, 'foo.ini')
    osutils.WriteFile(path, '[a]\nb: bar baz\n')
    ignored = self.GetOption(path)
    self.assertEqual('bar baz', ignored)
def testGetIgnoredStages(self):
  """Test if we can get the ignored stages from a good config file."""
  with osutils.TempDir(set_global=True) as tempdir:
    path = os.path.join(tempdir, 'foo.ini')
    osutils.WriteFile(path, '[GENERAL]\nignored-stages: bar baz\n')
    ignored = self.GetOption(path, section='GENERAL', option='ignored-stages')
    self.assertEqual('bar baz', ignored)
def testPackageBuildFailure(self):
  """Test handling of raised BuildPackageFailure."""
  tempdir = osutils.TempDir(base_dir=self.tempdir)
  self.PatchObject(osutils, 'TempDir', return_value=tempdir)

  pkgs = ['cat/pkg', 'foo/bar']
  expected = [('cat', 'pkg'), ('foo', 'bar')]

  result = test_service.BuildTargetUnitTestResult(1, None)
  result.failed_cpvs = [portage_util.SplitCPV(p, strict=False) for p in pkgs]
  self.PatchObject(test_service, 'BuildTargetUnitTest', return_value=result)

  input_msg = self._GetInput(board='board', result_path=self.tempdir)
  output_msg = self._GetOutput()

  rc = test_controller.BuildTargetUnitTest(input_msg, output_msg,
                                           self.api_config)

  self.assertEqual(controller.RETURN_CODE_UNSUCCESSFUL_RESPONSE_AVAILABLE, rc)
  self.assertTrue(output_msg.failed_packages)
  failed = []
  for pi in output_msg.failed_packages:
    failed.append((pi.category, pi.package_name))
  self.assertCountEqual(expected, failed)
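# Generalized sketch (helper name is made up) of the TempDir-pinning pattern
# the test above uses: constructing a real TempDir up front and patching
# osutils.TempDir to return it lets a test observe or pre-seed files that the
# code under test writes into its otherwise anonymous scratch directory.
from chromite.lib import osutils


def _pin_tempdir_for_test(test_case, base_dir):
  pinned = osutils.TempDir(base_dir=base_dir)
  # PatchObject here is the cros_test_lib mixin method seen in the test above.
  test_case.PatchObject(osutils, 'TempDir', return_value=pinned)
  return pinned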
def testGetSubsystem(self):
  """Test if we can get the subsystem label from a good config file."""
  with osutils.TempDir(set_global=True) as tempdir:
    path = os.path.join(tempdir, 'foo.ini')
    osutils.WriteFile(path, '[GENERAL]\nsubsystem: power light\n')
    ignored = self.GetOption(path, section='GENERAL', option='subsystem')
    self.assertEqual('power light', ignored)
@contextlib.contextmanager
def MountedMoblabDiskContext(self):
  """A contextmanager to mount the already prepared moblab disk.

  This can be used to modify the external disk mounted by the moblab VM,
  while the VMs are not running. It is often much more performant to modify
  the disk on the host than SCP'ing large files into the VM.

    vms = moblab_vm.MoblabVm(workspace_dir)
    vms.Create(...)
    with vms.MountedMoblabDiskContext() as moblab_disk_dir:
      # Copy stuff into the directory at moblab_disk_dir
    vms.Start()
    ...
  """
  if not self.initialized:
    raise MoblabVmError(
        'Uninitialized workspace %s. Can not mount disk.' % self.workspace)
  if self.running:
    raise MoblabVmError(
        'VM at %s is already running. Stop() before mounting disk.' %
        self.workspace)

  with osutils.TempDir() as tempdir:
    osutils.MountDir(self._config[_CONFIG_MOBLAB_DISK], tempdir, 'ext4',
                     skip_mtab=True)
    try:
      yield tempdir
    finally:
      osutils.UmountDir(tempdir)
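# Generalized sketch (function name and default filesystem type assumed) of
# the mount-inside-TempDir pattern used above: the mount point exists only
# for the lifetime of the context and is always unmounted before TempDir
# removes it.
import contextlib

from chromite.lib import osutils


@contextlib.contextmanager
def _mounted_image(image_path, fs_type='ext4'):
  with osutils.TempDir() as mount_point:
    osutils.MountDir(image_path, mount_point, fs_type, skip_mtab=True)
    try:
      yield mount_point
    finally:
      osutils.UmountDir(mount_point)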
def CreateExt4Image(self):
  """Create an ext4 image."""
  with osutils.TempDir(prefix='dlc_') as temp_dir:
    mount_point = os.path.join(temp_dir, 'mount_point')
    # Create the directory where the image is located if it doesn't exist.
    osutils.SafeMakedirs(os.path.split(self.dest_image)[0])

    # Create a raw image file.
    with open(self.dest_image, 'w') as f:
      f.truncate(self._BLOCKS * self._BLOCK_SIZE)
    # Create an ext4 file system on the raw image.
    cros_build_lib.run([
        '/sbin/mkfs.ext4', '-b', str(self._BLOCK_SIZE), '-O', '^has_journal',
        self.dest_image
    ], capture_output=True)
    # Create the mount_point directory.
    osutils.SafeMakedirs(mount_point)
    # Mount the ext4 image.
    osutils.MountDir(self.dest_image, mount_point, mount_opts=('loop', 'rw'))

    try:
      self.SetupDlcImageFiles(mount_point)
    finally:
      # Unmount the ext4 image.
      osutils.UmountDir(mount_point)

    # Shrink to minimum size.
    cros_build_lib.run(['/sbin/e2fsck', '-y', '-f', self.dest_image],
                       capture_output=True)
    cros_build_lib.run(['/sbin/resize2fs', '-M', self.dest_image],
                       capture_output=True)
def PerformStage(self):
  chrome_version = self.DetermineChromeVersion()

  logging.PrintBuildbotStepText('tag %s' % chrome_version)

  sync_chrome = os.path.join(
      self._orig_root, 'chromite', 'bin', 'sync_chrome')

  # Branched gclient can use git-cache incompatibly, so use a temp one.
  with osutils.TempDir(prefix='dummy') as git_cache:
    # --reset tells sync_chrome to blow away local changes and to feel
    # free to delete any directories that get in the way of syncing. This
    # is needed for unattended operation.
    # --ignore-locks tells sync_chrome to ignore git-cache locks.
    # --gclient is not specified here, sync_chrome will locate the one
    # on the $PATH.
    cmd = [sync_chrome, '--reset', '--ignore_locks',
           '--tag', chrome_version,
           '--git_cache_dir', git_cache]

    if constants.USE_CHROME_INTERNAL in self._run.config.useflags:
      cmd += ['--internal']

    cmd += [self._run.options.chrome_root]
    with timeout_util.Timeout(self.SYNC_CHROME_TIMEOUT):
      retry_util.RunCommandWithRetries(
          constants.SYNC_RETRIES, cmd, cwd=self._build_root)
def GenerateVerity(self):
  """Generate verity parameters and hashes for the image."""
  logging.info('Generating DLC image verity.')
  with osutils.TempDir(prefix='dlc_') as temp_dir:
    hash_tree = os.path.join(temp_dir, 'hash_tree')
    # Get blocks in the image.
    blocks = math.ceil(os.path.getsize(self.dest_image) / self._BLOCK_SIZE)
    result = cros_build_lib.run([
        'verity', 'mode=create', 'alg=sha256', 'payload=' + self.dest_image,
        'payload_blocks=' + str(blocks), 'hashtree=' + hash_tree,
        'salt=random'
    ], capture_output=True)
    table = result.output

    # Append the merkle tree to the image.
    osutils.WriteFile(
        self.dest_image, osutils.ReadFile(hash_tree, mode='rb'), mode='a+b')

    # Write verity parameter to table file.
    osutils.WriteFile(self.dest_table, table, mode='wb')

    # Compute image hash.
    image_hash = HashFile(self.dest_image)
    table_hash = HashFile(self.dest_table)
    # Write image hash to imageloader.json file.
    blocks = math.ceil(os.path.getsize(self.dest_image) / self._BLOCK_SIZE)

    imageloader_json_content = self.GetImageloaderJsonContent(
        image_hash, table_hash, int(blocks))
    pformat.json(imageloader_json_content, fp=self.dest_imageloader_json)
def PerformStage(self):
  """Run sample cbuildbot_launch tests."""
  # TODO: Move this tempdir, it's a problem.
  with osutils.TempDir() as tryjob_buildroot:
    self.tryjob_buildroot = tryjob_buildroot

    # Initial build fails.
    self.RunCbuildbotLauncher('Initial Build (fail)',
                              self._run.options.branch,
                              'fail-build',
                              expect_success=False)

    # Test cleanup after a fail.
    self.RunCbuildbotLauncher('Second Build (pass)',
                              self._run.options.branch,
                              'success-build',
                              expect_success=True)

    # Test reduced cleanup after a pass.
    self.RunCbuildbotLauncher('Third Build (pass)',
                              self._run.options.branch,
                              'success-build',
                              expect_success=True)

    # Test branch transition.
    self.RunCbuildbotLauncher('Branch Build (pass)',
                              'release-R68-10718.B',
                              'success-build',
                              expect_success=True)
def CreateAndUploadPayload(payload, cache, work_dir, sign=True, verify=True,
                           dry_run=False, au_generator_uri=None):
  """Helper to create a PaygenPayloadLib instance and use it.

  Args:
    payload: An instance of utils.Payload describing the payload to generate.
    cache: An instance of DownloadCache for retrieving files.
    work_dir: A working directory that can hold scratch files. Will be cleaned
      up when done, and won't interfere with other users. None for /tmp.
    sign: Boolean saying if the payload should be signed (normally, you do).
    verify: whether the payload should be verified (default: True)
    dry_run: don't perform actual work
    au_generator_uri: URI to override standard au_generator.zip rules.
  """
  with osutils.TempDir(prefix='paygen_payload.', base_dir=work_dir) as gen_dir:
    logging.info('* Starting payload generation')
    start_time = datetime.datetime.now()

    _PaygenPayload(payload, cache, gen_dir, sign, verify, au_generator_uri,
                   dry_run=dry_run).Run()

    end_time = datetime.datetime.now()
    logging.info('* Finished payload generation in %s', end_time - start_time)
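# Illustrative sketch (function, prefix, and file names are made up) of the
# base_dir=work_dir pattern used above: scratch files stay under a
# caller-controlled working directory (or the system default when work_dir
# is None), so concurrent runs don't interfere and cleanup never touches
# unrelated files.
import os

from chromite.lib import osutils


def _with_scratch_dir(work_dir=None):
  with osutils.TempDir(prefix='scratch.', base_dir=work_dir) as scratch:
    marker = os.path.join(scratch, 'marker')
    osutils.WriteFile(marker, 'done')
    return os.path.exists(marker)  # True here; the directory vanishes on exit.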