def _Initialize(self):
  """Prepares this page set for use.

  Loads the WPR archive metadata (when declared), best-effort downloads
  the credentials file, and pulls the cloud-storage data behind every
  .sha1 file found under the serving directories.
  """
  # Load archive metadata when this page set declares one.
  if self.archive_data_file:
    archive_file = os.path.join(self._base_dir, self.archive_data_file)
    self.wpr_archive_info = (
        page_set_archive_info.PageSetArchiveInfo.FromFile(archive_file))

  # Credentials are optional: failure to fetch them is only a warning.
  if self.credentials_path:
    try:
      cloud_storage.GetIfChanged(
          os.path.join(self._base_dir, self.credentials_path))
    except (cloud_storage.CredentialsError, cloud_storage.PermissionError):
      logging.warning('Cannot retrieve credential file: %s',
                      self.credentials_path)

  # Directories served over HTTP: the declared serving dirs plus the
  # directory of each file:// page.
  all_serving_dirs = self.serving_dirs.copy()
  all_serving_dirs.update(
      page.serving_dir for page in self if page.is_file)

  # Download the counterpart of every .sha1 hash file found in the
  # serving dirs. Assume all data is public.
  for serving_dir in all_serving_dirs:
    if os.path.splitdrive(serving_dir)[1] == '/':
      raise ValueError('Trying to serve root directory from HTTP server.')
    for dirpath, _, filenames in os.walk(serving_dir):
      for filename in filenames:
        stem, extension = os.path.splitext(os.path.join(dirpath, filename))
        if extension == '.sha1':
          cloud_storage.GetIfChanged(stem)
def _UpdatePageSetArchivesIfChanged(page_set):
  """Syncs cloud-storage data for |page_set|.

  Best-effort downloads the credentials file, then fetches the data
  behind every .sha1 file found under the page set's serving dirs.
  """
  if page_set.credentials_path:
    # Credentials are optional; a fetch failure is only a warning.
    try:
      cloud_storage.GetIfChanged(
          os.path.join(page_set.base_dir, page_set.credentials_path))
    except (cloud_storage.CredentialsError, cloud_storage.PermissionError,
            cloud_storage.CloudStorageError) as e:
      logging.warning(
          'Cannot retrieve credential file %s due to cloud storage '
          'error %s', page_set.credentials_path, str(e))

  # Serving dirs = explicitly declared dirs + the dir of each file:// page.
  all_serving_dirs = set(page_set.serving_dirs)
  for page in page_set:
    if page.is_file:
      all_serving_dirs.add(page.serving_dir)

  # Download the data behind every .sha1 hash file. Assume it is public.
  for serving_dir in all_serving_dirs:
    if os.path.splitdrive(serving_dir)[1] == '/':
      raise ValueError(
          'Trying to serve root directory from HTTP server.')
    for dirpath, _, filenames in os.walk(serving_dir):
      for filename in filenames:
        stem, ext = os.path.splitext(os.path.join(dirpath, filename))
        if ext == '.sha1':
          cloud_storage.GetIfChanged(stem)
def _InstallIpfw(self):
  """Downloads the ipfw binary and kernel module from cloud storage.

  On a cloud storage error, logs manual dummynet installation
  instructions and exits the process.
  """
  ipfw_bin = os.path.join(util.GetTelemetryDir(), 'bin', 'ipfw')
  ipfw_mod = os.path.join(util.GetTelemetryDir(), 'bin', 'ipfw_mod.ko')
  try:
    changed = cloud_storage.GetIfChanged(
        cloud_storage.INTERNAL_BUCKET, ipfw_bin)
    changed |= cloud_storage.GetIfChanged(
        cloud_storage.INTERNAL_BUCKET, ipfw_mod)
  # Fix: 'except X as e' replaces the Python-2-only 'except X, e' form,
  # matching the handlers elsewhere in this file.
  except cloud_storage.CloudStorageError as e:
    logging.error(e)
    logging.error('You may proceed by manually installing dummynet. See: '
                  'http://info.iet.unipi.it/~luigi/dummynet/')
    sys.exit(1)
def _InstallIpfw(self):
  """Downloads ipfw and its kernel module via support_binaries paths.

  On a cloud storage error, logs manual dummynet installation
  instructions and exits the process.
  """
  ipfw_bin = support_binaries.FindPath('ipfw', self.GetOSName())
  ipfw_mod = support_binaries.FindPath('ipfw_mod.ko', self.GetOSName())
  try:
    changed = cloud_storage.GetIfChanged(
        ipfw_bin, cloud_storage.INTERNAL_BUCKET)
    changed |= cloud_storage.GetIfChanged(
        ipfw_mod, cloud_storage.INTERNAL_BUCKET)
  # Fix: 'except X as e' replaces the Python-2-only 'except X, e' form,
  # matching the handlers elsewhere in this file.
  except cloud_storage.CloudStorageError as e:
    logging.error(e)
    logging.error('You may proceed by manually installing dummynet. See: '
                  'http://info.iet.unipi.it/~luigi/dummynet/')
    sys.exit(1)
def DriverCreator():
  """Creates an IE webdriver, first fetching the matching
  IEDriverServer binary from the public cloud storage bucket.

  NOTE(review): |self| is captured from the enclosing scope; this
  function is presumably defined inside a method — confirm.
  """
  exe_name = 'IEDriverServer_%s.exe' % self._architecture
  ie_driver_exe = os.path.join(util.GetTelemetryDir(), 'bin', exe_name)
  cloud_storage.GetIfChanged(cloud_storage.PUBLIC_BUCKET, ie_driver_exe)
  return webdriver.Ie(executable_path=ie_driver_exe)
def FromDict(cls, data, file_path):
  """Builds a page set from a deserialized dict.

  Creates a Page for each entry in data['pages'], then downloads the
  cloud-storage data behind every .sha1 file under the serving dirs of
  the file:// pages.
  """
  page_set = cls(file_path, data)

  # Instantiate a Page per entry; remaining attributes pass through to
  # the Page constructor.
  for page_attributes in data['pages']:
    url = page_attributes.pop('url')
    page_set.pages.append(page_module.Page(
        url, page_set, attributes=page_attributes,
        base_dir=page_set._base_dir))  # pylint: disable=W0212

  # Union of every serving dir used by a file:// page.
  all_serving_dirs = set()
  for page in page_set:
    if not page.is_file:
      continue
    serving_dirs, _ = page.serving_dirs_and_file
    if isinstance(serving_dirs, list):
      all_serving_dirs.update(serving_dirs)
    else:
      all_serving_dirs.add(serving_dirs)

  # Fetch the data behind every .sha1 hash file under those dirs.
  for serving_dir in all_serving_dirs:
    for dirpath, _, filenames in os.walk(serving_dir):
      for filename in filenames:
        sha1_stem, ext = os.path.splitext(os.path.join(dirpath, filename))
        if ext == '.sha1':
          cloud_storage.GetIfChanged(cloud_storage.DEFAULT_BUCKET, sha1_stem)

  return page_set
def __init__(self, file_path, data):
  """Loads archive info from the metadata dict |data|.

  data['archives'] maps each .wpr file name to the page names it
  records. Every referenced archive is synced from cloud storage.
  """
  self._file_path = file_path
  self._base_dir = os.path.dirname(file_path)
  # Ensure the metadata directory exists.
  if not os.path.exists(self._base_dir):
    os.makedirs(self._base_dir)

  # Sync every .wpr archive. Lacking credentials is only worth a warning
  # when a local copy (e.g. a user recording) already exists.
  for wpr_name in data['archives']:
    local_path = self._WprFileNameToPath(wpr_name)
    try:
      cloud_storage.GetIfChanged(local_path)
    except (cloud_storage.CredentialsError, cloud_storage.PermissionError):
      if os.path.exists(local_path):
        # If the archive exists, assume the user recorded their own.
        logging.warning('Need credentials to update WPR archive: %s',
                        local_path)

  # wpr file -> page names it supports (relative paths, as in the file).
  self._wpr_file_to_page_names = data['archives']
  # Inverse mapping: page name -> wpr file.
  self._page_name_to_wpr_file = dict()
  for wpr_file, page_names in data['archives'].items():
    for page_name in page_names:
      self._page_name_to_wpr_file[page_name] = wpr_file

  self.temp_target_wpr_file_path = None
def FromFile(cls, file_path, page_set_file_path):
  """Loads archive info from |file_path|, syncing it from cloud storage
  first; falls back to an empty archive mapping if the file is absent."""
  cloud_storage.GetIfChanged(cloud_storage.DEFAULT_BUCKET, file_path)
  if not os.path.exists(file_path):
    return cls(file_path, page_set_file_path, {'archives': {}})
  with open(file_path, 'r') as f:
    data = json.load(f)
  return cls(file_path, page_set_file_path, data)
def FindPath(binary_name, platform_name):
  """Returns the path to the given binary name, pulling from the cloud if
  necessary.

  Prefers a locally built binary; otherwise downloads the cloud-storage
  copy when one is available.
  """
  if platform_name == 'win':
    binary_name += '.exe'
  local_path = FindLocallyBuiltPath(binary_name)
  if local_path:
    return local_path
  if not _IsInCloudStorage(binary_name, platform_name):
    # Not built locally and not in cloud storage: propagate the
    # (falsy) local lookup result unchanged.
    return local_path
  cloud_storage.GetIfChanged(_GetBinPath(binary_name, platform_name))
  return _GetBinPath(binary_name, platform_name)
def _InstallAvconv(self):
  """Downloads avconv into telemetry's bin dir and puts that dir on PATH.

  On a cloud storage error, logs manual installation instructions and
  exits the process.
  """
  telemetry_bin_dir = os.path.join(util.GetTelemetryDir(), 'bin')
  avconv_bin = os.path.join(telemetry_bin_dir, 'avconv')
  os.environ['PATH'] += os.pathsep + telemetry_bin_dir
  try:
    cloud_storage.GetIfChanged(cloud_storage.INTERNAL_BUCKET, avconv_bin)
  # Fix: 'as e' replaces the Python-2-only 'except X, e' form, matching
  # the handlers elsewhere in this file.
  except cloud_storage.CloudStorageError as e:
    logging.error(e)
    logging.error('You may proceed by manually installing avconv via:\n'
                  'sudo apt-get install libav-tools')
    sys.exit(1)
def _InstallAvconv(self):
  """Downloads avconv via its support_binaries path and puts telemetry's
  bin dir on PATH.

  On a cloud storage error, logs manual installation instructions and
  exits the process.
  """
  avconv_bin = support_binaries.FindPath('avconv', self.GetOSName())
  os.environ['PATH'] += os.pathsep + os.path.join(util.GetTelemetryDir(),
                                                  'bin')
  try:
    cloud_storage.GetIfChanged(avconv_bin, cloud_storage.INTERNAL_BUCKET)
  # Fix: 'as e' replaces the Python-2-only 'except X, e' form, matching
  # the handlers elsewhere in this file.
  except cloud_storage.CloudStorageError as e:
    logging.error(e)
    logging.error('You may proceed by manually installing avconv via:\n'
                  'sudo apt-get install libav-tools')
    sys.exit(1)
def _InstallBinary(self, bin_name, fallback_package=None):
  """Downloads |bin_name| from internal cloud storage, marks it
  executable, and puts its directory on PATH.

  Args:
    bin_name: name of the support binary to install.
    fallback_package: optional apt package name to suggest when the
        download fails.

  Exits the process on a cloud storage error.
  """
  bin_path = support_binaries.FindPath(bin_name, self.GetOSName())
  os.environ['PATH'] += os.pathsep + os.path.dirname(bin_path)
  try:
    cloud_storage.GetIfChanged(bin_path, cloud_storage.INTERNAL_BUCKET)
    # 0o755 is the Python-3-compatible spelling of the old 0755 literal.
    os.chmod(bin_path, 0o755)
  # Fix: 'as e' replaces the Python-2-only 'except X, e' form, matching
  # the handlers elsewhere in this file.
  except cloud_storage.CloudStorageError as e:
    logging.error(e)
    if fallback_package:
      # Lazy %-args instead of eager interpolation, consistent with the
      # logging calls elsewhere in this file.
      logging.error('You may proceed by manually installing %s via:\n'
                    'sudo apt-get install %s', bin_name, fallback_package)
    sys.exit(1)
def __init__(self, archive_data_file_path, page_set_file_path, data):
  """Loads archive info from the metadata dict |data|.

  data['archives'] maps each .wpr file name to the urls it records.
  Every referenced archive is synced from cloud storage; a failed
  download is fatal unless a local copy already exists.
  """
  self._archive_data_file_path = archive_data_file_path
  self._archive_data_file_dir = os.path.dirname(archive_data_file_path)
  # Ensure the metadata directory exists.
  if not os.path.exists(self._archive_data_file_dir):
    os.makedirs(self._archive_data_file_dir)

  # Back pointer to the page set file.
  self._page_set_file_path = page_set_file_path

  # Sync every .wpr archive listed in the metadata.
  for wpr_name in data['archives']:
    local_path = self._WprFileNameToPath(wpr_name)
    try:
      cloud_storage.GetIfChanged(cloud_storage.INTERNAL_BUCKET, local_path)
    except (cloud_storage.CredentialsError,
            cloud_storage.PermissionError) as e:
      if os.path.exists(local_path):
        # A user-recorded archive is already present; just warn.
        logging.warning('Could not download WPR archive: %s', local_path)
      else:
        # No local fallback: running without the archive is impossible.
        logging.error(
            'Can not run without required WPR archive: %s. '
            'If you believe you have credentials, follow the '
            'instructions below. If you do not have credentials, '
            'you may use record_wpr to make your own recording or '
            'run against live sites with --allow-live-sites.', local_path)
        logging.error(e)
        sys.exit(1)

  # wpr file -> urls it supports (relative paths, as in the file).
  self._wpr_file_to_urls = data['archives']
  # Inverse mapping: url -> wpr file.
  self._url_to_wpr_file = dict()
  for wpr_file, page_urls in data['archives'].items():
    for url in page_urls:
      self._url_to_wpr_file[url] = wpr_file

  self.temp_target_wpr_file_path = None
def __init__(self, file_path, data, ignore_archive=False):
  """Loads archive info from the metadata dict |data|.

  Args:
    file_path: path to the archive metadata file; its directory is
        created if missing.
    data: deserialized metadata; data['archives'] maps each .wpr file
        name to the page names it records.
    ignore_archive: when True, skip downloading the .wpr archives.
  """
  self._file_path = file_path
  self._base_dir = os.path.dirname(file_path)
  # Ensure directory exists.
  if not os.path.exists(self._base_dir):
    os.makedirs(self._base_dir)

  # Download all .wpr files.
  if not ignore_archive:
    # TODO(tbarzic): Remove this once http://crbug.com/351143 is diagnosed.
    log_cloud_storage_exception = True
    for archive_path in data['archives']:
      archive_path = self._WprFileNameToPath(archive_path)
      try:
        cloud_storage.GetIfChanged(archive_path)
      except (cloud_storage.CredentialsError,
              cloud_storage.PermissionError) as e:
        if os.path.exists(archive_path):
          # If the archive exists, assume the user recorded their own and
          # simply warn.
          logging.warning(
              'Need credentials to update WPR archive: %s', archive_path)
        elif log_cloud_storage_exception:
          # Log access errors only once, as they should stay the same in
          # later iterations.
          log_cloud_storage_exception = False
          logging.warning('Error getting WPR archive %s: %s ' %
                          (archive_path, str(e)))
          # Fix: two %s placeholders to match the two environment values.
          # The original format string had only one, so applying '%' to
          # the two-element tuple raised TypeError instead of logging.
          logging.info('HOME: "%s"; USER: "%s"' % (
              os.environ.get('HOME', ''), os.environ.get('USER', '')))

  # Map from the relative path (as it appears in the metadata file) of
  # the .wpr file to a list of page names it supports.
  self._wpr_file_to_page_names = data['archives']
  # Map from the page name to a relative path (as it appears in the
  # metadata file) of the .wpr file.
  self._page_name_to_wpr_file = dict()
  # Find out the wpr file names for each page.
  for wpr_file in data['archives']:
    page_names = data['archives'][wpr_file]
    for page_name in page_names:
      self._page_name_to_wpr_file[page_name] = wpr_file

  self.temp_target_wpr_file_path = None
def __init__(self, archive_data_file_path, page_set_file_path, data):
  """Loads archive info from the metadata dict |data|.

  data['archives'] maps each .wpr file name to the urls it records;
  every referenced archive is synced from the default bucket.
  """
  self._archive_data_file_path = archive_data_file_path
  self._archive_data_file_dir = os.path.dirname(archive_data_file_path)
  # Back pointer to the page set file.
  self._page_set_file_path = page_set_file_path

  # Make sure each referenced archive is current.
  for archive_path in data['archives']:
    cloud_storage.GetIfChanged(cloud_storage.DEFAULT_BUCKET, archive_path)

  # wpr file -> urls it supports (relative paths, as in the file).
  self._wpr_file_to_urls = data['archives']
  # Inverse mapping: url -> wpr file.
  self._url_to_wpr_file = {}
  for wpr_file, page_urls in data['archives'].items():
    for url in page_urls:
      self._url_to_wpr_file[url] = wpr_file

  self.temp_target_wpr_file_path = None
def GetIfChanged(profiler_binary):
  """Syncs |profiler_binary|'s host-side copy from the public cloud
  storage bucket."""
  host_path = GetHostPath(profiler_binary)
  cloud_storage.GetIfChanged(host_path, cloud_storage.PUBLIC_BUCKET)
def __init__(self, file_path='', attributes=None):
  """Builds a page set, applying |attributes| entries as attributes.

  Args:
    file_path: path of the page set file.
    attributes: optional dict; every key/value pair is setattr'd onto
        this instance, and 'pages' / 'serving_dirs' entries get special
        handling below.
  """
  self.file_path = file_path

  # These attributes can be set dynamically by the page set.
  self.description = ''
  self.archive_data_file = ''
  self.credentials_path = None
  self.user_agent_type = None
  self.make_javascript_deterministic = True
  self.navigate_steps = {'action': 'navigate'}

  # Apply every supplied attribute verbatim (may overwrite the defaults
  # above).
  if attributes:
    for k, v in attributes.iteritems():
      setattr(self, k, v)

  # Create a PageSetArchiveInfo object.
  # NOTE(review): self._base_dir is read here and below but never set in
  # this method; presumably a property or attribute defined elsewhere in
  # the class — confirm.
  if self.archive_data_file:
    self.wpr_archive_info = page_set_archive_info.PageSetArchiveInfo.FromFile(
        os.path.join(self._base_dir, self.archive_data_file))
  else:
    self.wpr_archive_info = None

  # Create a Page object for every page.
  self.pages = []
  if attributes and 'pages' in attributes:
    for page_attributes in attributes['pages']:
      url = page_attributes.pop('url')
      page = page_module.Page(
          url, self, attributes=page_attributes, base_dir=self._base_dir)
      self.pages.append(page)

  # Prepend _base_dir to our serving dirs.
  # Always use realpath to ensure no duplicates in set.
  self.serving_dirs = set()
  if attributes and 'serving_dirs' in attributes:
    if not isinstance(attributes['serving_dirs'], list):
      raise ValueError('serving_dirs must be a list.')
    for serving_dir in attributes['serving_dirs']:
      self.serving_dirs.add(
          os.path.realpath(os.path.join(self._base_dir, serving_dir)))

  # Attempt to download the credentials file; missing credentials are
  # only a warning.
  if self.credentials_path:
    try:
      cloud_storage.GetIfChanged(
          os.path.join(self._base_dir, self.credentials_path))
    except (cloud_storage.CredentialsError, cloud_storage.PermissionError):
      logging.warning('Cannot retrieve credential file: %s',
                      self.credentials_path)

  # Scan every serving directory for .sha1 files
  # and download them from Cloud Storage. Assume all data is public.
  all_serving_dirs = self.serving_dirs.copy()
  # Add individual page dirs to all serving dirs.
  for page in self:
    if page.is_file:
      all_serving_dirs.add(page.serving_dir)
  # Scan all serving dirs.
  for serving_dir in all_serving_dirs:
    if os.path.splitdrive(serving_dir)[1] == '/':
      raise ValueError('Trying to serve root directory from HTTP server.')
    for dirpath, _, filenames in os.walk(serving_dir):
      for filename in filenames:
        path, extension = os.path.splitext(
            os.path.join(dirpath, filename))
        if extension != '.sha1':
          continue
        cloud_storage.GetIfChanged(path)
def _DownloadGeneratedProfileArchive(self, options):
  """Download and extract profile directory archive if one exists.

  Fetches the benchmark's generated profile archive from public cloud
  storage, unzips it next to the download, and points
  options.browser_options.profile_dir at the extracted directory.
  Exits the process when a required archive cannot be obtained.
  """
  archive_name = getattr(self, 'generated_profile_archive', None)
  # If attribute not specified, nothing to do.
  if not archive_name:
    return

  # If profile dir specified on command line, nothing to do.
  if options.browser_options.profile_dir:
    # Fix: a space was missing between the concatenated literals
    # ("thisoverrides" in the original message).
    logging.warning(
        "Profile directory specified on command line: %s, this "
        "overrides the benchmark's default profile directory.",
        options.browser_options.profile_dir)
    return

  # Download profile directory from cloud storage.
  found_browser = browser_finder.FindBrowser(options)
  test_data_dir = os.path.join(util.GetChromiumSrcDir(), 'tools', 'perf',
                               'generated_profiles',
                               found_browser.target_os)
  generated_profile_archive_path = os.path.normpath(
      os.path.join(test_data_dir, archive_name))

  try:
    cloud_storage.GetIfChanged(generated_profile_archive_path,
                               cloud_storage.PUBLIC_BUCKET)
  except (cloud_storage.CredentialsError,
          cloud_storage.PermissionError) as e:
    if os.path.exists(generated_profile_archive_path):
      # If the profile directory archive exists, assume the user has
      # their own local copy; simply warn.
      logging.warning('Could not download Profile archive: %s',
                      generated_profile_archive_path)
    else:
      # If the archive profile directory doesn't exist, this is fatal.
      logging.error(
          'Can not run without required profile archive: %s. '
          'If you believe you have credentials, follow the '
          'instructions below.', generated_profile_archive_path)
      logging.error(e)
      sys.exit(-1)

  # Unzip profile directory.
  extracted_profile_dir_path = (
      os.path.splitext(generated_profile_archive_path)[0])
  if not os.path.isfile(generated_profile_archive_path):
    raise Exception("Profile directory archive not downloaded: ",
                    generated_profile_archive_path)
  with zipfile.ZipFile(generated_profile_archive_path) as f:
    try:
      f.extractall(os.path.dirname(generated_profile_archive_path))
    # Fix: the original read 'except e:', which never handled extraction
    # errors — 'e' is unbound here unless the earlier handler ran, so a
    # failed extractall raised NameError instead of being cleaned up.
    except Exception as e:
      # Cleanup any leftovers from unzipping.
      if os.path.exists(extracted_profile_dir_path):
        shutil.rmtree(extracted_profile_dir_path)
      logging.error(
          "Error extracting profile directory zip file: %s", e)
      sys.exit(-1)

  # Run with freshly extracted profile directory.
  logging.info("Using profile archive directory: %s",
               extracted_profile_dir_path)
  options.browser_options.profile_dir = extracted_profile_dir_path