def testException(self):
    """An asset with no hash and no locations cannot be fetched."""
    # Nothing to download from and nothing cached: fetch() must fail.
    missing = asset.Asset(name='bar.tgz',
                          asset_hash=None,
                          algorithm=None,
                          locations=None,
                          cache_dirs=[self.cache_dir])
    with self.assertRaises(EnvironmentError):
        missing.fetch()
def download(self):
    """
    Fetch the base image through the asset cache and return its path.

    Archives are transparently uncompressed; the final path is also
    stored in ``self._base_image``.
    """
    # Metadata recorded alongside the cached asset for identification.
    metadata = {
        "type": "vmimage",
        "name": self.name,
        "version": self.version,
        "arch": self.arch,
        "build": self.build,
    }
    # self.cache_dir may be a single directory or a list of them.
    cache_dirs = [self.cache_dir] if isinstance(self.cache_dir, str) else self.cache_dir
    fetched_path = asset.Asset(
        name=self.url,
        asset_hash=self.checksum,
        algorithm=self.algorithm,
        locations=None,
        cache_dirs=cache_dirs,
        expire=None,
        metadata=metadata,
    ).fetch()
    # Uncompress archives in place and keep the uncompressed path.
    if archive.is_archive(fetched_path):
        uncompressed_path = os.path.splitext(fetched_path)[0]
        fetched_path = archive.uncompress(fetched_path, uncompressed_path)
    self._base_image = fetched_path
    return self._base_image
def test_exception(self):
    """fetch() raises OSError when there is no source for the asset."""
    unfetchable = asset.Asset(name='bar.tgz', asset_hash=None,
                              algorithm=None, locations=None,
                              cache_dirs=[self.cache_dir], expire=None)
    with self.assertRaises(OSError):
        unfetchable.fetch()
def test_fetch_expire(self):
    """Cached assets are reused until expired, then re-fetched."""

    def fetch_and_read(asset_hash, url, expire):
        # Fetch through the cache and return the file's text content.
        path = asset.Asset(
            self.assetname,
            asset_hash=asset_hash,
            algorithm="sha1",
            locations=[url],
            cache_dirs=[self.cache_dir],
            expire=expire,
        ).fetch()
        with open(path, "r", encoding="utf-8") as fobj:
            return fobj.read()

    content1 = fetch_and_read(self.assethash, self.url, None)

    # Create the file in a different location with a different content
    new_assetdir = tempfile.mkdtemp(dir=self.tmpdir.name)
    new_localpath = os.path.join(new_assetdir, self.assetname)
    new_hash = "9f1ad57044be4799f288222dc91d5eab152921e9"
    new_url = f"file://{new_localpath}"
    with open(new_localpath, "w", encoding="utf-8") as fobj:
        fobj.write("Changed!")

    # Don't expire cached file: the old content must still be served.
    self.assertEqual(content1,
                     fetch_and_read(self.assethash, new_url, None))

    # Expire cached file: the new content must be fetched.
    self.assertNotEqual(content1,
                        fetch_and_read(new_hash, new_url, -1))
def test_incorrect_name_locations_parameter_case2(self):
    """A plain file name with no hash and no locations is rejected."""
    # NOTE(review): the original comment claimed "locations is one or
    # more entries", but the call actually passes locations=None —
    # the comment did not match the code; this describes the code.
    with self.assertRaises(ValueError):
        asset.Asset(name='bar.tgz', asset_hash=None, algorithm=None,
                    locations=None, cache_dirs=[self.cache_dir],
                    expire=None)
def test_fetch_location(self):
    """A named asset fetched from a location lands in the cache dir."""
    fetched = asset.Asset(self.assetname, asset_hash=self.assethash,
                          algorithm='sha1', locations=[self.url],
                          cache_dirs=[self.cache_dir],
                          expire=None).fetch()
    self.assertEqual(fetched,
                     os.path.join(self.cache_dir, self.assetname))
def test_incorrect_name_locations_parameter_case1(self):
    """A full-URI name combined with a locations value is rejected."""
    # NOTE(review): the original comment said "locations is empty",
    # but a non-empty location ('file://foo') is actually passed —
    # the comment did not match the code; this describes the code.
    with self.assertRaises(ValueError):
        asset.Asset(name='file://bar.tgz', asset_hash=None,
                    algorithm=None, locations='file://foo',
                    cache_dirs=[self.cache_dir], expire=None)
def test_fetch_lockerror(self):
    """fetch() fails while the cache entry is locked by someone else."""
    # Hold the per-file lock so the fetch below cannot acquire it.
    with FileLock(os.path.join(self.cache_dir, self.assetname)):
        locked = asset.Asset(self.url, asset_hash=self.assethash,
                             algorithm='sha1', locations=None,
                             cache_dirs=[self.cache_dir], expire=None)
        with self.assertRaises(EnvironmentError):
            locked.fetch()
def test_get_metadata_file_not_exists(self):
    """get_metadata() returns None before the asset is ever fetched."""
    expected_metadata = {"Name": "name", "version": 1.2}
    unfetched = asset.Asset(self.url, asset_hash=self.assethash,
                            algorithm='sha1', locations=None,
                            cache_dirs=[self.cache_dir], expire=None,
                            metadata=expected_metadata)
    self.assertIsNone(unfetched.get_metadata())
def test_fetch_url_cache_by_location(self):
    """Assets named by URL are cached under the 'by_location' subtree."""
    fetched = asset.Asset(self.url, asset_hash=self.assethash,
                          algorithm='sha1', locations=None,
                          cache_dirs=[self.cache_dir],
                          expire=None).fetch()
    by_location = os.path.join(self.cache_dir, 'by_location')
    self.assertTrue(fetched.startswith(by_location))
    self.assertTrue(fetched.endswith(self.assetname))
def test_fetch_name_locations_cache_by_name(self):
    """A hashless named asset with locations is cached under 'by_name'."""
    fetched = asset.Asset(self.assetname, asset_hash=None,
                          algorithm='sha1',
                          locations=[self.url, 'file://fake_dir'],
                          cache_dirs=[self.cache_dir],
                          expire=None).fetch()
    self.assertEqual(
        fetched,
        os.path.join(self.cache_dir, 'by_name', self.assetname))
def test_unknown_scheme(self):
    """Fetching a URL with an unsupported protocol must fail."""
    bad_scheme = asset.Asset("weird-protocol://location/?params=foo",
                             None, None, None, [self.cache_dir], None)
    with self.assertRaises(asset.UnsupportedProtocolError):
        bad_scheme.fetch()
def test_fetch_lockerror(self):
    """fetch() fails while the by-name cache entry is already locked."""
    by_name_dir = os.path.join(self.cache_dir, 'by_name')
    os.makedirs(by_name_dir)
    # Hold the per-file lock so the fetch below cannot acquire it.
    with FileLock(os.path.join(by_name_dir, self.assetname)):
        locked = asset.Asset(self.assetname,
                             asset_hash=self.assethash,
                             algorithm='sha1',
                             locations=['file://foo1', 'file://foo2'],
                             cache_dirs=[self.cache_dir], expire=None)
        with self.assertRaises(OSError):
            locked.fetch()
def test_create_metadata_file(self):
    """Fetching an asset with metadata writes a *_metadata.json file."""
    expected_metadata = {"Name": "name", "version": 1.2}
    fetched = asset.Asset(self.url, asset_hash=self.assethash,
                          algorithm='sha1', locations=None,
                          cache_dirs=[self.cache_dir], expire=None,
                          metadata=expected_metadata).fetch()
    # Metadata lives next to the asset, with the extension replaced.
    expected_file = "%s_metadata.json" % os.path.splitext(fetched)[0]
    self.assertTrue(os.path.exists(expected_file))
def test_fetch_name_hash_cache_by_name(self):
    """A named asset with a hash is cached under the 'by_name' subtree."""
    fetched = asset.Asset(
        self.assetname,
        asset_hash=self.assethash,
        algorithm="sha1",
        locations=[self.url],
        cache_dirs=[self.cache_dir],
        expire=None,
    ).fetch()
    self.assertEqual(
        fetched,
        os.path.join(self.cache_dir, "by_name", self.assetname))
def test_fetch_expire(self):
    """
    Cached assets must be reused while valid and re-fetched once expired.

    The asset is first cached, then the origin file is replaced; without
    expiration the cached copy must still be served, while an expired
    cache entry must cause the new content to be fetched.
    """
    foo_tarball = asset.Asset(self.assetname,
                              asset_hash=self.assethash,
                              algorithm='sha1',
                              locations=[self.url],
                              cache_dirs=[self.cache_dir],
                              expire=None).fetch()
    # Explicit encoding: the implicit default is locale-dependent.
    with open(foo_tarball, 'r', encoding='utf-8') as f:
        content1 = f.read()

    # Create the file in a different location with a different content
    new_assetdir = tempfile.mkdtemp(dir=self.tmpdir.name)
    new_localpath = os.path.join(new_assetdir, self.assetname)
    new_hash = '9f1ad57044be4799f288222dc91d5eab152921e9'
    new_url = 'file://%s' % new_localpath
    with open(new_localpath, 'w', encoding='utf-8') as f:
        f.write('Changed!')

    # Don't expire cached file
    foo_tarball = asset.Asset(self.assetname,
                              asset_hash=self.assethash,
                              algorithm='sha1',
                              locations=[new_url],
                              cache_dirs=[self.cache_dir],
                              expire=None).fetch()
    with open(foo_tarball, 'r', encoding='utf-8') as f:
        content2 = f.read()
    self.assertEqual(content1, content2)

    # Expire cached file
    foo_tarball = asset.Asset(self.assetname,
                              asset_hash=new_hash,
                              algorithm='sha1',
                              locations=[new_url],
                              cache_dirs=[self.cache_dir],
                              expire=-1).fetch()
    with open(foo_tarball, 'r', encoding='utf-8') as f:
        content2 = f.read()
    self.assertNotEqual(content1, content2)
def test_get_metadata_file_not_exists(self):
    """get_metadata() raises OSError before the asset was fetched."""
    expected_metadata = {"Name": "name", "version": 1.2}
    unfetched = asset.Asset(
        self.url,
        asset_hash=self.assethash,
        algorithm="sha1",
        locations=None,
        cache_dirs=[self.cache_dir],
        expire=None,
        metadata=expected_metadata,
    )
    with self.assertRaises(OSError):
        unfetched.get_metadata()
def test_fetch_different_files(self):
    """
    Checks that when different assets which happen to have the same
    *filename*, are properly stored in the cache directory and that
    the right one will be given to the user, no matter if a hash is
    used or not.
    """

    def create_origin(content):
        # Write a same-named asset in a fresh origin dir; return its URL.
        origin_dir = tempfile.mkdtemp(dir=self.tmpdir.name)
        local_path = os.path.join(origin_dir, self.assetname)
        # Explicit encoding: the implicit default is locale-dependent.
        with open(local_path, 'w', encoding='utf-8') as f:
            f.write(content)
        return 'file://%s' % local_path

    def fetched_content(url):
        # Fetch an unhashed asset from ``url`` and return its content.
        fetched = asset.Asset(url, None, None, None,
                              [self.cache_dir], None).fetch()
        with open(fetched, 'r', encoding='utf-8') as f:
            return f.read()

    # Prime the cache with the first (hashed) asset of the same name.
    a1 = asset.Asset(self.url, self.assethash, 'sha1', None,
                     [self.cache_dir], None)
    a1.fetch()

    # A second, unhashed asset with the same file name must not be
    # confused with the first one.
    second_asset_content = 'This is not your first asset content!'
    self.assertEqual(fetched_content(create_origin(second_asset_content)),
                     second_asset_content)

    # And a third one, again with the same file name, must also be
    # served with its own content.
    third_asset_content = 'Another content!'
    self.assertEqual(fetched_content(create_origin(third_asset_content)),
                     third_asset_content)
def testFetch_location(self):
    """
    Fetching by name from a location stores both the asset file and a
    companion hash file in the cache directory.
    """
    foo_tarball = asset.Asset(self.assetname,
                              asset_hash=self.assethash,
                              algorithm='sha1',
                              locations=[self.url],
                              cache_dirs=[self.cache_dir]).fetch()
    expected_tarball = os.path.join(self.cache_dir, self.assetname)
    self.assertEqual(foo_tarball, expected_tarball)
    # The "<file>.sha1" companion records "<hash> <name>\n".
    hashfile = '.'.join([expected_tarball, 'sha1'])
    self.assertTrue(os.path.isfile(hashfile))
    expected_content = '%s %s\n' % (self.assethash, self.assetname)
    # Explicit encoding: the implicit default is locale-dependent.
    with open(hashfile, 'r', encoding='utf-8') as f:
        content = f.read()
    self.assertEqual(content, expected_content)
def test_get_metadata_file_exists(self):
    """Metadata supplied at construction is returned after fetch()."""
    expected_metadata = {"Name": "name", "version": 1.2}
    fetched_asset = asset.Asset(
        self.url,
        asset_hash=self.assethash,
        algorithm="sha1",
        locations=None,
        cache_dirs=[self.cache_dir],
        expire=None,
        metadata=expected_metadata,
    )
    fetched_asset.fetch()
    self.assertEqual(expected_metadata, fetched_asset.get_metadata())
def download(self, url=None):
    """
    Download kernel source.

    :param url: optional override for the base URL from which the
                kernel source tarball is fetched
    :type url: str or None
    """
    source_url = self._build_kernel_url(base_url=url)
    # Keep the local tarball path for the later uncompress/build steps.
    self.asset_path = asset.Asset(source_url,
                                  asset_hash=None,
                                  algorithm=None,
                                  locations=None,
                                  cache_dirs=self.data_dirs).fetch()
def fetch_asset(self, name, asset_hash=None, algorithm=None,
                locations=None, expire=None, find_only=False,
                cancel_on_missing=False):
    """
    Method to call the utils.asset in order to fetch an asset file
    supporting hash check, caching and multiple locations.

    :param name: the asset filename or URL
    :param asset_hash: asset hash (optional)
    :param algorithm: hash algorithm (optional, defaults to
                      :data:`avocado.utils.asset.DEFAULT_HASH_ALGORITHM`)
    :param locations: list of URLs from where the asset can be fetched
                      (optional)
    :param expire: time for the asset to expire
    :param find_only: When `True`, `fetch_asset` only looks for the asset
                      in the cache, avoiding the download/move action.
                      Defaults to `False`.
    :param cancel_on_missing: whether the test should be canceled if the
                              asset was not found in the cache or if
                              `fetch` could not add the asset to the
                              cache. Defaults to `False`.
    :raises OSError: when it fails to fetch the asset or file is not in
                     the cache and `cancel_on_missing` is `False`.
    :returns: asset file local path.
    """
    # Normalize human-friendly expire values to seconds.
    if expire is not None:
        expire = data_structures.time_to_seconds(str(expire))

    # If name has no protocol or network locations, attempt to find
    # the asset "by name" first. This is valid use case when the
    # asset has been previously put into any of the cache
    # directories, either manually or by the caching process
    # itself.
    parsed_name = asset.Asset.parse_name(name)
    if not (parsed_name.scheme or locations):
        try:
            return asset.Asset.get_asset_by_name(name,
                                                 self.cache_dirs,
                                                 expire,
                                                 asset_hash)
        except OSError as e:
            # Cancel (when requested) before propagating the failure.
            if cancel_on_missing:
                self.cancel(f"Missing asset {name}")
            raise e

    asset_obj = asset.Asset(name, asset_hash, algorithm, locations,
                            self.cache_dirs, expire)

    try:
        # return the path to the asset when it was found or fetched
        if find_only:
            return asset_obj.find_asset_file()
        else:
            return asset_obj.fetch()
    except OSError as e:
        # if asset is not in the cache or there was a problem fetching
        # the asset
        if cancel_on_missing:
            # cancel when requested
            self.cancel(f"Missing asset {name}")
        # otherwise re-throw OSError
        raise e