def __init__(self, signature_path, data_path, keyrings=None, blacklist=None):
    super().__init__()
    self.signature_path = signature_path
    self.data_path = data_path
    self.keyrings = ([] if keyrings is None else keyrings)
    self.blacklist = blacklist
    # We have to calculate the checksums now, because it's possible that
    # the files will be temporary/atomic files, deleted when a context
    # manager exits.  I.e. the files aren't guaranteed to exist after this
    # constructor runs.
    #
    # Also, md5 is fine; this is not a security critical context, we just
    # want to be able to quickly and easily compare the file on disk
    # against the file on the server.
    with open(self.signature_path, 'rb') as fp:
        self.signature_checksum = calculate_signature(fp, hashlib.md5)
    with open(self.data_path, 'rb') as fp:
        self.data_checksum = calculate_signature(fp, hashlib.md5)
    self.keyring_checksums = []
    for path in self.keyrings:
        with open(path, 'rb') as fp:
            checksum = calculate_signature(fp, hashlib.md5)
            self.keyring_checksums.append(checksum)
    if self.blacklist is None:
        self.blacklist_checksum = None
    else:
        with open(self.blacklist, 'rb') as fp:
            self.blacklist_checksum = calculate_signature(fp, hashlib.md5)

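# For context: calculate_signature() is called above and exercised by the
# tests below, but its definition is not part of this excerpt.  Here is a
# minimal sketch of what it presumably does -- digest a file-like object
# in MiB-sized chunks, defaulting to SHA-256 -- inferred only from how it
# is called here; the real implementation may differ.
import hashlib

MiB = 1024 * 1024

def calculate_signature(fin, hash_class=hashlib.sha256):
    # Read in fixed-size chunks so that arbitrarily large files never
    # have to be held in memory all at once.
    hashobj = hash_class()
    while True:
        chunk = fin.read(MiB)
        if not chunk:
            break
        hashobj.update(chunk)
    return hashobj.hexdigest()
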
def test_calculate_signature_chunk_size(self):
    # Check that a file of exactly the chunk size works.
    with tempfile.TemporaryFile() as fp:
        fp.write(b'\0' * MiB)
        fp.seek(0)
        hash1 = calculate_signature(fp)
        fp.seek(0)
        hash2 = hashlib.sha256(fp.read()).hexdigest()
        self.assertEqual(hash1, hash2)

def test_calculate_signature_alternative_hash(self):
    # Check an alternative hash algorithm.
    with tempfile.TemporaryFile() as fp:
        # Ensure the file is bigger than chunk size.
        fp.write(b'\0' * (MiB + 1))
        fp.seek(0)
        hash1 = calculate_signature(fp, hashlib.md5)
        fp.seek(0)
        hash2 = hashlib.md5(fp.read()).hexdigest()
        self.assertEqual(hash1, hash2)

def test_calculate_signature(self):
    # Check the default hash algorithm.
    with tempfile.TemporaryFile() as fp:
        # Ensure the file is bigger than chunk size.
        fp.write(b'\0' * (MiB + 1))
        fp.seek(0)
        hash1 = calculate_signature(fp)
        fp.seek(0)
        hash2 = hashlib.sha256(fp.read()).hexdigest()
        self.assertEqual(hash1, hash2)

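# Quick standalone sanity check of the property the tests above assert:
# the chunked digest equals hashing the whole payload in one shot.  This
# is purely illustrative and not part of the test suite; it assumes the
# calculate_signature() sketch and MiB constant shown earlier.
if __name__ == '__main__':
    import io
    payload = b'\x01' * (MiB + 1)
    assert (calculate_signature(io.BytesIO(payload)) ==
            hashlib.sha256(payload).hexdigest())
    print('chunked and one-shot digests agree')
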
def _use_cached(txt, asc, keyrings, checksum=None, blacklist=None):
    if not os.path.exists(txt) or not os.path.exists(asc):
        return False
    with Context(*keyrings, blacklist=blacklist) as ctx:
        if not ctx.verify(asc, txt):
            return False
    if checksum is None:
        return True
    with open(txt, 'rb') as fp:
        got = calculate_signature(fp)
        return got == checksum

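# Hypothetical illustration of how _use_cached() short-circuits a
# download; the paths and checksum below are made up (see
# _download_files() for the real call site).  A True result means both
# files exist on disk, the detached signature verifies against the given
# keyrings, and the data file's digest matches, so the download can be
# skipped.
def _demo_use_cached():
    txt = '/path/to/cache/device.tar.xz'        # hypothetical data file
    asc = txt + '.asc'                          # its detached signature
    keyrings = [config.gpg.image_signing]
    expected = 'expected-sha256-hexdigest'      # hypothetical checksum
    return _use_cached(txt, asc, keyrings, expected)
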
def _download_files(self):
    """Download and verify all the winning upgrade path's files."""
    # If there is a device-signing key, the files can be signed by either
    # that or the image-signing key.
    keyrings = [config.gpg.image_signing]
    if os.path.exists(config.gpg.device_signing):
        keyrings.append(config.gpg.device_signing)
    # Now, go through all the file records in the winning upgrade path.
    # If the data file has already been downloaded and it has a valid
    # signature file, then we can save some bandwidth by not downloading
    # it again.
    downloads = []
    signatures = []
    checksums = []
    # For the clean ups below, preserve recovery's log files.
    cache_dir = config.updater.cache_partition
    preserve = set((
        os.path.join(cache_dir, 'log'),
        os.path.join(cache_dir, 'last_log'),
        ))
    for image_number, filerec in iter_path(self.winner):
        # Re-pack for arguments to get_files() and to collate the
        # signature path and checksum for the downloadable file.
        dst = os.path.join(cache_dir, os.path.basename(filerec.path))
        asc = os.path.join(cache_dir, os.path.basename(filerec.signature))
        checksum = filerec.checksum
        self.files.append((dst, (image_number, filerec.order)))
        self.files.append((asc, (image_number, filerec.order)))
        # Check the existence and signature of the file.
        if _use_cached(dst, asc, keyrings, checksum, self.blacklist):
            preserve.add(dst)
            preserve.add(asc)
        else:
            # Add the data file, which has a checksum.
            downloads.append(Record(
                urljoin(config.http_base, filerec.path),
                dst, checksum))
            # Add the signature file, which does not have a checksum.
            downloads.append(Record(
                urljoin(config.http_base, filerec.signature),
                asc))
            signatures.append((dst, asc))
            checksums.append((dst, checksum))
    # For any files we're about to download, we must make sure that none
    # of the destination file paths exist, otherwise the downloader will
    # throw exceptions.
    for record in downloads:
        safe_remove(record.destination)
    # Also delete cache partition files that we no longer need.
    for filename in os.listdir(cache_dir):
        path = os.path.join(cache_dir, filename)
        if path not in preserve:
            safe_remove(path)
    # Now, download all missing or ill-signed files, providing logging
    # feedback on progress.  This download can be paused.  The downloader
    # should also signal when the file downloads have started.
    self.downloader.get_files(
        downloads, pausable=True, signal_started=True)
    with ExitStack() as stack:
        # Set things up to remove the files if a SignatureError gets
        # raised or if the checksums don't match.  If everything's okay,
        # we'll clear the stack before the context manager exits so none
        # of the files will get removed.
        for record in downloads:
            stack.callback(os.remove, record.destination)
        # Although we should never get here, if the downloading step
        # fails, clear out the self.files list so there's no possibility
        # we'll try to move them later.
        stack.callback(setattr, self, 'files', [])
        # Verify the signatures on all the downloaded files.
        with Context(*keyrings, blacklist=self.blacklist) as ctx:
            for dst, asc in signatures:
                ctx.validate(asc, dst)
        # Verify the checksums.
        for dst, checksum in checksums:
            with open(dst, 'rb') as fp:
                got = calculate_signature(fp)
                if got != checksum:
                    raise ChecksumError(dst, got, checksum)
        # Everything is fine so nothing needs to be cleared.
        stack.pop_all()
    log.info('all files available in {}', cache_dir)
    # Now, copy the files from the temporary directory into the location
    # for the upgrader.
    self._next.append(self._move_files)
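
# A plausible definition of the Record type queued for the downloader
# above, assuming it is a namedtuple whose checksum field is optional
# (the signature file's Record is constructed without one).  This is an
# inference from the call sites; the real definition may differ.
from collections import namedtuple

Record = namedtuple('Record', 'url destination checksum')
Record.__new__.__defaults__ = ('',)    # checksum defaults to empty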