def save_attrs(self, filenm="attrs"):
    """Save attributes from the in-memory catalog to a file
    specified by filenm.

    The attributes are written to a temporary file first and then
    renamed into place so readers never observe a partially
    written file.  Permission (EACCES) and read-only filesystem
    (EROFS) errors are deliberately swallowed because the catalog
    may live in a location the current user cannot modify."""

    tmpfile = None
    # Callers must never attempt to save through a read-only
    # catalog object.
    assert not self.read_only
    finalpath = os.path.normpath(
        os.path.join(self.catalog_root, filenm))
    try:
        tmp_num, tmpfile = tempfile.mkstemp(dir=self.catalog_root)
        # Wrap the raw descriptor from mkstemp in a file object.
        tfile = os.fdopen(tmp_num, "w")
        for a in self.attrs.keys():
            s = "S %s: %s\n" % (a, self.attrs[a])
            tfile.write(s)
        tfile.close()
        # mkstemp creates files 0600; restore the expected file
        # mode before publishing the file via rename.
        os.chmod(tmpfile, self.file_mode)
        portable.rename(tmpfile, finalpath)
    # "except E as e" instead of the Python-2-only "except E, e",
    # consistent with the other exception handlers in this file.
    except EnvironmentError as e:
        # This may get called in a situation where
        # the user does not have write access to the attrs
        # file.
        if tmpfile:
            portable.remove(tmpfile)
        if e.errno in (errno.EACCES, errno.EROFS):
            return
        else:
            raise
def publish_package(self):
    """Publish a package on behalf of the server.

    Moves the manifest and payload files that belong to this
    transaction into their final positions inside the server
    repository.  Callers shall supply a fmri, repository, and
    transaction in fmri, repo, and trans, respectively."""

    repo = self.repo
    pkg_name = self.fmri.pkg_name
    quoted_name = urllib.quote(pkg_name, "")
    pkgdir = os.path.join(repo.manifest_root, quoted_name)

    # Lazily create the per-package manifest directory.
    if not os.path.exists(pkgdir):
        os.makedirs(pkgdir)

    # Move the manifest (if any -- a package may contain no files,
    # so there needn't be a manifest) to
    # <manifest_root>/<pkg_name>/<version>.
    mpath = os.path.join(self.dir, "manifest")
    if os.path.exists(mpath):
        vpath = os.path.join(pkgdir,
            urllib.quote(str(self.fmri.version), ""))
        portable.rename(mpath, vpath)

    # Hand every remaining transaction file over to the cache
    # store, which files it under the appropriate directory
    # structure for its hash.
    for entry in os.listdir(self.dir):
        self.repo.cache_store.insert(
            entry, os.path.join(self.dir, entry))
def save_attrs(self, filenm="attrs"):
    """Save attributes from the in-memory catalog to a file
    specified by filenm.

    The data is staged in a temporary file and renamed into place
    so a partial write never clobbers an existing attrs file.
    Permission (EACCES) and read-only filesystem (EROFS) failures
    are ignored: a best-effort attribute save against media the
    user cannot write is not an error."""

    tmpfile = None
    assert not self.read_only
    finalpath = os.path.normpath(
        os.path.join(self.catalog_root, filenm))
    try:
        tmp_num, tmpfile = tempfile.mkstemp(
            dir=self.catalog_root)
        tfile = os.fdopen(tmp_num, "w")
        for a in self.attrs.keys():
            s = "S %s: %s\n" % (a, self.attrs[a])
            tfile.write(s)
        tfile.close()
        # mkstemp creates the file 0600; widen to the configured
        # catalog file mode before renaming into place.
        os.chmod(tmpfile, self.file_mode)
        portable.rename(tmpfile, finalpath)
    # Modern "as e" binding instead of Python-2-only "except E, e".
    except EnvironmentError as e:
        # This may get called in a situation where
        # the user does not have write access to the attrs
        # file.
        if tmpfile:
            portable.remove(tmpfile)
        # EROFS is treated like EACCES, matching the other
        # save_attrs implementation: a read-only filesystem is
        # tolerated for this best-effort save.
        if e.errno in (errno.EACCES, errno.EROFS):
            return
        else:
            raise
def __storebytype(self):
    """Create manifest.<typename> files to accelerate partial
    parsing of manifests.

    Kept separate from the __storeback code so that an upgrade can
    reuse manifests already on disk.  Each cache file is written
    to a .tmp name and renamed into place so an interrupted write
    never leaves a corrupt cache file behind."""

    assert self.loaded

    # create per-action type cache; use rename to avoid
    # corrupt files if ^C'd in the middle
    # XXX consider use of per-process tmp file names
    for n in self.actions_bytype:
        # open() rather than the file() builtin, which was
        # removed in Python 3; behaviour is identical here.
        f = open(self.__file_path("manifest.%s.tmp" % n), "w")
        for a in self.actions_bytype[n]:
            f.write("%s\n" % a)
        f.close()
        portable.rename(
            self.__file_path("manifest.%s.tmp" % n),
            self.__file_path("manifest.%s" % n))

    # create dircache
    f = open(self.__file_path("manifest.dircache.tmp"), "w")
    dirs = self.__actions_to_dirs()
    for s in self.__gen_dirs_to_str(dirs):
        f.write(s)
    f.close()
    portable.rename(self.__file_path("manifest.dircache.tmp"),
        self.__file_path("manifest.dircache"))
def __append_to_catalog(self, pkgstr):
    """Write string named pkgstr to the catalog.  This routine
    handles moving the catalog to a temporary file, appending the
    new string, and renaming the temporary file on top of the
    existing catalog.

    Raises CatalogException if pkgstr is already present; other
    staging failures are re-raised after the temporary file (and
    any open descriptors) have been cleaned up."""

    # Create tempfile
    tmp_num, tmpfile = tempfile.mkstemp(dir=self.catalog_root)

    try:
        # use fdopen since we already have a filehandle
        tfile = os.fdopen(tmp_num, "w")
    except OSError:
        portable.remove(tmpfile)
        raise

    # Try to open catalog file.  If it doesn't exist,
    # create an empty catalog file, and then open it read only.
    try:
        pfile = open(self.catalog_file, "rb")
    except IOError as e:
        if e.errno == errno.ENOENT:
            # Creating an empty file
            open(self.catalog_file, "wb").close()
            pfile = open(self.catalog_file, "rb")
        else:
            # Close the staged file before removing it so no
            # descriptor is leaked on this error path.
            tfile.close()
            portable.remove(tmpfile)
            raise

    # Make sure we're at the start of the file
    pfile.seek(0)

    # Write all of the existing entries in the catalog
    # into the tempfile.  Then append the new lines at the
    # end.
    try:
        for entry in pfile:
            if entry == pkgstr:
                raise CatalogException("Package {0} is already in "
                    "the catalog".format(pkgstr))
            else:
                tfile.write(entry)
        tfile.write(pkgstr)
    except Exception:
        # Close both files before removing the tempfile; the
        # previous version leaked both descriptors here.
        pfile.close()
        tfile.close()
        portable.remove(tmpfile)
        raise

    # Close our open files
    pfile.close()
    tfile.close()

    # Set the permissions on the tempfile correctly.
    # Mkstemp creates files as 600.  Rename the new
    # catalog on top of the old one.
    try:
        os.chmod(tmpfile, self.file_mode)
        portable.rename(tmpfile, self.catalog_file)
    except EnvironmentError:
        portable.remove(tmpfile)
        raise
def publish_package(self):
    """This method is called by the server to publish a package.

    It moves the files associated with the transaction into the
    appropriate position in the server repository.  Callers shall
    supply a fmri, repository, and transaction in fmri, repo, and
    trans, respectively."""

    repo = self.repo
    pkg_name = self.fmri.pkg_name
    pkgdir = os.path.join(repo.manifest_root,
        urllib.quote(pkg_name, ""))

    # Create the package's manifest directory on first publish.
    if not os.path.exists(pkgdir):
        os.makedirs(pkgdir)

    # A package may have no files, so there needn't be a manifest;
    # when one exists, move it to <pkgdir>/<quoted version>.
    mpath = os.path.join(self.dir, "manifest")
    if os.path.exists(mpath):
        dest = os.path.join(pkgdir,
            urllib.quote(str(self.fmri.version), ""))
        portable.rename(mpath, dest)

    # Everything left in the transaction directory is payload;
    # insert each file into the repository's cache store, which
    # provides the appropriate directory structure.
    for fname in os.listdir(self.dir):
        self.repo.cache_store.insert(
            fname, os.path.join(self.dir, fname))
def rebuild_index_from_scratch(self, fmris, tmp_index_dir=None):
    """Removes any existing index directory and rebuilds the
    index based on the fmris and manifests provided as an
    argument.  The "tmp_index_dir" parameter allows for a
    different directory than the default to be used."""

    # Reset version/emptiness state before rebuilding so the
    # index is treated as brand new.
    self.file_version_number = INITIAL_VERSION_NUMBER
    self.empty_index = True

    # A lock can't be held while the index directory is being
    # removed as that can cause rmtree() to fail when using
    # NFS.  As such, attempt to get the lock first, then
    # unlock, immediately rename the old index directory,
    # and then remove the old the index directory and
    # create a new one.
    self.lock()
    self.unlock()
    portable.rename(self._index_dir, self._index_dir + ".old")
    try:
        shutil.rmtree(self._index_dir + ".old")
        makedirs(self._index_dir)
    except OSError as e:
        if e.errno == errno.EACCES:
            raise search_errors.ProblematicPermissionsIndexException(
                self._index_dir)
        # NOTE(review): OSErrors other than EACCES fall through
        # and are silently swallowed here (e.g. a failed rmtree()
        # or makedirs()); confirm this best-effort behaviour is
        # intentional before changing it.
    self._generic_update_index(fmris, IDX_INPUT_TYPE_FMRI,
        tmp_index_dir)
    self.empty_index = False
def __storebytype(self):
    """ create manifest.<typename> files to accelerate partial
    parsing of manifests. Separate from __storeback code to allow
    upgrade to reuse existing on disk manifests"""

    assert self.loaded
    t_dir = self.__file_dir()

    def commit(tmp_path, final):
        # mkstemp creates files 0600; fix the mode, then
        # atomically publish the finished cache file.  The
        # rename avoids corrupt files if we are ^C'd in the
        # middle of a write.
        os.chmod(tmp_path, PKG_FILE_MODE)
        portable.rename(tmp_path, self.__file_path(final))

    # Write one cache file per action type.
    for atype in self.actions_bytype.keys():
        fd, tmp_path = tempfile.mkstemp(dir=t_dir,
            prefix="manifest.%s." % atype)
        out = os.fdopen(fd, "wb")
        for act in self.actions_bytype[atype]:
            out.write("%s\n" % act)
        out.close()
        commit(tmp_path, "manifest.%s" % atype)

    # Write the directory cache.
    fd, tmp_path = tempfile.mkstemp(dir=t_dir,
        prefix="manifest.dircache.")
    out = os.fdopen(fd, "wb")
    dirs = self.__actions_to_dirs()
    for chunk in self.__gen_dirs_to_str(dirs):
        out.write(chunk)
    out.close()
    commit(tmp_path, "manifest.dircache")
def testForcibleRename(self):
    """Rename a file on top of another file which already exists.

    Uses the modern unittest assertion names; failIf(),
    failUnless() and assertEquals() are deprecated aliases.
    Bytes literals keep the content comparison valid on
    Python 3, where os.read() returns bytes."""
    (fd1, path1) = tempfile.mkstemp()
    os.write(fd1, b"foo")
    (fd2, path2) = tempfile.mkstemp()
    os.write(fd2, b"bar")
    os.close(fd1)
    os.close(fd2)
    portable.rename(path1, path2)
    # The source must be gone and the destination must now hold
    # the source's contents.
    self.assertFalse(os.path.exists(path1))
    self.assertTrue(os.path.exists(path2))
    fd2 = os.open(path2, os.O_RDONLY)
    self.assertEqual(os.read(fd2, 3), b"foo")
    os.close(fd2)
    os.unlink(path2)
def testForcibleRename(self):
    """Verify that portable.rename() clobbers an existing
    destination file, like POSIX rename(2)."""
    src_fd, src = tempfile.mkstemp()
    os.write(src_fd, b"foo")
    dst_fd, dst = tempfile.mkstemp()
    os.write(dst_fd, b"bar")
    os.close(src_fd)
    os.close(dst_fd)

    portable.rename(src, dst)

    # The source disappears and the destination takes on the
    # source's contents.
    self.assertFalse(os.path.exists(src))
    self.assertTrue(os.path.exists(dst))
    check_fd = os.open(dst, os.O_RDONLY)
    self.assertEqual(os.read(check_fd, 3), b"foo")
    os.close(check_fd)
    os.unlink(dst)
def testRenameOfRunningExecutable(self):
    """Verify that, on Windows, an in-use executable can be
    replaced via portable.rename() and that the displaced copy
    is purged from the trash directory."""
    if util.get_canonical_os_type() != 'windows':
        return
    import pkg.portable.os_windows as os_windows
    # os.getcwdu() is Python-2-only; os.getcwd() already returns
    # text on Python 3, which this (b"foo"-using) test targets.
    cwd = os.getcwd()
    exefilesrc = 'C:\\Windows\\system32\\more.com'
    self.assertTrue(os.path.exists(exefilesrc))

    # create an image, copy an executable into it,
    # run the executable, replace the executable
    tdir1 = tempfile.mkdtemp()
    img1 = image.Image(tdir1, imgtype=image.IMG_USER,
        should_exist=False, user_provided_dir=True)
    img1.history.client_name = "pkg-test"
    img1.set_attrs(False, "test",
        origins=["http://localhost:10000"], refresh_allowed=False)
    exefile = os.path.join(tdir1, 'less.com')
    shutil.copyfile(exefilesrc, exefile)
    proc = subprocess.Popen([exefile], stdin=subprocess.PIPE)
    self.assertRaises(OSError, os.unlink, exefile)
    fd1, path1 = tempfile.mkstemp(dir=tdir1)
    os.write(fd1, b"foo")
    os.close(fd1)
    portable.rename(path1, exefile)
    fd2 = os.open(exefile, os.O_RDONLY)
    # os.read() returns bytes; compare against a bytes literal
    # (the previous code compared against the str "foo").
    self.assertEqual(os.read(fd2, 3), b"foo")
    os.close(fd2)
    proc.communicate()

    # Make sure that the moved executable gets deleted
    # This is a white-box test
    # To simulate running another process, we delete the cache
    # and call get_trashdir as if another file was being moved
    # to the trash.
    os_windows.cached_image_info = []
    os_windows.get_trashdir(exefile)
    self.assertTrue(not os.path.exists(
        os.path.join(img1.imgdir, os_windows.trashname)))

    # cleanup
    os.chdir(cwd)
    shutil.rmtree(tdir1)
def shift_file(self, use_dir, suffix):
    """Moves the existing file with self._name in directory
    use_dir to a new file named self._name + suffix in directory
    use_dir.  If it has done this previously, it removes the old
    file it moved.  It also opens the newly moved file and uses
    that as the file for its file handle."""

    assert self._file_handle is None
    shifted_name = self._name + suffix
    portable.rename(os.path.join(use_dir, self._name),
        os.path.join(use_dir, shifted_name))

    # self.open() is driven by self._name, so temporarily point
    # it at the shifted file for the duration of the open call,
    # then restore the original name.
    saved_name = self._name
    self._name = shifted_name
    self.open(use_dir)
    self._name = saved_name

    # Drop the file shifted by a previous call, if any, then
    # remember the one we just created.
    if self._old_suffix is not None:
        os.remove(os.path.join(use_dir, self._old_suffix))
    self._old_suffix = shifted_name
def testRenameOfRunningExecutable(self):
    """Verify that, on Windows, an in-use executable can be
    replaced via portable.rename() and that the displaced copy
    is purged from the trash directory.

    Uses the modern unittest assertion names; assert_() and
    assertEquals() are deprecated aliases."""
    if util.get_canonical_os_type() != 'windows':
        return
    import pkg.portable.os_windows as os_windows
    cwd = os.getcwdu()
    exefilesrc = 'C:\\Windows\\system32\\more.com'
    self.assertTrue(os.path.exists(exefilesrc))

    # create an image, copy an executable into it,
    # run the executable, replace the executable
    tdir1 = tempfile.mkdtemp()
    img1 = image.Image(tdir1, imgtype=image.IMG_USER,
        should_exist=False, user_provided_dir=True)
    img1.history.client_name = "pkg-test"
    img1.set_attrs(False, "test",
        origins=["http://localhost:10000"], refresh_allowed=False)
    exefile = os.path.join(tdir1, 'less.com')
    shutil.copyfile(exefilesrc, exefile)
    proc = subprocess.Popen([exefile], stdin=subprocess.PIPE)
    self.assertRaises(OSError, os.unlink, exefile)
    fd1, path1 = tempfile.mkstemp(dir=tdir1)
    os.write(fd1, "foo")
    os.close(fd1)
    portable.rename(path1, exefile)
    fd2 = os.open(exefile, os.O_RDONLY)
    self.assertEqual(os.read(fd2, 3), "foo")
    os.close(fd2)
    proc.communicate()

    # Make sure that the moved executable gets deleted
    # This is a white-box test
    # To simulate running another process, we delete the cache
    # and call get_trashdir as if another file was being moved
    # to the trash.
    os_windows.cached_image_info = []
    os_windows.get_trashdir(exefile)
    self.assertTrue(not os.path.exists(os.path.join(img1.imgdir,
        os_windows.trashname)))

    # cleanup
    os.chdir(cwd)
    shutil.rmtree(tdir1)
def insert(self, hashval, src_path):
    """Add the content at "src_path" to the files under the name
    "hashval".  Returns the path to the inserted file."""

    # Writes are refused outright on a read-only file manager.
    if self.readonly:
        raise NeedToModifyReadOnlyFileManager(hashval)
    # cur_full_path: where the file currently lives (legacy
    # layout), if anywhere; dest_full_path: where the preferred
    # layout wants it.
    cur_full_path, dest_full_path = \
        self.__select_path(hashval, True)
    if cur_full_path and cur_full_path != dest_full_path:
        # The file is stored in an old location and needs to be
        # moved to a new location.  To prevent disruption of
        # service or other race conditions, rename the source
        # file into the old place first.
        try:
            portable.rename(src_path, cur_full_path)
        except EnvironmentError, e:
            # Permission and read-only-filesystem failures are
            # surfaced as FMPermissionsException; everything
            # else propagates unchanged.
            if e.errno == errno.EACCES or \
                e.errno == errno.EROFS:
                raise FMPermissionsException(e.filename)
            raise
        src_path = cur_full_path
    # NOTE(review): this snippet appears truncated here -- the
    # code that actually moves src_path into dest_full_path and
    # returns the inserted path is not visible in this chunk.
def publish_package(self):
    """This method is called by the server to publish a package.
    It moves the files associated with the transaction into the
    appropriate position in the server repository.  Callers shall
    supply a fmri, repo store, and transaction in fmri, rstore,
    and trans, respectively."""

    pkg_name = self.fmri.pkg_name

    # Place the manifest at the path the repo store assigns to
    # this fmri, creating parent directories as needed.
    src_mpath = os.path.join(self.dir, "manifest")
    dest_mpath = self.rstore.manifest(self.fmri)
    misc.makedirs(os.path.dirname(dest_mpath))
    portable.rename(src_mpath, dest_mpath)

    # Everything else in the transaction directory is payload
    # destined for the cache store; the "append" state file is
    # not package content and stays behind.
    for entry in os.listdir(self.dir):
        if entry == "append":
            continue
        self.rstore.cache_store.insert(
            entry, os.path.join(self.dir, entry))
def publish_package(self):
    """This method is called by the server to publish a package.
    It moves the files associated with the transaction into the
    appropriate position in the server repository.  Callers shall
    supply a fmri, config, and transaction in fmri, cfg, and
    trans, respectively."""

    cfg = self.cfg
    pkg_name = self.fmri.pkg_name
    pkgdir = os.path.join(cfg.pkg_root, urllib.quote(pkg_name, ""))

    # If the directory isn't there, create it.
    if not os.path.exists(pkgdir):
        os.makedirs(pkgdir)

    # mv manifest to pkg_name / version
    # A package may have no files, so there needn't be a manifest.
    mpath = os.path.join(self.dir, "manifest")
    if os.path.exists(mpath):
        portable.rename(mpath, os.path.join(pkgdir,
            urllib.quote(str(self.fmri.version), "")))

    # Move each file to file_root, with appropriate directory
    # structure.
    for f in os.listdir(self.dir):
        path = misc.hash_file_name(f)
        src_path = os.path.join(self.dir, f)
        dst_path = os.path.join(cfg.file_root, path)
        try:
            portable.rename(src_path, dst_path)
        # "except E as e" instead of Python-2-only "except E, e".
        except OSError as e:
            # XXX We might want to be more careful with this
            # exception, and only try makedirs() if rename()
            # failed because the directory didn't exist.
            #
            # I'm not sure it matters too much, except that
            # if makedirs() fails, we'll see that exception,
            # rather than the original one from rename().
            #
            # Interestingly, rename() failing due to missing
            # path component fails with ENOENT, not ENOTDIR
            # like rename(2) suggests (6578404).
            try:
                os.makedirs(os.path.dirname(dst_path))
            except OSError as mkerr:
                # Use a distinct name so the rename() error in
                # "e" isn't shadowed by the makedirs() error.
                if mkerr.errno != errno.EEXIST:
                    raise
            portable.rename(src_path, dst_path)
# moved to a new location. To prevent disruption of # service or other race conditions, rename the source # file into the old place first. try: portable.rename(src_path, cur_full_path) except EnvironmentError, e: if e.errno == errno.EACCES or \ e.errno == errno.EROFS: raise FMPermissionsException(e.filename) raise src_path = cur_full_path p_dir = os.path.dirname(dest_full_path) try: # Move the file into place. portable.rename(src_path, dest_full_path) except EnvironmentError, e: if e.errno == errno.ENOENT and not os.path.isdir(p_dir): try: os.makedirs(p_dir) except EnvironmentError, e: if e.errno == errno.EACCES or \ e.errno == errno.EROFS: raise FMPermissionsException(e.filename) # If directory creation failed due to # EEXIST, but the entry it failed for # isn't the immediate parent, assume # there's a larger problem and re-raise # the exception. For file_manager, this # is believed to be unlikely. if not (e.errno == errno.EEXIST and e.filename == p_dir):
def __place(self, hashval, src_path, pfunc):
    """Add the content at "src_path" to the files under the name
    "hashval".  Returns the path to the inserted file."""

    # Writes are refused outright on a read-only file manager.
    if self.readonly:
        raise NeedToModifyReadOnlyFileManager(hashval)
    # cur_full_path: where the file currently lives (legacy
    # layout), if anywhere; dest_full_path: where the preferred
    # layout wants it placed.
    cur_full_path, dest_full_path = \
        self.__select_path(hashval, True)
    if cur_full_path and cur_full_path != dest_full_path:
        # The file is stored in an old location and needs to be
        # moved to a new location.  To prevent disruption of
        # service or other race conditions, rename the source
        # file into the old place first.
        try:
            portable.rename(src_path, cur_full_path)
        except EnvironmentError as e:
            if e.errno == errno.EACCES or \
                e.errno == errno.EROFS:
                raise FMPermissionsException(e.filename)
            raise
        src_path = cur_full_path

    # Retry loop: place the file via pfunc, creating the missing
    # parent directory on ENOENT and looping to try again.
    while True:
        try:
            # Place the file.
            pfunc(src_path, dest_full_path)
        except EnvironmentError as e:
            p_dir = os.path.dirname(dest_full_path)
            if e.errno == errno.ENOENT and \
                not os.path.isdir(p_dir):
                try:
                    os.makedirs(p_dir)
                except EnvironmentError as e:
                    if e.errno == errno.EACCES or \
                        e.errno == errno.EROFS:
                        raise FMPermissionsException(
                            e.filename)
                    # If directory creation failed
                    # due to EEXIST, but the entry
                    # it failed for isn't the
                    # immediate parent, assume
                    # there's a larger problem and
                    # re-raise the exception.  For
                    # file_manager, this is believed
                    # to be unlikely.
                    if not (e.errno == errno.EEXIST and
                        e.filename == p_dir):
                        raise
                # Parent directory created successfully
                # so loop again to retry place.
            elif e.errno == errno.ENOENT and \
                not os.path.exists(src_path):
                if os.path.exists(dest_full_path):
                    # Item has already been moved
                    # into cache by another process;
                    # nothing more to do.  (This
                    # could happen during parallel
                    # publication.)
                    return dest_full_path
                raise FMInsertionFailure(src_path,
                    dest_full_path)
            elif e.errno == errno.EACCES or \
                e.errno == errno.EROFS:
                raise FMPermissionsException(e.filename)
            elif e.errno != errno.ENOENT:
                raise apx._convert_error(e)
            # NOTE(review): an ENOENT where src_path still
            # exists and p_dir is already a directory falls
            # through and retries indefinitely; confirm this
            # cannot loop forever.
        else:
            # Success!
            break

    # Attempt to remove the parent directory of the file's original
    # location to ensure empty directories aren't left behind.
    if cur_full_path:
        try:
            os.removedirs(os.path.dirname(cur_full_path))
        except EnvironmentError as e:
            if e.errno == errno.ENOENT or \
                e.errno == errno.EEXIST:
                pass
            elif e.errno == errno.EACCES or \
                e.errno == errno.EROFS:
                raise FMPermissionsException(e.filename)
            else:
                raise

    # Return the location of the placed file to the caller.
    return dest_full_path
raise api_errors.ReadOnlyFileSystemException(e.filename) raise mfile = os.fdopen(fd, "wb") # # We specifically avoid sorting manifests before writing # them to disk-- there's really no point in doing so, since # we'll sort actions globally during packaging operations. # mfile.write(self.tostr_unsorted()) mfile.close() try: os.chmod(fn, PKG_FILE_MODE) portable.rename(fn, mfst_path) except EnvironmentError, e: if e.errno == errno.EACCES: raise api_errors.PermissionsException(e.filename) if e.errno == errno.EROFS: raise api_errors.ReadOnlyFileSystemException(e.filename) raise def get_variants(self, name): if name not in self.attributes: return None variants = self.attributes[name] if not isinstance(variants, str): return variants return [variants]
def do_reversion(pub, ref_pub, target_repo, ref_xport, changes, ignores,
    cmp_policy, ref_repo, ref, ref_xport_cfg):
    """Do the repo reversion.
    Return 'True' if repo got modified, 'False' otherwise."""

    global temp_root, tracker, dry_run, repo_finished, repo_modified

    target_cat = target_repo.get_catalog(pub=pub)
    ref_cat = fetch_catalog(ref_pub, ref_xport, temp_root)

    # Latest version of every package stem in each repo.
    latest_pkgs = get_latest(target_cat)
    latest_ref_pkgs = get_latest(ref_cat)

    # Packages the user explicitly asked not to reversion.
    no_revs = get_matching_pkgs(target_cat, changes)

    # We use bulk prefetching for faster transport of the manifests.
    # Prefetch requires an intent which it sends to the server. Here
    # we just use operation=reversion for all FMRIs.
    intent = "operation=reversion;"

    # use list() to force the zip() to evaluate
    ref_pkgs = list(zip(latest_ref_pkgs.values(), repeat(intent)))

    # Retrieve reference manifests.
    # Try prefetching manifests in bulk first for faster, parallel
    # transport. Retryable errors during prefetch are ignored and
    # manifests are retrieved again during the "Reading" phase.
    ref_xport.prefetch_manifests(ref_pkgs, progtrack=tracker)

    # Need to change the output of mfst_fetch since otherwise we
    # would see "Download Manifests x/y" twice, once from the
    # prefetch and once from the actual manifest analysis.
    tracker.mfst_fetch = progress.GoalTrackerItem(
        _("Analyzing Manifests"))

    tracker.manifest_fetch_start(len(latest_pkgs))

    reversioned_pkgs = set()
    depend_changes = {}
    dups = 0   # target pkg has equal version to ref pkg
    new_p = 0  # target pkg not in ref
    sucs = 0   # ref pkg is successor to pkg in targ
    nrevs = 0  # pkgs requested to not be reversioned by user

    # First pass: classify every latest target package.
    for p in latest_pkgs:
        # First check if the package is in the list of FMRIs the user
        # doesn't want to reversion.
        if p in no_revs:
            nrevs += 1
            tracker.manifest_fetch_progress(completion=True)
            continue

        # Check if the package is in the ref repo, if not: ignore.
        if not p in latest_ref_pkgs:
            new_p += 1
            tracker.manifest_fetch_progress(completion=True)
            continue

        pfmri = latest_pkgs[p]
        # Ignore if latest package is the same in targ and ref.
        if pfmri == latest_ref_pkgs[p]:
            dups += 1
            tracker.manifest_fetch_progress(completion=True)
            continue

        # Ignore packages where ref version is higher.
        if latest_ref_pkgs[p].is_successor(pfmri):
            sucs += 1
            tracker.manifest_fetch_progress(completion=True)
            continue

        # Pull the manifests for target and ref repo.
        dm = get_manifest(target_repo, pub, pfmri)
        rm = ref_xport.get_manifest(latest_ref_pkgs[p])
        tracker.manifest_fetch_progress(completion=True)

        tdeps = set()
        rdeps = set()

        # Diff target and ref manifest.
        # action only in targ, action only in ref, common action
        ta, ra, ca = manifest.Manifest.comm([dm, rm],
            cmp_policy=cmp_policy)

        # Check for manifest changes.
        if not all(use_ref(a, tdeps, ignores) for a in ta) \
            or not all(use_ref(a, rdeps, ignores) for a in ra):
            continue

        # Both dep lists should be equally long in case deps have just
        # changed. If not, it means a dep has been added or removed and
        # that means content change.
        if len(tdeps) != len(rdeps):
            continue

        # If len is not different we still have to make sure that
        # entries have the same pkg stem. The test above just saves time
        # in some cases.
        if not all(td in rdeps for td in tdeps):
            continue

        # Pkg only contains dependency change. Keep for further
        # analysis.
        if tdeps:
            depend_changes[pfmri.get_pkg_stem(
                anarchy=True)] = tdeps
            continue

        # Pkg passed all checks and can be reversioned.
        reversioned_pkgs.add(pfmri.get_pkg_stem(anarchy=True))

    tracker.manifest_fetch_done()

    def has_changed(pstem, seen=None, depth=0):
        """Determine if a package or any of its dependencies has
        changed.
        Function will check if a dependency had a content change. If it
        only had a dependency change, analyze its dependencies
        recursively. Only if the whole dependency chain didn't have any
        content change it is safe to reversion the package.

        Note about circular dependencies: The function keeps track of
        pkgs it already processed by stuffing them into the set 'seen'.
        However, 'seen' gets updated before the child dependencies of
        the current pkg are examined. This works if 'seen' is only used
        for one dependency chain since the function immediately comes
        back with a True result if a pkg has changed further down the
        tree. However, if 'seen' is re-used between runs, it will
        return prematurely, likely returning wrong results."""

        MAX_DEPTH = 100

        if not seen:
            seen = set()

        if pstem in seen:
            return False

        depth += 1
        if depth > MAX_DEPTH:
            # Let's make sure we don't run into any
            # recursion limits. If the dep chain is too deep
            # just treat as changed pkg.
            error(
                _("Dependency chain depth of >{md:d} detected for"
                " {p}.").format(md=MAX_DEPTH, p=p))
            return True

        # Pkg has no change at all.
        if pstem in reversioned_pkgs:
            return False

        # Pkg must have content change, if it had no change it would be
        # in reversioned_pkgs, and if it had just a dep change it would
        # be in depend_changes.
        if pstem not in depend_changes:
            return True

        # We need to update 'seen' here, otherwise we won't find this
        # entry in case of a circular dependency.
        seen.add(pstem)

        return any(has_changed(d, seen, depth)
            for d in depend_changes[pstem])

    # Check if packages which just have a dep change can be reversioned by
    # checking if child dependencies also have no content change.
    dep_revs = 0
    for p in depend_changes:
        if not has_changed(p):
            dep_revs += 1
            reversioned_pkgs.add(p)

    # Summarize the analysis for the user before doing any work.
    status = []
    if cmp_policy == CMP_UNSIGNED:
        status.append((_("WARNING: Signature changes in file content "
            "ignored in resurfacing")))
    status.append((_("Packages to process:"), str(len(latest_pkgs))))
    status.append((_("New packages:"), str(new_p)))
    status.append((_("Unmodified packages:"), str(dups)))
    if sucs:
        # This only happens if reference repo is ahead of target repo,
        # so only show if it actually happened.
        status.append((_("Packages with successors in "
            "reference repo:"), str(sucs)))
    if nrevs:
        # This only happens if user specified pkgs to not revert,
        # so only show if it actually happened.
        status.append((_("Packages not to be reversioned by user "
            "request:"), str(nrevs)))
    status.append((_("Packages with no content change:"),
        str(len(reversioned_pkgs) - dep_revs)))
    status.append((_("Packages which only have dependency change:"),
        str(len(depend_changes))))
    status.append((_("Packages with unchanged dependency chain:"),
        str(dep_revs)))
    status.append((_("Packages to be reversioned:"),
        str(len(reversioned_pkgs))))

    rjust_status = max(len(s[0]) for s in status)
    rjust_value = max(len(s[1]) for s in status)
    for s in status:
        msg("{0} {1}".format(s[0].rjust(rjust_status),
            s[1].rjust(rjust_value)))

    if not reversioned_pkgs:
        msg(_("\nNo packages to reversion."))
        return False

    if dry_run:
        msg(_("\nReversioning packages (dry-run)."))
    else:
        msg(_("\nReversioning packages."))

    # Start the main pass. Reversion packages from reversioned_pkgs to the
    # version in the ref repo. For packages which don't get reversioned,
    # check if the dependency versions are still correct, fix if necessary.
    tracker.reversion_start(len(latest_pkgs), len(reversioned_pkgs))

    for p in latest_pkgs:
        # NOTE(review): pfmri here is whatever value was left over
        # from the previous iteration (or the analysis loop) at
        # this point -- the current package's fmri is only assigned
        # below.  Confirm the progress tracker is meant to receive
        # the stale fmri.
        tracker.reversion_add_progress(pfmri, pkgs=1)
        modified = False

        # Get the pkg fmri (pfmri) of the latest version based on if it
        # has been reversioned or not.
        stem = latest_pkgs[p].get_pkg_stem(anarchy=True)
        if stem in reversioned_pkgs:
            tracker.reversion_add_progress(pfmri, reversioned=1)
            if dry_run:
                continue
            pfmri = latest_ref_pkgs[p]

            # Retrieve manifest from ref repo and replace the one in
            # the target repo. We don't have to adjust depndencies
            # for these packages because they will not depend on
            # anything we'll reversion.
            rmani = ref_xport.get_manifest(pfmri)

            if cmp_policy == CMP_UNSIGNED:
                # Files with different signed content hash
                # values can have equivalent unsigned content
                # hash. CMP_UNSIGNED relaxes comparison
                # constraints and allows this case to compare
                # as equal. The reversioned manifest may
                # reference file data that is not present in
                # the target repository, so ensure that any
                # missing file data is added to the target
                # repository.
                add_missing_files(target_repo, pub,
                    latest_pkgs[p], pfmri, rmani, ref,
                    ref_repo, ref_xport, ref_xport_cfg,
                    ref_pub)

            opath = target_repo.manifest(latest_pkgs[p], pub)
            os.remove(opath)
            path = target_repo.manifest(pfmri, pub)
            try:
                repo_modified = True
                repo_finished = False
                portable.rename(rmani.pathname, path)
            except OSError as e:
                abort(err=_("Could not reversion manifest "
                    "{path}: {err}").format(path=path,
                    err=str(e)))
            continue

        # For packages we don't reversion we have to check if they
        # depend on a reversioned package.
        # Since the version of this dependency might be removed from the
        # repo, we have to adjust the dep version to the one of the
        # reversioned pkg.
        pfmri = latest_pkgs[p]
        omani = get_manifest(target_repo, pub, pfmri)

        mani = manifest.Manifest(pfmri)
        for act in omani.gen_actions():
            nact = adjust_dep_action(p, act, latest_ref_pkgs,
                reversioned_pkgs, ref_xport)
            if nact:
                mani.add_action(nact, misc.EmptyI)
                if nact is not act:
                    modified = True

        # Only touch manifest if something actually changed.
        if modified:
            tracker.reversion_add_progress(pfmri, adjusted=1)
            if not dry_run:
                path = target_repo.manifest(pfmri, pub)
                repo_modified = True
                repo_finished = False
                mani.store(path)
    tracker.reversion_done()

    return True
def lookup(self, hashval, opener=False, check_existence=True):
    """Find the file for hashval.

    The "hashval" parameter contains the name of the file to be
    found.

    The "opener" parameter determines whether the function will
    return a path or an open file handle.

    Returns None when no file for hashval can be found."""

    # cur_full_path: where the file was actually found (may be a
    # legacy layout); dest_full_path: where the primary layout
    # wants it.
    cur_full_path, dest_full_path = self.__select_path(
        hashval, check_existence)
    if not cur_full_path:
        return None

    # If the depot isn't readonly and the file isn't in the location
    # that the primary layout thinks it should be, try to move the
    # file into the right place.
    if dest_full_path != cur_full_path and not self.readonly:
        p_sdir = os.path.dirname(cur_full_path)
        try:
            # Attempt to move the file from the old location
            # to the preferred location.
            try:
                portable.rename(cur_full_path,
                    dest_full_path)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise

                p_ddir = os.path.dirname(
                    dest_full_path)
                # ENOENT with the destination's parent
                # already present means something else is
                # wrong; don't try to "fix" it.
                if os.path.isdir(p_ddir):
                    raise

                try:
                    os.makedirs(p_ddir)
                except EnvironmentError as e:
                    if e.errno == errno.EACCES or \
                        e.errno == errno.EROFS:
                        raise FMPermissionsException(
                            e.filename)
                    # If directory creation failed
                    # due to EEXIST, but the entry
                    # it failed for isn't the
                    # immediate parent, assume
                    # there's a larger problem
                    # and re-raise the exception.
                    # For file_manager, this is
                    # believed to be unlikely.
                    if not (e.errno == errno.EEXIST and
                        e.filename == p_ddir):
                        raise

                portable.rename(cur_full_path,
                    dest_full_path)

            # Since the file has been moved, point at the
            # new destination *before* attempting to remove
            # the (now possibly empty) parent directory of
            # of the source file.
            cur_full_path = dest_full_path

            # This may fail because other files can still
            # exist in the parent path for the source, so
            # must be done last.
            os.removedirs(p_sdir)
        except EnvironmentError:
            # If there's an error during these operations,
            # check that cur_full_path still exists.  If
            # it's gone, return None.
            if not os.path.exists(cur_full_path):
                return None

    if opener:
        return open(cur_full_path, "rb")
    return cur_full_path
def install(self, pkgplan, orig):
    """Client-side method that installs a file.

    'pkgplan' is the plan for the package delivering this action; its
    image, origin fmri, and destination fmri are consulted throughout.

    'orig' is the already-installed version of this action, if any; it
    drives preservation decisions and whether payload data must be
    laid down.  May be replaced by a restored "save_file" below."""
    mode = None
    try:
        mode = int(self.attrs.get("mode", None), 8)
    except (TypeError, ValueError):
        # Mode isn't valid, so let validate raise a more
        # informative error.
        self.validate(fmri=pkgplan.destination_fmri)

    owner, group = self.get_fsobj_uid_gid(pkgplan,
        pkgplan.destination_fmri)

    final_path = self.get_installed_path(pkgplan.image.get_root())

    # Don't allow installation through symlinks.
    self.fsobj_checkpath(pkgplan, final_path)

    if not os.path.exists(os.path.dirname(final_path)):
        self.makedirs(os.path.dirname(final_path),
            mode=misc.PKG_DIR_MODE,
            fmri=pkgplan.destination_fmri)
    elif (not orig and not pkgplan.origin_fmri and
        "preserve" in self.attrs and
        self.attrs["preserve"] not in ("abandon", "install-only") and
        os.path.isfile(final_path)):
        # Unpackaged editable file is already present during
        # initial install; salvage it before continuing.
        pkgplan.salvage(final_path)

    # XXX If we're upgrading, do we need to preserve file perms from
    # existing file?

    # check if we have a save_file active; if so, simulate file
    # being already present rather than installed from scratch
    if "save_file" in self.attrs:
        orig = self.restore_file(pkgplan.image)

    # See if we need to preserve the file, and if so, set that up.
    #
    # XXX What happens when we transition from preserve to
    # non-preserve or vice versa? Do we want to treat a preserve
    # attribute as turning the action into a critical action?
    #
    # XXX We should save the originally installed file. It can be
    # used as an ancestor for a three-way merge, for example. Where
    # should it be stored?
    pres_type = self._check_preserve(orig, pkgplan)
    do_content = True
    old_path = None
    if pres_type == True or (pres_type and
        pkgplan.origin_fmri == pkgplan.destination_fmri):
        # File is marked to be preserved and exists so don't
        # reinstall content.
        do_content = False
    elif pres_type == "legacy":
        # Only rename old file if this is a transition to
        # preserve=legacy from something else.
        if orig.attrs.get("preserve", None) != "legacy":
            old_path = final_path + ".legacy"
    elif pres_type == "renameold.update":
        old_path = final_path + ".update"
    elif pres_type == "renameold":
        old_path = final_path + ".old"
    elif pres_type == "renamenew":
        final_path = final_path + ".new"
    elif pres_type == "abandon":
        # Leave whatever is on disk untouched.
        return

    # If it is a directory (and not empty) then we should
    # salvage the contents.
    if os.path.exists(final_path) and \
        not os.path.islink(final_path) and \
        os.path.isdir(final_path):
        try:
            os.rmdir(final_path)
        except OSError as e:
            if e.errno == errno.ENOENT:
                pass
            elif e.errno in (errno.EEXIST, errno.ENOTEMPTY):
                pkgplan.salvage(final_path)
            elif e.errno != errno.EACCES:
                # this happens on Windows
                raise

    # XXX This needs to be modularized.
    if do_content and self.needsdata(orig, pkgplan):
        # Write payload to a temp file in the destination
        # directory, then rename into place below.
        tfilefd, temp = tempfile.mkstemp(
            dir=os.path.dirname(final_path))
        if not self.data:
            # The state of the filesystem changed after the
            # plan was prepared; attempt a one-off
            # retrieval of the data.
            self.data = self.__set_data(pkgplan)
        stream = self.data()
        tfile = os.fdopen(tfilefd, "wb")
        try:
            # Always verify using the most preferred hash
            hash_attr, hash_val, hash_func = \
                digest.get_preferred_hash(self)
            shasum = misc.gunzip_from_stream(stream, tfile,
                hash_func)
        except zlib.error as e:
            raise ActionExecutionError(self,
                details=_("Error decompressing payload: "
                "{0}").format(" ".join([str(a) for a in
                e.args])), error=e)
        finally:
            tfile.close()
            stream.close()

        if shasum != hash_val:
            raise ActionExecutionError(self,
                details=_("Action data hash verification "
                "failure: expected: {expected} computed: "
                "{actual} action: {action}").format(
                    expected=hash_val,
                    actual=shasum,
                    action=self))
    else:
        temp = final_path

    try:
        os.chmod(temp, mode)
    except OSError as e:
        # If the file didn't exist, assume that's intentional,
        # and drive on.
        if e.errno != errno.ENOENT:
            raise
        else:
            return

    try:
        portable.chown(temp, owner, group)
    except OSError as e:
        if e.errno != errno.EPERM:
            raise

    # XXX There's a window where final_path doesn't exist, but we
    # probably don't care.
    if do_content and old_path:
        try:
            portable.rename(final_path, old_path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                # Only care if file isn't gone already.
                raise

    # This is safe even if temp == final_path.
    try:
        portable.rename(temp, final_path)
    except OSError as e:
        raise api_errors.FileInUseException(final_path)

    # Handle timestamp if specified (and content was installed).
    if do_content and "timestamp" in self.attrs:
        t = misc.timestamp_to_time(self.attrs["timestamp"])
        try:
            os.utime(final_path, (t, t))
        except OSError as e:
            if e.errno != errno.EACCES:
                raise

            # On Windows, the time cannot be changed on a
            # read-only file
            os.chmod(final_path, stat.S_IRUSR | stat.S_IWUSR)
            os.utime(final_path, (t, t))
            os.chmod(final_path, mode)

    # Handle system attributes.
    sattr = self.attrs.get("sysattr")
    if sattr:
        if isinstance(sattr, list):
            sattr = ",".join(sattr)
        sattrs = sattr.split(",")
        if len(sattrs) == 1 and \
            sattrs[0] not in portable.get_sysattr_dict():
            # not a verbose attr, try as a compact attr seq
            arg = sattrs[0]
        else:
            arg = sattrs

        try:
            portable.fsetattr(final_path, arg)
        except OSError as e:
            if e.errno != errno.EINVAL:
                raise
            warn = _("System attributes are not supported "
                "on the target image filesystem; 'sysattr'"
                " ignored for {0}").format(self.attrs["path"])
            pkgplan.image.imageplan.pd.add_item_message(
                pkgplan.destination_fmri,
                misc.time_to_timestamp(time.time()),
                MSG_WARNING, warn)
        except ValueError as e:
            # NOTE(review): the concatenated literals yield
            # "...for {path}'{attrlist}'..." with no space
            # between them -- looks like a missing separator;
            # left unchanged because it is a runtime string.
            warn = _("Could not set system attributes for {path}"
                "'{attrlist}': {err}").format(attrlist=sattr,
                err=e, path=self.attrs["path"])
            pkgplan.image.imageplan.pd.add_item_message(
                pkgplan.destination_fmri,
                misc.time_to_timestamp(time.time()),
                MSG_WARNING, warn)
def recv(cls, filep, path, pub=None):
    """A static method that takes a file-like object and a path.  This
    is the other half of catalog.send().  It reads a stream as an
    incoming catalog and lays it down on disk.

    'filep' is iterated line by line; "S " lines go to the attrs
    file, "R " lines and other known-prefix lines go to the catalog
    file, and anything else is parsed as an FMRI.

    'path' is the directory that will hold the "attrs" and "catalog"
    files; it is created if missing.

    'pub', if given, is a mapping whose "origin" entry is recorded
    as the "S origin" attribute.

    Raises fmri.IllegalFmri if any package line fails to parse."""

    bad_fmri = None

    if not os.path.exists(path):
        os.makedirs(path)

    # Write into temp files first, then rename into place below so
    # readers never observe a partially-written catalog.
    afd, attrpath = tempfile.mkstemp(dir=path)
    cfd, catpath = tempfile.mkstemp(dir=path)

    attrf = os.fdopen(afd, "w")
    catf = os.fdopen(cfd, "w")

    attrpath_final = os.path.normpath(os.path.join(path, "attrs"))
    catpath_final = os.path.normpath(os.path.join(path, "catalog"))

    try:
        for s in filep:
            slen = len(s)

            # If line is too short, process the next one
            if slen < 2:
                continue
            # check that line is in the proper format
            elif not s[1].isspace():
                continue
            elif not s[0] in known_prefixes:
                catf.write(s)
            elif s.startswith("S "):
                attrf.write(s)
            elif s.startswith("R "):
                catf.write(s)
            else:
                # XXX Need to be able to handle old and
                # new format catalogs.
                try:
                    f = fmri.PkgFmri(s[2:])
                except fmri.IllegalFmri, e:
                    # Remember the first failure; it is
                    # re-raised after the transfer ends.
                    bad_fmri = e
                    continue

                catf.write("%s %s %s %s\n" %
                    (s[0], "pkg", f.pkg_name, f.version))
    except:
        # Re-raise all uncaught exceptions after performing
        # cleanup.
        attrf.close()
        catf.close()
        os.remove(attrpath)
        os.remove(catpath)
        raise

    # If we got a parse error on FMRIs and transfer
    # wasn't truncated, raise a FmriFailures error.
    if bad_fmri:
        attrf.close()
        catf.close()
        os.remove(attrpath)
        os.remove(catpath)
        raise bad_fmri

    # Write the publisher's origin into our attributes
    if pub:
        origstr = "S origin: %s\n" % pub["origin"]
        attrf.write(origstr)

    attrf.close()
    catf.close()

    # Mkstemp sets mode 600 on these files by default.
    # Restore them to 644, so that unprivileged users
    # may read these files.
    os.chmod(attrpath, cls.file_mode)
    os.chmod(catpath, cls.file_mode)

    portable.rename(attrpath, attrpath_final)
    portable.rename(catpath, catpath_final)
tfile.write(entry) tfile.write(pkgstr) except Exception: portable.remove(tmpfile) raise # Close our open files pfile.close() tfile.close() # Set the permissions on the tempfile correctly. # Mkstemp creates files as 600. Rename the new # cataog on top of the old one. try: os.chmod(tmpfile, self.file_mode) portable.rename(tmpfile, self.catalog_file) except EnvironmentError: portable.remove(tmpfile) raise @staticmethod def fast_cache_fmri(d, pfmri, sversion, pubs): """Store the fmri in a data structure 'd' for fast lookup, but requires the caller to provide all the data pre-sorted and processed. 'd' is a dict that maps each package name to another dictionary 'pfmri' is the fmri object to be cached. 'sversion' is the string representation of pfmri.version.
def test_01_basics(self):
    """Test that adding a https publisher works and that a package
    can be installed from that publisher.

    Covers: image creation without client certs (must fail), adding
    the publisher once the trust anchor is seeded, installing a
    package, SSL operation after the image directory is renamed, and
    reaching the repository through good and bad HTTPS proxies."""
    self.ac.start()

    # Test that creating an image using a HTTPS repo without
    # providing any keys or certificates fails.
    self.assertRaises(TransportFailures, self.image_create,
        self.acurl1)
    self.pkg_image_create(repourl=self.acurl1, exit=1)
    api_obj = self.image_create()

    # Test that adding a HTTPS repo fails if the image does not
    # contain the trust anchor to verify the server's identity.
    self.pkg("set-publisher -k {key} -c {cert} -p {url}".format(
        url=self.acurl1,
        cert=os.path.join(self.cs_dir,
            self.get_cli_cert("test")),
        key=os.path.join(self.keys_dir,
            self.get_cli_key("test")),
    ), exit=1)

    # Add the trust anchor needed to verify the server's identity to
    # the image.
    self.seed_ta_dir("ta7")

    self.pkg("set-publisher -k {key} -c {cert} -p {url}".format(
        url=self.acurl1,
        cert=os.path.join(self.cs_dir,
            self.get_cli_cert("test")),
        key=os.path.join(self.keys_dir,
            self.get_cli_key("test")),
    ))
    api_obj = self.get_img_api_obj()
    self._api_install(api_obj, ["example_pkg"])

    # Verify that if the image location changes, SSL operations
    # are still possible.  (The paths to key and cert should be
    # updated on load.)
    opath = self.img_path()
    npath = opath.replace("image0", "new.image")
    portable.rename(opath, npath)
    odebug = DebugValues["ssl_ca_file"]
    DebugValues["ssl_ca_file"] = odebug.replace("image0",
        "new.image")
    self.pkg("-R {0} refresh --full test".format(npath))

    # Listing the test publisher causes its cert and key to be
    # validated.
    self.pkg("-R {0} publisher test".format(npath))
    assert os.path.join("new.image", "var", "pkg", "ssl") in \
        self.output

    # Restore image to original location.
    portable.rename(npath, opath)
    DebugValues["ssl_ca_file"] = odebug

    # verify that we can reach the repository using a HTTPS-capable
    # HTTP proxy.
    self.image_create()
    self.seed_ta_dir("ta7")
    self.pkg("set-publisher --proxy {proxy} "
        "-k {key} -c {cert} -p {url}".format(
        url=self.acurl1,
        cert=os.path.join(self.cs_dir,
            self.get_cli_cert("test")),
        key=os.path.join(self.keys_dir,
            self.get_cli_key("test")),
        proxy=self.proxyurl))
    self.pkg("install example_pkg")

    # Now try to use the bad proxy, ensuring that we cannot set
    # the publisher (and verifying that we were indeed using the
    # proxy previously)
    bad_proxyurl = self.proxyurl.replace(str(self.proxy_port),
        str(self.bad_proxy_port))
    self.image_create()
    self.seed_ta_dir("ta7")
    self.pkg("set-publisher --proxy {proxy} "
        "-k {key} -c {cert} -p {url}".format(
        url=self.acurl1,
        cert=os.path.join(self.cs_dir,
            self.get_cli_cert("test")),
        key=os.path.join(self.keys_dir,
            self.get_cli_key("test")),
        proxy=bad_proxyurl), exit=1)

    # Set the bad proxy in the image, verify we can't refresh,
    # then use an OS environment override to force the use of a
    # good proxy.
    self.pkg("set-publisher --no-refresh --proxy {proxy} "
        "-k {key} -c {cert} -g {url} test".format(
        url=self.acurl1,
        cert=os.path.join(self.cs_dir,
            self.get_cli_cert("test")),
        key=os.path.join(self.keys_dir,
            self.get_cli_key("test")),
        proxy=bad_proxyurl), exit=0)
    self.pkg("refresh", exit=1)
    proxy_env = {"https_proxy": self.proxyurl}
    self.pkg("refresh", env_arg=proxy_env)
    self.pkg("install example_pkg", env_arg=proxy_env)
portable.remove(tmpfile) raise UpdateLogException, \ "Package %s is already in the catalog" % \ c tfile.write(c) # Write the new entries to the catalog tfile.seek(0, os.SEEK_END) tfile.writelines(add_lines) if len(unknown_lines) > 0: tfile.writelines(unknown_lines) tfile.close() pfile.close() os.chmod(tmpfile, catalog.ServerCatalog.file_mode) portable.rename(tmpfile, catpath) # Now re-write npkgs and Last-Modified in attributes file afile = file(os.path.normpath(os.path.join(path, "attrs")), "r") attrre = re.compile('^S ([^:]*): (.*)') for entry in afile: m = attrre.match(entry) if m != None: attrs[m.group(1)] = m.group(2) afile.close() # Update the attributes we care about attrs["npkgs"] = npkgs + added
if e.errno != errno.ENOENT: raise else: return try: portable.chown(temp, owner, group) except OSError, e: if e.errno != errno.EPERM: raise # XXX There's a window where final_path doesn't exist, but we # probably don't care. if do_content and old_path: try: portable.rename(final_path, old_path) except OSError, e: if e.errno != errno.ENOENT: # Only care if file isn't gone already. raise # This is safe even if temp == final_path. portable.rename(temp, final_path) # Handle timestamp if specified (and content was installed). if do_content and "timestamp" in self.attrs: t = misc.timestamp_to_time(self.attrs["timestamp"]) try: os.utime(final_path, (t, t)) except OSError, e: if e.errno != errno.EACCES:
class UpdateLog(object):
    """The update log is a mechanism that allows clients and servers
    to make incremental updates to their package catalogs.

    The server logs whether it has added or removed a package, the
    time when the action occurred, and the name of the package added
    or removed.  The client requests a list of actions that have been
    applied to the server's catalog since a particular time in the
    past.  The server is then able to send this list of actions,
    allowing the client to apply these changes to its catalog.  This
    allows the client to obtain incremental updates to its catalog,
    instead of having to download an entire (and largely duplicated)
    catalog each time a refresh is requested.

    The UpdateLog must have an associated catalog; however, Catalogs
    are not required to have an UpdateLog.  The UpdateLog allows
    catalogs to support incremental updates.

    The catalog format is a + or -, an isoformat timestamp, and a
    catalog entry in server-side format.  They must be in order and
    separated by spaces."""

    def __init__(self, update_root, cat, maxfiles=336):
        """Create an instance of the UpdateLog.

        "update_root" is the root directory for the update log files;
        it is created if it does not exist.

        "cat" is the catalog whose changes this log records.

        "maxfiles" is the maximum number of logfiles that the
        UpdateLog will keep.  A new file is added for each hour in
        which there is an update.  The default value of 336 means
        that we keep 336 hours, or 14 days worth of log history."""

        self.rootdir = update_root
        self.maxfiles = maxfiles
        self.catalog = cat
        # Datetimes bounding the updates currently held in the log.
        self.first_update = None
        self.last_update = None
        # Bookkeeping for the on-disk hourly logfiles.
        self.curfiles = 0
        self.logfiles = []
        self.logfile_size = {}
        # Serializes catalog additions and log writes.
        self.updatelog_lock = threading.Lock()

        if not os.path.exists(update_root):
            os.makedirs(update_root)

        self._setup_logfiles()

    def add_package(self, pfmri, critical=False):
        """Record that the catalog has added "pfmri".

        Returns the timestamp assigned by the catalog."""

        self.updatelog_lock.acquire()
        try:
            # First add FMRI to catalog
            ts = self.catalog.add_fmri(pfmri, critical)

            # Now add update to updatelog
            self._check_logs()

            if critical:
                entry_type = "C"
            else:
                entry_type = "V"

            # The format for catalog C and V records is described
            # in the docstrings for the Catalog class.
            logstr = "+ %s %s %s\n" % \
                (ts.isoformat(), entry_type,
                pfmri.get_fmri(anarchy=True))

            self.__append_to_log(logstr)

            self.last_update = ts
        finally:
            self.updatelog_lock.release()

        return ts

    def _begin_log(self):
        """Return the path to a logfile.  If we haven't written any
        updates yet, do some additional bookkeeping.

        Returns a (path, filename) tuple; the filename encodes the
        current hour (YYYYMMDDHH)."""

        filenm = time.strftime("%Y%m%d%H")

        ftime = datetime.datetime(
            *time.strptime(filenm, "%Y%m%d%H")[0:6])
        # NOTE(review): "delta" is computed but never used here.
        delta = datetime.timedelta(hours=1)

        path = os.path.join(self.rootdir, filenm)

        if filenm not in self.logfiles:
            self.logfiles.append(filenm)
            self.curfiles += 1

        if not self.first_update:
            self.first_update = ftime

        return path, filenm

    def _check_logs(self):
        """Check to see if maximum number of logfiles has been
        exceeded.  If so, rotate the logs.  Also, if a log is open,
        check to see if it needs to be closed."""

        if self.curfiles < self.maxfiles:
            return

        excess = self.curfiles - self.maxfiles

        # Oldest entries are at the front of logfiles.
        to_remove = self.logfiles[0:excess]

        for r in to_remove:
            filepath = os.path.join(self.rootdir, "%s" % r)
            os.unlink(filepath)
            self.curfiles -= 1
            if r in self.logfile_size:
                del self.logfile_size[r]

        del self.logfiles[0:excess]

        # Recompute the earliest update still covered by the log.
        self.first_update = datetime.datetime(
            *time.strptime(self.logfiles[0], "%Y%m%d%H")[0:6])

    def __append_to_log(self, logstr):
        """Write the string logstr into the proper update log.  This
        routine copies the existing contents of the log into a
        temporary file, and then renames the new logfile into
        place."""

        # Get the path to the logfile, as well as its name
        logpath, logfile = self._begin_log()

        # Create a temporary file for new data
        tmp_num, tmpfile = tempfile.mkstemp(dir=self.rootdir)

        # Use fdopen since mkstemp gives us a filehandle
        try:
            tfile = os.fdopen(tmp_num, "w")
        except OSError:
            portable.remove(tmpfile)
            raise

        # Try to open logfile readonly.  If it doesn't exist,
        # create a new one, and then re-open it readonly.
        try:
            lfile = file(logpath, "rb")
        except IOError, e:
            if e.errno == errno.ENOENT:
                # Creating an empty file
                file(logpath, "wb").close()
                lfile = file(logpath, "rb")
            else:
                portable.remove(tmpfile)
                raise

        # Make sure we're at the start of the file
        lfile.seek(0)

        # Write existing lines in old file into new file.
        # Then append the new line.
        try:
            for entry in lfile:
                tfile.write(entry)
            tfile.write(logstr)
        except Exception:
            portable.remove(tmpfile)
            raise

        # If this routine is updating a logfile that already
        # has size information, replace the size information
        # with the size of the new file.  Use tell to grab the
        # offset at the end of the file, instead of stat.
        # (At least in this case)
        if logfile in self.logfile_size:
            self.logfile_size[logfile] = tfile.tell()

        lfile.close()
        tfile.close()

        # Change the permissions on the tempfile, since
        # mkstemp uses mode 600.  Rename the tempfile into
        # place as the new logfile.
        try:
            os.chmod(tmpfile, catalog.ServerCatalog.file_mode)
            portable.rename(tmpfile, logpath)
        except EnvironmentError:
            portable.remove(tmpfile)
            raise
def lookup(self, hashval, opener=False): """Find the file for hashval. The "hashval" parameter contains the name of the file to be found. The "opener" parameter determines whether the function will return a path or an open file handle.""" cur_full_path, dest_full_path = self.__select_path(hashval, True) if not cur_full_path: return None # If the depot isn't readonly and the file isn't in the location # that the primary layout thinks it should be, try to move the # file into the right place. if dest_full_path != cur_full_path and not self.readonly: p_sdir = os.path.dirname(cur_full_path) try: # Attempt to move the file from the old location # to the preferred location. try: portable.rename(cur_full_path, dest_full_path) except OSError, e: if e.errno != errno.ENOENT: raise p_ddir = os.path.dirname( dest_full_path) if os.path.isdir(p_ddir): raise try: os.makedirs(p_ddir) except EnvironmentError, e: if e.errno == errno.EACCES or \ e.errno == errno.EROFS: raise FMPermissionsException( e.filename) # If directory creation failed # due to EEXIST, but the entry # it failed for isn't the # immediate parent, assume # there's a larger problem # and re-raise the exception. # For file_manager, this is # believed to be unlikely. if not (e.errno == errno.EEXIST and e.filename == p_ddir): raise portable.rename(cur_full_path, dest_full_path) # Since the file has been moved, point at the # new destination *before* attempting to remove # the (now possibly empty) parent directory of # of the source file. cur_full_path = dest_full_path # This may fail because other files can still # exist in the parent path for the source, so # must be done last. os.removedirs(p_sdir)
def _recv_updates(filep, path, cts):
    """A static method that takes a file-like object, a path, and a
    timestamp.  It reads a stream as an incoming updatelog and
    modifies the catalog on disk.

    'filep' yields text lines of the form "+ <isoformat-ts> <type>
    <entry>"; entries newer than 'cts' are merged into the catalog.

    'path' is the directory holding the "catalog" and "attrs" files.

    'cts' is the client's current catalog timestamp (string form
    accepted by catalog.ts_to_datetime).

    Returns True on success.  Raises fmri.IllegalFmri on a bad
    package line and UpdateLogException if an incoming entry is
    already present in the catalog.

    BUG FIX: this function uses Python 3 syntax ("except ... as",
    str.format) but previously called the Python-2-only builtin
    file() and opened the catalog in binary mode, so the str
    comparisons below (c[0] in tuple("CV"), c in add_lines) could
    never match bytes.  open() in text mode is used instead."""

    if not os.path.exists(path):
        os.makedirs(path)

    # Build a list of FMRIs that this update would add, check to
    # make sure that they aren't present in the catalog, then
    # append the fmris.
    mts = catalog.ts_to_datetime(cts)
    cts = mts
    pts = mts
    added = 0
    npkgs = 0
    add_lines = []
    unknown_lines = []
    bad_fmri = None
    attrs = {}

    for s in filep:
        l = s.split(None, 3)
        if len(l) < 4:
            continue
        elif l[2] not in catalog.known_prefixes:
            # Add unknown line directly to catalog.
            # This can be post-processed later, when it
            # becomes known.
            #
            # XXX Notify user that unknown entry was added?
            ts = catalog.ts_to_datetime(l[1])
            if ts > cts:
                if ts > mts:
                    pts = mts
                    mts = ts
                line = "{0} {1}\n".format(l[2], l[3])
                unknown_lines.append(line)
        elif l[0] == "+":
            # This is a known entry type.
            # Create a list of FMRIs to add, since
            # additional inspection is required
            ts = catalog.ts_to_datetime(l[1])
            if ts > cts:
                if ts > mts:
                    pts = mts
                    mts = ts

                # The format for C and V records
                # is described in the Catalog's
                # docstring.
                if l[2] in tuple("CV"):
                    try:
                        f = fmri.PkgFmri(l[3])
                    except fmri.IllegalFmri as e:
                        bad_fmri = e
                        # Roll mts back so a retried
                        # transfer resumes correctly.
                        mts = pts
                        continue

                    line = "{0} {1} {2} {3}\n".format(
                        l[2], "pkg", f.pkg_name,
                        f.version)
                    add_lines.append(line)
                    added += 1

    # If we got a parse error on FMRIs and transfer
    # wasn't truncated, raise a retryable transport error.
    if bad_fmri:
        raise bad_fmri

    # Verify that they aren't already in the catalog
    catpath = os.path.normpath(os.path.join(path, "catalog"))

    tmp_num, tmpfile = tempfile.mkstemp(dir=path)
    tfile = os.fdopen(tmp_num, 'w')

    try:
        # Text mode so each line compares equal to the str
        # entries built above.
        pfile = open(catpath, "r")
    except IOError as e:
        if e.errno == errno.ENOENT:
            # Creating an empty file
            open(catpath, "w").close()
            pfile = open(catpath, "r")
        else:
            tfile.close()
            portable.remove(tmpfile)
            raise
    pfile.seek(0)

    for c in pfile:
        if c[0] in tuple("CV"):
            npkgs += 1
        if c in add_lines:
            pfile.close()
            tfile.close()
            portable.remove(tmpfile)
            raise UpdateLogException(
                "Package {0} is already in the catalog".format(
                c))
        tfile.write(c)

    # Write the new entries to the catalog
    tfile.seek(0, os.SEEK_END)
    tfile.writelines(add_lines)
    if len(unknown_lines) > 0:
        tfile.writelines(unknown_lines)
    tfile.close()
    pfile.close()

    # mkstemp creates mode-600 files; widen before renaming the
    # new catalog into place.
    os.chmod(tmpfile, catalog.ServerCatalog.file_mode)
    portable.rename(tmpfile, catpath)

    # Now re-write npkgs and Last-Modified in attributes file
    afile = open(os.path.normpath(os.path.join(path, "attrs")), "r")
    attrre = re.compile('^S ([^:]*): (.*)')

    for entry in afile:
        m = attrre.match(entry)
        if m is not None:
            attrs[m.group(1)] = m.group(2)

    afile.close()

    # Update the attributes we care about
    attrs["npkgs"] = npkgs + added
    attrs["Last-Modified"] = mts.isoformat()

    # Write attributes back out
    apath = os.path.normpath(os.path.join(path, "attrs"))
    tmp_num, tmpfile = tempfile.mkstemp(dir=path)
    tfile = os.fdopen(tmp_num, 'w')

    for a in attrs.keys():
        s = "S {0}: {1}\n".format(a, attrs[a])
        tfile.write(s)

    tfile.close()
    os.chmod(tmpfile, catalog.ServerCatalog.file_mode)
    portable.rename(tmpfile, apath)

    return True
except IOError: try: os.makedirs(os.path.dirname(mfst_path)) except OSError, e: if e.errno != errno.EEXIST: raise mfile = file(tmp_path, "w") # # We specifically avoid sorting manifests before writing # them to disk-- there's really no point in doing so, since # we'll sort actions globally during packaging operations. # mfile.write(self.tostr_unsorted()) mfile.close() portable.rename(tmp_path, mfst_path) def get_variants(self, name): if name not in self.attributes: return None variants = self.attributes[name] if not isinstance(variants, str): return variants return [variants] def get_all_variants(self): """Return a dictionary mapping variant tags to their values.""" return variant.VariantSets(dict(( (name, self.attributes[name]) for name in self.attributes if name.startswith("variant.")
def test_01_basics(self):
    """Test that adding a https publisher works and that a package
    can be installed from that publisher.

    Older (Python 2 era) variant of this test using %-formatting and
    fixed cs1_ta6 key/cert names; exercises image creation without
    client certs, trust-anchor seeding, install, image relocation,
    and HTTPS proxying through good and bad proxies."""
    self.ac.start()

    # Test that creating an image using a HTTPS repo without
    # providing any keys or certificates fails.
    self.assertRaises(TransportFailures, self.image_create,
        self.acurl)
    self.pkg_image_create(repourl=self.acurl, exit=1)
    api_obj = self.image_create()

    # Test that adding a HTTPS repo fails if the image does not
    # contain the trust anchor to verify the server's identity.
    self.pkg("set-publisher -k %(key)s -c %(cert)s -p %(url)s" % {
        "url": self.acurl,
        "cert": os.path.join(self.cs_dir, "cs1_ta6_cert.pem"),
        "key": os.path.join(self.keys_dir, "cs1_ta6_key.pem"),
    }, exit=1)

    # Add the trust anchor needed to verify the server's identity to
    # the image.
    self.seed_ta_dir("ta7")

    self.pkg("set-publisher -k %(key)s -c %(cert)s -p %(url)s" % {
        "url": self.acurl,
        "cert": os.path.join(self.cs_dir, "cs1_ta6_cert.pem"),
        "key": os.path.join(self.keys_dir, "cs1_ta6_key.pem"),
    })
    api_obj = self.get_img_api_obj()
    self._api_install(api_obj, ["example_pkg"])

    # Verify that if the image location changes, SSL operations
    # are still possible.  (The paths to key and cert should be
    # updated on load.)
    opath = self.img_path()
    npath = opath.replace("image0", "new.image")
    portable.rename(opath, npath)
    odebug = DebugValues["ssl_ca_file"]
    DebugValues["ssl_ca_file"] = odebug.replace("image0",
        "new.image")
    self.pkg("-R %s refresh --full test" % npath)

    # Listing the test publisher causes its cert and key to be
    # validated.
    self.pkg("-R %s publisher test" % npath)
    assert os.path.join("new.image", "var", "pkg", "ssl") in \
        self.output

    # Restore image to original location.
    portable.rename(npath, opath)
    DebugValues["ssl_ca_file"] = odebug

    # verify that we can reach the repository using a HTTPS-capable
    # HTTP proxy.
    self.image_create()
    self.seed_ta_dir("ta7")
    self.pkg("set-publisher --proxy %(proxy)s "
        "-k %(key)s -c %(cert)s -p %(url)s" % {
        "url": self.acurl,
        "cert": os.path.join(self.cs_dir, "cs1_ta6_cert.pem"),
        "key": os.path.join(self.keys_dir, "cs1_ta6_key.pem"),
        "proxy": self.proxyurl})
    self.pkg("install example_pkg")

    # Now try to use the bad proxy, ensuring that we cannot set
    # the publisher (and verifying that we were indeed using the
    # proxy previously)
    bad_proxyurl = self.proxyurl.replace(str(self.proxy_port),
        str(self.bad_proxy_port))
    self.image_create()
    self.seed_ta_dir("ta7")
    self.pkg("set-publisher --proxy %(proxy)s "
        "-k %(key)s -c %(cert)s -p %(url)s" % {
        "url": self.acurl,
        "cert": os.path.join(self.cs_dir, "cs1_ta6_cert.pem"),
        "key": os.path.join(self.keys_dir, "cs1_ta6_key.pem"),
        "proxy": bad_proxyurl}, exit=1)

    # Set the bad proxy in the image, verify we can't refresh,
    # then use an OS environment override to force the use of a
    # good proxy.
    self.pkg("set-publisher --no-refresh --proxy %(proxy)s "
        "-k %(key)s -c %(cert)s -g %(url)s test" % {
        "url": self.acurl,
        "cert": os.path.join(self.cs_dir, "cs1_ta6_cert.pem"),
        "key": os.path.join(self.keys_dir, "cs1_ta6_key.pem"),
        "proxy": bad_proxyurl}, exit=0)
    self.pkg("refresh", exit=1)
    proxy_env = {"https_proxy": self.proxyurl}
    self.pkg("refresh", env_arg=proxy_env)
    self.pkg("install example_pkg", env_arg=proxy_env)
except tx.InvalidContentException, e: mfile.subtract_progress(e.size) e.request = s repostats.record_error() failedreqs.append(s) failures.append(e) if not filelist: filelist = failedreqs continue final_path = os.path.normpath( os.path.join(completed_dir, misc.hash_file_name(s))) finaldir = os.path.dirname(final_path) self._makedirs(finaldir) portable.rename(dl_path, final_path) mfile.make_openers(s, final_path) # Return if everything was successful if not filelist and len(errlist) == 0: return if len(failedreqs) > 0 and len(failures) > 0: failures = filter(lambda x: x.request in failedreqs, failures) tfailurex = tx.TransportFailures() for f in failures: tfailurex.append(f) raise tfailurex def get_versions(self, pub):
tfile.write(entry) tfile.write(pkgstr) except Exception: portable.remove(tmpfile) raise # Close our open files pfile.close() tfile.close() # Set the permissions on the tempfile correctly. # Mkstemp creates files as 600. Rename the new # cataog on top of the old one. try: os.chmod(tmpfile, self.file_mode) portable.rename(tmpfile, self.catalog_file) except EnvironmentError: portable.remove(tmpfile) raise @staticmethod def cache_fmri(d, pfmri, pub, known=True): """Store the fmri in a data structure 'd' for fast lookup. 'd' is a dict that maps each package name to another dictionary, itself mapping: * each version string, which maps to a tuple of: -- the fmri object -- a dict of publisher prefixes with each value indicating catalog presence
except IOError: try: os.makedirs(os.path.dirname(mfst_path)) except OSError, e: if e.errno != errno.EEXIST: raise mfile = file(tmp_path, "w") # # We specifically avoid sorting manifests before writing # them to disk-- there's really no point in doing so, since # we'll sort actions globally during packaging operations. # mfile.write(self.tostr_unsorted()) mfile.close() portable.rename(tmp_path, mfst_path) def get_variants(self, name): if name not in self.attributes: return None variants = self.attributes[name] if not isinstance(variants, str): return variants return [variants] def get_all_variants(self): """Return a dictionary mapping variant tags to their values.""" return variant.VariantSets( dict(((name, self.attributes[name]) for name in self.attributes if name.startswith("variant."))))
# moved to a new location. To prevent disruption of # service or other race conditions, rename the source # file into the old place first. try: portable.rename(src_path, cur_full_path) except EnvironmentError, e: if e.errno == errno.EACCES or \ e.errno == errno.EROFS: raise FMPermissionsException(e.filename) raise src_path = cur_full_path p_dir = os.path.dirname(dest_full_path) try: # Move the file into place. portable.rename(src_path, dest_full_path) except EnvironmentError, e: if e.errno == errno.ENOENT and not os.path.isdir(p_dir): try: os.makedirs(p_dir) except EnvironmentError, e: if e.errno == errno.EACCES or \ e.errno == errno.EROFS: raise FMPermissionsException( e.filename) # If directory creation failed due to # EEXIST, but the entry it failed for # isn't the immediate parent, assume # there's a larger problem and re-raise # the exception. For file_manager, this # is believed to be unlikely.
# and drive on. if e.errno != errno.ENOENT: raise else: return try: portable.chown(temp, owner, group) except OSError, e: if e.errno != errno.EPERM: raise # XXX There's a window where final_path doesn't exist, but we # probably don't care. if do_content and pres_type == "renameold": portable.rename(final_path, old_path) # This is safe even if temp == final_path. portable.rename(temp, final_path) # Handle timestamp if specified (and content was installed). if do_content and "timestamp" in self.attrs: t = misc.timestamp_to_time(self.attrs["timestamp"]) try: os.utime(final_path, (t, t)) except OSError, e: if e.errno != errno.EACCES: raise # On Windows, the time cannot be changed on a # read-only file
self._verify_content(mfile[s][0], dl_path) except tx.InvalidContentException, e: mfile.subtract_progress(e.size) e.request = s repostats.record_error() failedreqs.append(s) failures.append(e) if not filelist: filelist = failedreqs continue final_path = os.path.normpath(os.path.join(completed_dir, misc.hash_file_name(s))) finaldir = os.path.dirname(final_path) self._makedirs(finaldir) portable.rename(dl_path, final_path) mfile.make_openers(s, final_path) # Return if everything was successful if not filelist and len(errlist) == 0: return if len(failedreqs) > 0 and len(failures) > 0: failures = filter(lambda x: x.request in failedreqs, failures) tfailurex = tx.TransportFailures() for f in failures: tfailurex.append(f) raise tfailurex def get_versions(self, pub):
portable.remove(tmpfile) raise UpdateLogException, \ "Package %s is already in the catalog" % \ c tfile.write(c) # Write the new entries to the catalog tfile.seek(0, os.SEEK_END) tfile.writelines(add_lines) if len(unknown_lines) > 0: tfile.writelines(unknown_lines) tfile.close() pfile.close() os.chmod(tmpfile, catalog.ServerCatalog.file_mode) portable.rename(tmpfile, catpath) # Now re-write npkgs and Last-Modified in attributes file afile = file(os.path.normpath(os.path.join(path, "attrs")), "r") attrre = re.compile('^S ([^:]*): (.*)') for entry in afile: m = attrre.match(entry) if m != None: attrs[m.group(1)] = m.group(2) afile.close() # Update the attributes we care about attrs["npkgs"] = npkgs + added attrs["Last-Modified"] = mts.isoformat()