def __in_gz(self, ignore_errors=False):
        """Check if we're executing in the global zone.  Note that
        this doesn't tell us anything about the image we're
        manipulating, just the environment that we're running in.

        'ignore_errors' if true, assume we're in the global zone
        when the zone name can't be determined."""

        # Identity comparison with None per PEP 8 ("is not None").
        if self.__in_gz_cached is not None:
                return self.__in_gz_cached

        # check if we're running in the gz
        try:
                self.__in_gz_cached = (_zonename() == ZONE_GLOBAL)
        except OSError as e:
                # W0212 Access to a protected member
                # pylint: disable=W0212
                if ignore_errors:
                        # default to being in the global zone
                        return True
                raise apx._convert_error(e)
        except apx.LinkedImageException as e:
                if ignore_errors:
                        # default to being in the global zone
                        return True
                raise e

        return self.__in_gz_cached
def __list_zones_cached(self, nocache=False, ignore_errors=False):
        """List the zones associated with the current image.  Since
        this involves forking and running zone commands, cache the
        results.

        'nocache' if true, discard any previously cached zone list.

        'ignore_errors' is passed through to __in_gz()."""

        # if nocache is set then delete any cached children
        if nocache:
                self.__zoneadm_list_cache = None

        # try to return the cached children
        if self.__zoneadm_list_cache is not None:
                assert isinstance(self.__zoneadm_list_cache, list)
                return self.__zoneadm_list_cache

        # see if the target image supports zones
        if not self.__zones_supported():
                self.__zoneadm_list_cache = []
                return self.__list_zones_cached()

        # zones are only visible when running in the global zone
        if not self.__in_gz(ignore_errors=ignore_errors):
                self.__zoneadm_list_cache = []
                return self.__list_zones_cached()

        # find zones
        try:
                zdict = _list_zones(self.__img.root)
        except OSError as e:
                # Python 3 compatible "except ... as" syntax (was
                # "except OSError, e").
                # W0212 Access to a protected member
                # pylint: disable=W0212
                if ignore_errors:
                        # don't cache the result
                        return []
                raise apx._convert_error(e)
def __rpc_server_fork(self, img_path, server_cmd_pipe,
    server_prog_pipe_fobj):
        """Fork off a "pkg remote" server process.

        'img_path' is the path to the image to manipulate.

        'server_cmd_pipe' is the server side of the command pipe which
        the server will use to receive RPC requests.

        'server_prog_pipe_fobj' is the server side of the progress
        pipe which the server will write to to indicate progress."""

        pkg_cmd = pkg.misc.api_pkgcmd() + [
            "-R", img_path,
            "--runid=%s" % global_settings.client_runid,
            "remote",
            "--ctlfd=%s" % server_cmd_pipe,
            "--progfd=%s" % server_prog_pipe_fobj.fileno(),
        ]

        self.__debug_msg("RPC server cmd: %s" % (" ".join(pkg_cmd)))

        # create temporary files to log standard output and error from
        # the RPC server.
        fstdout = tempfile.TemporaryFile()
        fstderr = tempfile.TemporaryFile()

        try:
                p = pkg.pkgsubprocess.Popen(pkg_cmd,
                    stdout=fstdout, stderr=fstderr)
        except OSError as e:
                # Python 3 compatible "except ... as" syntax (was
                # "except OSError, e").
                # Access to protected member; pylint: disable=W0212
                raise apx._convert_error(e)
def __rpc_server_fork(self, img_path, server_cmd_pipe,
    server_prog_pipe_fobj):
        """Fork off a "pkg remote" server process.

        'img_path' is the path to the image to manipulate.

        'server_cmd_pipe' is the server side of the command pipe which
        the server will use to receive RPC requests.

        'server_prog_pipe_fobj' is the server side of the progress
        pipe which the server will write to to indicate progress."""

        opts = [
            "-R", img_path,
            "--runid={0}".format(global_settings.client_runid),
            "remote",
            "--ctlfd={0}".format(server_cmd_pipe),
            "--progfd={0}".format(server_prog_pipe_fobj.fileno()),
        ]
        rpc_cmd = pkg.misc.api_pkgcmd() + opts

        self.__debug_msg("RPC server cmd: {0}".format(" ".join(rpc_cmd)))

        # Temporary files capture the server's standard output and
        # error for later inspection.
        log_out = tempfile.TemporaryFile()
        log_err = tempfile.TemporaryFile()

        try:
                # Under Python 3.4, os.pipe() returns non-inheritable
                # file descriptors.  On UNIX, subprocess makes file
                # descriptors of the pass_fds parameter inheritable.
                # Since our pkgsubprocess uses posix_pspawn* and has
                # no interface for pass_fds, we reuse the standard
                # subprocess module under Python 3.
                # unexpected-keyword-arg 'pass_fds';
                # pylint: disable=E1123
                # Redefinition of server_proc type
                if six.PY2:
                        server_proc = pkg.pkgsubprocess.Popen(rpc_cmd,
                            stdout=log_out, stderr=log_err)
                else:
                        server_proc = subprocess.Popen(rpc_cmd,
                            stdout=log_out, stderr=log_err,
                            pass_fds=(server_cmd_pipe,
                                server_prog_pipe_fobj.fileno()))
        except OSError as e:
                # Access to protected member; pylint: disable=W0212
                raise apx._convert_error(e)

        # initalization successful, update RPC server state
        self.__rpc_server_proc = server_proc
        self.__rpc_server_fstdout = log_out
        self.__rpc_server_fstderr = log_err
        self.__rpc_server_prog_pipe_fobj = server_prog_pipe_fobj
def _load(self, fobj):
        """Load a json encoded representation of a plan description
        from the specified file object."""

        assert self.state == UNEVALUATED

        try:
                fobj.seek(0)
                # NOTE(review): the 'encoding' keyword is only
                # accepted by Python 2's json module; it was removed
                # in Python 3 -- confirm before running under py3.
                state = json.load(fobj, encoding="utf-8")
        except OSError as e:
                # Python 3 compatible "except ... as" syntax (was
                # "except OSError, e").
                # Access to protected member; pylint: disable=W0212
                raise apx._convert_error(e)
def _save(self, fobj, reset_volatiles=False):
        """Save a json encoded representation of this plan
        description objects into the specified file object."""

        state = PlanDescription.getstate(self,
            reset_volatiles=reset_volatiles)
        try:
                fobj.truncate()
                # NOTE(review): the 'encoding' keyword is only
                # accepted by Python 2's json module; it was removed
                # in Python 3 -- confirm before running under py3.
                json.dump(state, fobj, encoding="utf-8")
                fobj.flush()
        except OSError as e:
                # Python 3 compatible "except ... as" syntax (was
                # "except OSError, e").
                # Access to protected member; pylint: disable=W0212
                raise apx._convert_error(e)
def __list_zones_cached(self, nocache=False, ignore_errors=False):
        """List the zones associated with the current image.  Since
        this involves forking and running zone commands, cache the
        results.

        'nocache' if true, discard any previously cached zone list.

        'ignore_errors' is passed through to __in_gz()."""

        # if nocache is set then delete any cached children
        if nocache:
                self.__zoneadm_list_cache = None

        # try to return the cached children
        if self.__zoneadm_list_cache is not None:
                assert isinstance(self.__zoneadm_list_cache, list)
                return self.__zoneadm_list_cache

        # see if the target image supports zones
        if not self.__zones_supported():
                self.__zoneadm_list_cache = []
                return self.__list_zones_cached()

        # zones are only visible when running in the global zone
        if not self.__in_gz(ignore_errors=ignore_errors):
                self.__zoneadm_list_cache = []
                return self.__list_zones_cached()

        # find zones
        try:
                zdict = _list_zones(self.__img.root,
                    self.__linked.get_path_transform())
        except OSError as e:
                # W0212 Access to a protected member
                # pylint: disable=W0212
                if ignore_errors:
                        # don't cache the result
                        return []
                raise apx._convert_error(e)
        except apx.LinkedImageException as e:
                if ignore_errors:
                        # don't cache the result
                        return []
                raise e

        # convert zone names into LinkedImageName objects
        zlist = []
        # state is unused
        # pylint: disable=W0612
        for zone, (path, state) in six.iteritems(zdict):
                lin = li.LinkedImageName("{0}:{1}".format(
                    self.__pname, zone))
                zlist.append([lin, path])

        self.__zoneadm_list_cache = zlist
        return self.__list_zones_cached()
def _save(self, fobj, reset_volatiles=False):
        """Serialize this plan description into 'fobj' as json.

        'reset_volatiles' is forwarded to PlanDescription.getstate()."""

        plan_state = PlanDescription.getstate(self,
            reset_volatiles=reset_volatiles)
        try:
                fobj.truncate()
                json.dump(plan_state, fobj)
                fobj.flush()
        except OSError as e:
                # Access to protected member; pylint: disable=W0212
                raise apx._convert_error(e)
        del plan_state
def _load(self, fobj):
        """Restore this plan description from the json encoded state
        previously written to 'fobj'."""

        assert self.state == UNEVALUATED

        try:
                fobj.seek(0)
                loaded_state = json.load(fobj,
                    object_hook=pkg.misc.json_hook)
        except OSError as e:
                # Access to protected member; pylint: disable=W0212
                raise apx._convert_error(e)

        PlanDescription.setstate(self, loaded_state)
        del loaded_state
def __in_gz(self, ignore_errors=False):
        """Check if we're executing in the global zone.  Note that
        this doesn't tell us anything about the image we're
        manipulating, just the environment that we're running in.

        'ignore_errors' if true, assume we're in the global zone
        when the zone name can't be determined."""

        if self.__in_gz_cached is not None:
                return self.__in_gz_cached

        # check if we're running in the gz
        try:
                self.__in_gz_cached = (_zonename() == ZONE_GLOBAL)
        except OSError as e:
                # Python 3 compatible "except ... as" syntax (was
                # "except OSError, e").
                # W0212 Access to a protected member
                # pylint: disable=W0212
                if ignore_errors:
                        # default to being in the global zone
                        return True
                raise apx._convert_error(e)

        # Previously this fell off the end (implicitly returning
        # None) after a successful check; return the freshly cached
        # value, matching the newer version of this method.
        return self.__in_gz_cached
def __rpc_server_fork(self, img_path, server_cmd_pipe,
    server_prog_pipe_fobj):
        """Fork off a "pkg remote" server process.

        'img_path' is the path to the image to manipulate.

        'server_cmd_pipe' is the server side of the command pipe which
        the server will use to receive RPC requests.

        'server_prog_pipe_fobj' is the server side of the progress
        pipe which the server will write to to indicate progress."""

        server_argv = pkg.misc.api_pkgcmd() + [
            "-R", img_path,
            "--runid={0}".format(global_settings.client_runid),
            "remote",
            "--ctlfd={0}".format(server_cmd_pipe),
            "--progfd={0}".format(server_prog_pipe_fobj.fileno()),
        ]

        self.__debug_msg("RPC server cmd: {0}".format(
            " ".join(server_argv)))

        # Capture the server's standard output and error in temporary
        # files so they can be inspected later.
        out_log = tempfile.TemporaryFile()
        err_log = tempfile.TemporaryFile()

        try:
                server_proc = pkg.pkgsubprocess.Popen(server_argv,
                    stdout=out_log, stderr=err_log)
        except OSError as e:
                # Access to protected member; pylint: disable=W0212
                raise apx._convert_error(e)

        # initalization successful, update RPC server state
        self.__rpc_server_proc = server_proc
        self.__rpc_server_fstdout = out_log
        self.__rpc_server_fstderr = err_log
        self.__rpc_server_prog_pipe_fobj = server_prog_pipe_fobj
def resolve(args, img_dir):
        """Take a list of manifests and resolve any file dependencies,
        first against the other published manifests and then against
        what is installed on the machine.

        'args' is the list of command line arguments (getopt style).

        'img_dir' is the image root to resolve against, or None to
        locate a default image.

        Returns an integer exit code (0 on success)."""

        out_dir = None
        echo_manifest = False
        output_to_screen = False
        suffix = None
        verbose = False
        use_system_to_resolve = True
        constraint_files = []
        extra_external_info = False
        try:
                opts, pargs = getopt.getopt(args, "d:e:Emos:Sv")
        except getopt.GetoptError as e:
                usage(_("illegal global option -- {0}").format(e.opt))
        for opt, arg in opts:
                if opt == "-d":
                        out_dir = arg
                elif opt == "-e":
                        constraint_files.append(arg)
                elif opt == "-E":
                        extra_external_info = True
                elif opt == "-m":
                        echo_manifest = True
                elif opt == "-o":
                        output_to_screen = True
                elif opt == "-s":
                        suffix = arg
                elif opt == "-S":
                        use_system_to_resolve = False
                elif opt == "-v":
                        verbose = True

        if (out_dir or suffix) and output_to_screen:
                usage(_("-o cannot be used with -d or -s"))

        manifest_paths = [os.path.abspath(fp) for fp in pargs]
        for manifest in manifest_paths:
                if not os.path.isfile(manifest):
                        usage(_("The manifest file {0} could not be found.").format(
                            manifest), retcode=2)

        if out_dir:
                out_dir = os.path.abspath(out_dir)
                if not os.path.isdir(out_dir):
                        usage(_("The output directory {0} is not a directory.").format(
                            out_dir), retcode=2)

        provided_image_dir = True
        pkg_image_used = False
        # Identity comparison with None per PEP 8 (was "== None").
        if img_dir is None:
                orig_cwd = None
                try:
                        orig_cwd = os.getcwd()
                except OSError:
                        # May be unreadable by user or have other problem.
                        pass

                img_dir, provided_image_dir = api.get_default_image_root(
                    orig_cwd=orig_cwd)
                if os.environ.get("PKG_IMAGE"):
                        # It's assumed that this has been checked by the above
                        # function call and hasn't been removed from the
                        # environment.
                        pkg_image_used = True

        if not img_dir:
                error(_("Could not find image. Use the -R option or set "
                    "$PKG_IMAGE to the\nlocation of an image."))
                return 1

        system_patterns = misc.EmptyI
        if constraint_files:
                system_patterns = []
                for f in constraint_files:
                        try:
                                with open(f, "rb") as fh:
                                        for l in fh:
                                                l = l.strip()
                                                if l and not l.startswith("#"):
                                                        system_patterns.append(l)
                        except EnvironmentError as e:
                                if e.errno == errno.ENOENT:
                                        error("{0}: '{1}'".format(
                                            e.args[1], e.filename),
                                            cmd="resolve")
                                        return 1
                                raise api_errors._convert_error(e)
                if not system_patterns:
                        error(_("External package list files were provided but "
                            "did not contain any fmri patterns."))
                        return 1
        elif use_system_to_resolve:
                system_patterns = ["*"]

        # Because building an ImageInterface permanently changes the cwd for
        # python, it's necessary to do this step after resolving the paths to
        # the manifests.
        try:
                api_inst = api.ImageInterface(img_dir, CLIENT_API_VERSION,
                    progress.QuietProgressTracker(), None, PKG_CLIENT_NAME,
                    exact_match=provided_image_dir)
        except api_errors.ImageNotFoundException as e:
                if e.user_specified:
                        if pkg_image_used:
                                error(_("No image rooted at '{0}' "
                                    "(set by $PKG_IMAGE)").format(e.user_dir))
                        else:
                                error(_("No image rooted at '{0}'").format(
                                    e.user_dir))
                else:
                        error(_("No image found."))
                return 1
        except api_errors.PermissionsException as e:
                error(e)
                return 1
        except api_errors.ImageFormatUpdateNeeded as e:
                # This should be a very rare error case.
                format_update_error(e)
                return 1

        try:
                pkg_deps, errs, unused_fmris, external_deps = \
                    dependencies.resolve_deps(manifest_paths, api_inst,
                        system_patterns, prune_attrs=not verbose)
        except (actions.MalformedActionError, actions.UnknownActionError) as e:
                error(_("Could not parse one or more manifests because of "
                    "the following line:\n{0}").format(e.actionstr))
                return 1
        except dependencies.DependencyError as e:
                error(e)
                return 1
        except api_errors.ApiException as e:
                error(e)
                return 1
        ret_code = 0

        if output_to_screen:
                ret_code = pkgdeps_to_screen(pkg_deps, manifest_paths,
                    echo_manifest)
        elif out_dir:
                ret_code = pkgdeps_to_dir(pkg_deps, manifest_paths, out_dir,
                    suffix, echo_manifest)
        else:
                ret_code = pkgdeps_in_place(pkg_deps, manifest_paths, suffix,
                    echo_manifest)

        if extra_external_info:
                if constraint_files and unused_fmris:
                        msg(_("\nThe following fmris matched a pattern in a "
                            "constraint file but were not used in\ndependency "
                            "resolution:"))
                        for pfmri in sorted(unused_fmris):
                                msg("\t{0}".format(pfmri))
                if not constraint_files and external_deps:
                        msg(_("\nThe following fmris had dependencies resolve "
                            "to them:"))
                        for pfmri in sorted(external_deps):
                                msg("\t{0}".format(pfmri))

        # Any accumulated dependency errors force a non-zero exit code.
        for e in errs:
                if ret_code == 0:
                        ret_code = 1
                emsg(e)
        return ret_code
# Ignore ResourceWarning (e.g. unclosed files) so a warnings-as-errors
# configuration doesn't abort the client over cleanup noise.
warnings.filterwarnings("ignore", category=ResourceWarning)
try:
        __ret = main_func()
except PipeError:
        # We don't want to display any messages here to prevent
        # possible further broken pipe (EPIPE) errors.
        cleanup(no_msg=True)
        __ret = pkgdefs.EXIT_OOPS
except (KeyboardInterrupt, api_errors.CanceledException):
        # User interrupt / cancellation: clean up and exit quietly.
        cleanup()
        __ret = pkgdefs.EXIT_OOPS
except (actions.ActionError, RuntimeError,
    api_errors.ApiException) as _e:
        # Known error classes: print a short message, no traceback.
        error(_e)
        cleanup()
        __ret = pkgdefs.EXIT_OOPS
except EnvironmentError as _e:
        # Translate OS-level errors into API exceptions for display.
        error(api_errors._convert_error(_e))
        cleanup()
        __ret = pkgdefs.EXIT_OOPS
except SystemExit as _e:
        # Re-raise after cleanup so an explicit sys.exit() keeps its
        # requested exit status.
        cleanup()
        raise _e
except:
        # Anything else is unexpected: dump the traceback and ask the
        # user to file a bug.
        traceback.print_exc()
        error(misc.get_traceback_message())
        __ret = 99
sys.exit(__ret)

# Vim hints
# vim:ts=8:sw=8:et:fdm=marker
# Make all warnings be errors. import warnings warnings.simplefilter('error') if six.PY3: # disable ResourceWarning: unclosed file warnings.filterwarnings("ignore", category=ResourceWarning) try: __ret = main_func() except (pkg.actions.ActionError, trans.TransactionError, RuntimeError, pkg.fmri.FmriError, apx.ApiException) as __e: print("pkgmerge: {0}".format(__e), file=sys.stderr) __ret = EXIT_OOPS except (PipeError, KeyboardInterrupt): __ret = EXIT_OOPS except SystemExit as __e: raise __e except EnvironmentError as __e: error(str(apx._convert_error(__e))) __ret = EXIT_OOPS except Exception as __e: traceback.print_exc() error(misc.get_traceback_message(), exitcode=None) __ret = 99 finally: cleanup() sys.exit(__ret) # Vim hints # vim:ts=8:sw=8:et:fdm=marker
def archive_pkgs(pargs, target, list_newest, all_versions, all_timestamps,
    keep_compresed, raw, recursive, dry_run, dest_xport_cfg, src_uri):
        """Retrieve source package data completely and then archive it.

        NOTE(review): this view of the function is truncated -- the
        per-publisher retrieval loop continues past the last line
        shown here."""

        global cache_dir, download_start, xport, xport_cfg

        target = os.path.abspath(target)
        if os.path.exists(target):
                error(_("Target archive '%s' already "
                    "exists.") % target)
                abort()

        # Open the archive early so that permissions failures, etc. can be
        # detected before actual work is started.
        if not dry_run:
                pkg_arc = pkg.p5p.Archive(target, mode="w")

        basedir = tempfile.mkdtemp(dir=temp_root,
            prefix=global_settings.client_name + "-")
        tmpdirs.append(basedir)

        # Retrieve package data for all publishers.
        any_unmatched = []
        any_matched = []
        invalid_manifests = []
        total_processed = 0
        arc_bytes = 0
        archive_list = []
        for src_pub in xport_cfg.gen_publishers():
                # Root must be per publisher on the off chance that multiple
                # publishers have the same package.
                xport_cfg.pkg_root = os.path.join(basedir, src_pub.prefix)

                tracker = get_tracker()
                msg(_("Retrieving packages for publisher %s ...") %
                    src_pub.prefix)
                # Identity comparison with None per PEP 8 (was "== None").
                if pargs is None or len(pargs) == 0:
                        usage(_("must specify at least one pkgfmri"))

                matches = get_matches(src_pub, tracker, xport, pargs,
                    any_unmatched, any_matched, all_versions,
                    all_timestamps, recursive)
                if not matches:
                        # No matches at all; nothing to do for this
                        # publisher.
                        continue

                # First, retrieve the manifests and calculate package
                # transfer sizes.
                npkgs = len(matches)
                get_bytes = 0
                get_files = 0

                if not recursive:
                        msg(_("Retrieving and evaluating %d package(s)...") %
                            npkgs)

                tracker.manifest_fetch_start(npkgs)

                good_matches = []
                for f in matches:
                        try:
                                m = get_manifest(f, xport_cfg)
                        except apx.InvalidPackageErrors as e:
                                # Python 3 compatible "except ... as"
                                # syntax (was "except ..., e").
                                invalid_manifests.extend(e.errors)
                                continue
                        good_matches.append(f)

                        getb, getf, arcb, arccb = get_sizes(m)
                        get_bytes += getb
                        get_files += getf

                        # Since files are going into the archive, progress
                        # can be tracked in terms of compressed bytes for
                        # the package files themselves.
                        arc_bytes += arccb

                        # Also include the manifest file itself in the
                        # amount of bytes to archive.
                        try:
                                fs = os.stat(m.pathname)
                                arc_bytes += fs.st_size
                        except EnvironmentError as e:
                                raise apx._convert_error(e)

                        tracker.manifest_fetch_progress(completion=True)
# NOTE(review): truncated fragment of resolve()'s constraint-file
# handling (an older, Python-2 revision).  In the full source this
# code is nested inside an "if constraint_files:" block (compare the
# complete version of resolve() elsewhere in this file), so the
# trailing "elif" pairs with that enclosing "if", and the final
# bodiless "except" clause is completed by lines outside this view.
# The "except X, e" spelling is Python 2 only.
system_patterns = []
for f in constraint_files:
        try:
                with open(f, "rb") as fh:
                        for l in fh:
                                l = l.strip()
                                # Skip blank lines and comments.
                                if l and not l.startswith("#"):
                                        system_patterns.append(
                                            l)
        except EnvironmentError, e:
                if e.errno == errno.ENOENT:
                        error("%s: '%s'" %
                            (e.args[1], e.filename),
                            cmd="resolve")
                        return 1
                raise api_errors._convert_error(e)
if not system_patterns:
        error(_("External package list files were provided but "
            "did not contain any fmri patterns."))
        return 1
elif use_system_to_resolve:
        system_patterns = ["*"]

# Because building an ImageInterface permanently changes the cwd for
# python, it's necessary to do this step after resolving the paths to
# the manifests.
try:
        api_inst = api.ImageInterface(img_dir, CLIENT_API_VERSION,
            progress.QuietProgressTracker(), None, PKG_CLIENT_NAME,
            exact_match=provided_image_dir)
except api_errors.ImageNotFoundException, e:
def __place(self, hashval, src_path, pfunc):
        """Add the content at "src_path" to the files under the name
        "hashval".  Returns the path to the inserted file.

        'pfunc' is the placement callable, invoked as
        pfunc(src, dest).

        Raises NeedToModifyReadOnlyFileManager when the manager is
        read-only, FMPermissionsException on EACCES/EROFS, and
        FMInsertionFailure when the source vanishes without the
        destination having been created."""

        if self.readonly:
                raise NeedToModifyReadOnlyFileManager(hashval)

        cur_full_path, dest_full_path = \
            self.__select_path(hashval, True)
        if cur_full_path and cur_full_path != dest_full_path:
                # The file is stored in an old location and needs to be
                # moved to a new location.  To prevent disruption of
                # service or other race conditions, rename the source
                # file into the old place first.
                try:
                        portable.rename(src_path, cur_full_path)
                except EnvironmentError as e:
                        if e.errno == errno.EACCES or \
                            e.errno == errno.EROFS:
                                raise FMPermissionsException(e.filename)
                        raise
                src_path = cur_full_path

        # Retry loop: a failed placement may be recoverable (missing
        # parent directory), in which case the parent is created and
        # the placement attempted again.
        while True:
                try:
                        # Place the file.
                        pfunc(src_path, dest_full_path)
                except EnvironmentError as e:
                        p_dir = os.path.dirname(dest_full_path)
                        if e.errno == errno.ENOENT and \
                            not os.path.isdir(p_dir):
                                # Destination parent is missing; try to
                                # create it, then loop to retry.
                                try:
                                        os.makedirs(p_dir)
                                except EnvironmentError as e:
                                        # NOTE: this rebinds 'e',
                                        # shadowing the outer exception.
                                        if e.errno == errno.EACCES or \
                                            e.errno == errno.EROFS:
                                                raise FMPermissionsException(
                                                    e.filename)
                                        # If directory creation failed
                                        # due to EEXIST, but the entry
                                        # it failed for isn't the
                                        # immediate parent, assume
                                        # there's a larger problem and
                                        # re-raise the exception.  For
                                        # file_manager, this is believed
                                        # to be unlikely.
                                        if not (e.errno == errno.EEXIST and
                                            e.filename == p_dir):
                                                raise
                                # Parent directory created successfully
                                # so loop again to retry place.
                        elif e.errno == errno.ENOENT and \
                            not os.path.exists(src_path):
                                if os.path.exists(dest_full_path):
                                        # Item has already been moved
                                        # into cache by another process;
                                        # nothing more to do.  (This
                                        # could happen during parallel
                                        # publication.)
                                        return dest_full_path
                                raise FMInsertionFailure(src_path,
                                    dest_full_path)
                        elif e.errno == errno.EACCES or \
                            e.errno == errno.EROFS:
                                raise FMPermissionsException(e.filename)
                        elif e.errno != errno.ENOENT:
                                raise apx._convert_error(e)
                else:
                        # Success!
                        break

        # Attempt to remove the parent directory of the file's original
        # location to ensure empty directories aren't left behind.
        if cur_full_path:
                try:
                        os.removedirs(os.path.dirname(cur_full_path))
                except EnvironmentError as e:
                        if e.errno == errno.ENOENT or \
                            e.errno == errno.EEXIST:
                                pass
                        elif e.errno == errno.EACCES or \
                            e.errno == errno.EROFS:
                                raise FMPermissionsException(e.filename)
                        else:
                                raise

        # Return the location of the placed file to the caller.
        return dest_full_path
# NOTE(review): truncated fragment -- this code runs inside the
# per-package republish loop of a larger function; names such as
# 'e', 'f', 'pkgdir', 'processed', 'tracker' and 'targ_pub' are
# bound earlier in that function, outside this view.
abort(err=e)
# Dump data retrieved so far after each successful
# republish to conserve space.
try:
        shutil.rmtree(dest_xport_cfg.incoming_root)
        shutil.rmtree(pkgdir)
        if cache_dir in tmpdirs:
                # If cache_dir is listed in tmpdirs,
                # then it's safe to dump cache contents.
                # Otherwise, it's a user cache directory
                # and shouldn't be dumped.
                shutil.rmtree(cache_dir)
                misc.makedirs(cache_dir)
except EnvironmentError as e:
        # Python 3 compatible "except ... as" syntax (was
        # "except EnvironmentError, e").
        raise apx._convert_error(e)
misc.makedirs(dest_xport_cfg.incoming_root)
processed += 1
tracker.republish_end_pkg(f)

tracker.republish_done()
tracker.reset()

if processed > 0:
        # If any packages were published, trigger an update of
        # the catalog.
        total_processed += processed
        dest_xport.publish_refresh_packages(targ_pub)

# Prevent further use.