def download_dependencies(download_dir, pips_to_download, output_filename):
    """Download the given pip requirements into ``download_dir``.

    The pip command's output is captured into ``output_filename``.
    Does nothing when there is nothing to download.
    """
    if not pips_to_download:
        return
    # NOTE(aababilov): pip has issues with already downloaded files
    if not sh.isdir(download_dir):
        sh.mkdir(download_dir)
    else:
        for leftover in sh.listdir(download_dir, files_only=True):
            sh.unlink(leftover)
    # Clean out any previous paths that we don't want around.
    build_path = sh.joinpths(download_dir, ".build")
    if sh.isdir(build_path):
        sh.deldir(build_path)
    sh.mkdir(build_path)
    cmdline = [
        PIP_EXECUTABLE, '-v',
        'install', '-I', '-U',
        '--download', download_dir,
        '--build', build_path,
        # Don't download wheels since we lack the ability to create
        # rpms from them (until future when we will have it, if ever)...
        "--no-use-wheel",
    ]
    cmdline.extend(segment
                   for requirement in pips_to_download
                   for segment in _split(requirement)
                   if segment)
    sh.execute_save_output(cmdline, output_filename)
def install(self):
    """Process image installation."""
    url_fn = self._extract_url_fn()
    if not url_fn:
        raise IOError("Can not determine file name from url: %r" % self._url)
    if self._cache.is_valid:
        # Cached copy is usable; just reload the saved metadata.
        LOG.info("Found valid cached image+metadata at: %s",
                 colorizer.quote(self._cache.path))
        image_details = self._cache.load_details()
    else:
        sh.mkdir(self._cache.path)
        if self._is_url_local():
            # Already on disk; nothing to fetch.
            fetched_fn = self._url
        else:
            fetched_fn, bytes_down = down.UrlLibDownloader(
                self._url, sh.joinpths(self._cache.path, url_fn)).download()
            LOG.debug("For url %s we downloaded %s bytes to %s",
                      self._url, bytes_down, fetched_fn)
        image_details = Unpacker().unpack(url_fn, fetched_fn, self._cache.path)
        self._cache.save_details(image_details)
    image_name = self._generate_image_name(url_fn)
    image_id = self._register(image_name, image_details)
    return image_name, image_id
def install(self):
    """Install the image: reuse a valid cache or fetch + unpack, then register.

    :returns: tuple of (target image name, registered image id)
    :raises IOError: when no file name can be derived from the url
    """
    url_fn = self._extract_url_fn()
    if not url_fn:
        raise IOError("Can not determine file name from url: %r" % (self.url))
    (cache_path, details_path) = self._cached_paths()
    use_cached = self._validate_cache(cache_path, details_path)
    if use_cached:
        LOG.info("Found valid cached image + metadata at: %s",
                 colorizer.quote(cache_path))
        # Reload the unpack details previously saved alongside the image.
        unpack_info = utils.load_yaml_text(sh.load_file(details_path))
    else:
        sh.mkdir(cache_path)
        if not self._is_url_local():
            (fetched_fn, bytes_down) = down.UrlLibDownloader(
                self.url, sh.joinpths(cache_path, url_fn)).download()
            LOG.debug("For url %s we downloaded %s bytes to %s", self.url,
                      bytes_down, fetched_fn)
        else:
            # Local path; use it directly without downloading.
            fetched_fn = self.url
        unpack_info = Unpacker().unpack(url_fn, fetched_fn, cache_path)
        # Persist the details so the next run can take the cache fast-path.
        sh.write_file(details_path, utils.prettify_yaml(unpack_info))
    tgt_image_name = self._generate_img_name(url_fn)
    img_id = self._register(tgt_image_name, unpack_info)
    return (tgt_image_name, img_id)
def download_dependencies(self):
    """Download dependencies from `$deps_dir/download-requires`.

    :returns: tuple of (parsed requirements, downloaded file names)
    """
    # NOTE(aababilov): do not drop download_dir - it can be reused
    sh.mkdirslist(self.download_dir, tracewriter=self.tracewriter)
    download_requires_filename = sh.joinpths(self.deps_dir,
                                             "download-requires")
    raw_pips_to_download = self.filter_download_requires()
    sh.write_file(download_requires_filename,
                  "\n".join(str(req) for req in raw_pips_to_download))
    if not raw_pips_to_download:
        return ([], [])
    downloaded_flag_file = sh.joinpths(self.deps_dir, "pip-downloaded")
    # NOTE(aababilov): user could have changed persona, so,
    # check that all requirements are downloaded
    if sh.isfile(downloaded_flag_file) and self._requirements_satisfied(
            raw_pips_to_download, self.download_dir):
        LOG.info("All python dependencies have been already downloaded")
    else:
        pip_dir = sh.joinpths(self.deps_dir, "pip")
        pip_download_dir = sh.joinpths(pip_dir, "download")
        pip_build_dir = sh.joinpths(pip_dir, "build")
        # NOTE(aababilov): do not clean the cache, it is always useful
        pip_cache_dir = sh.joinpths(self.deps_dir, "pip-cache")
        pip_failures = []
        for attempt in xrange(self.MAX_PIP_DOWNLOAD_ATTEMPTS):
            # NOTE(aababilov): pip has issues with already downloaded files
            sh.deldir(pip_dir)
            sh.mkdir(pip_download_dir, recurse=True)
            header = "Downloading %s python dependencies (attempt %s)"
            header = header % (len(raw_pips_to_download), attempt)
            utils.log_iterable(sorted(raw_pips_to_download),
                               logger=LOG,
                               header=header)
            failed = False
            try:
                self._try_download_dependencies(attempt,
                                                raw_pips_to_download,
                                                pip_download_dir,
                                                pip_cache_dir,
                                                pip_build_dir)
                # A clean attempt clears out all prior recorded failures.
                pip_failures = []
            except exc.ProcessExecutionError as e:
                LOG.exception("Failed downloading python dependencies")
                pip_failures.append(e)
                failed = True
            if not failed:
                break
        # NOTE: files from the (possibly partial) last attempt are moved
        # into download_dir *before* any failure is re-raised below.
        for filename in sh.listdir(pip_download_dir, files_only=True):
            sh.move(filename, self.download_dir, force=True)
        sh.deldir(pip_dir)
        if pip_failures:
            # All attempts exhausted; surface the last error seen.
            raise pip_failures[-1]
        # Touch the flag file so future runs can skip re-downloading.
        with open(downloaded_flag_file, "w"):
            pass
    pips_downloaded = [pip_helper.extract_requirement(p)
                       for p in raw_pips_to_download]
    self._examine_download_dir(pips_downloaded, self.download_dir)
    what_downloaded = sh.listdir(self.download_dir, files_only=True)
    return (pips_downloaded, what_downloaded)
def ensure_anvil_dirs(root_dir):
    """Make sure the standard anvil directories (and ``root_dir``) exist."""
    dirs_wanted = ["/etc/anvil/", "/usr/share/anvil/"]
    if root_dir and root_dir not in dirs_wanted:
        dirs_wanted.append(root_dir)
    for dirname in dirs_wanted:
        if not sh.isdir(dirname):
            LOG.info("Creating anvil directory at path: %s", dirname)
            sh.mkdir(dirname)
def ensure_anvil_dirs(root_dir):
    """Create any missing anvil directories, plus ``root_dir`` if given."""
    targets = list(ANVIL_DIRS)
    if root_dir and root_dir not in targets:
        targets.append(root_dir)
    for target in targets:
        if not sh.isdir(target):
            LOG.info("Creating anvil directory at path: %s", target)
            sh.mkdir(target)
def ensure_anvil_dirs(root_dir):
    """Create the anvil configuration/share directories if missing."""
    required = ["/etc/anvil/", "/usr/share/anvil/"]
    if root_dir and root_dir not in required:
        required.append(root_dir)
    missing = [d for d in required if not sh.isdir(d)]
    for d in missing:
        LOG.info("Creating anvil directory at path: %s", d)
        sh.mkdir(d)
def ensure_anvil_dirs(root_dir):
    """Ensure the anvil directories (and optionally ``root_dir``) exist.

    Creation runs under ``sh.Rooted(True)`` since the default locations are
    system paths.
    """
    wanted_dirs = ["/etc/anvil/", '/usr/share/anvil/']
    if root_dir and root_dir not in wanted_dirs:
        wanted_dirs.append(root_dir)
    for d in wanted_dirs:
        # Already exists -- skip it.
        if sh.isdir(d):
            continue
        LOG.info("Creating anvil directory at path: %s", d)
        # NOTE(review): adjust_suids presumably fixes ownership of the newly
        # created directory when elevated via Rooted -- confirm in sh.mkdir.
        with sh.Rooted(True):
            sh.mkdir(d, adjust_suids=True)
def download_dependencies(self, clear_cache=False):
    """Download dependencies from `$deps_dir/download-requires`.

    :param clear_cache: clear `$deps_dir/cache` dir (pip can work
                        incorrectly when it has a cache)
    :returns: list of files now present in the download directory
    """
    # Always start from a clean download directory.
    sh.deldir(self.download_dir)
    sh.mkdir(self.download_dir, recurse=True)
    download_requires_filename = sh.joinpths(self.deps_dir,
                                             "download-requires")
    raw_pips_to_download = self.filter_download_requires()
    # Drop blank entries and parse the rest into requirement objects.
    pips_to_download = [
        pkg_resources.Requirement.parse(str(p.strip()))
        for p in raw_pips_to_download if p.strip()
    ]
    sh.write_file(download_requires_filename,
                  "\n".join(str(req) for req in pips_to_download))
    if not pips_to_download:
        return []
    pip_dir = sh.joinpths(self.deps_dir, "pip")
    pip_download_dir = sh.joinpths(pip_dir, "download")
    pip_build_dir = sh.joinpths(pip_dir, "build")
    pip_cache_dir = sh.joinpths(pip_dir, "cache")
    if clear_cache:
        sh.deldir(pip_cache_dir)
    pip_failures = []
    how_many = len(pips_to_download)
    for attempt in xrange(self.MAX_PIP_DOWNLOAD_ATTEMPTS):
        # NOTE(aababilov): pip has issues with already downloaded files
        sh.deldir(pip_download_dir)
        sh.mkdir(pip_download_dir, recurse=True)
        sh.deldir(pip_build_dir)
        utils.log_iterable(sorted(raw_pips_to_download),
                           logger=LOG,
                           header=("Downloading %s python dependencies "
                                   "(attempt %s)" % (how_many, attempt)))
        failed = False
        try:
            self._try_download_dependencies(attempt, pips_to_download,
                                            pip_download_dir,
                                            pip_cache_dir,
                                            pip_build_dir)
            # A clean attempt clears out all prior recorded failures.
            pip_failures = []
        except exc.ProcessExecutionError as e:
            LOG.exception("Failed downloading python dependencies")
            pip_failures.append(e)
            failed = True
        if not failed:
            break
    if pip_failures:
        # All attempts exhausted; surface the last error seen.
        raise pip_failures[-1]
    # Only on success do the fetched files get moved into download_dir.
    for filename in sh.listdir(pip_download_dir, files_only=True):
        sh.move(filename, self.download_dir)
    return sh.listdir(self.download_dir, files_only=True)
def download_dependencies(self, clear_cache=False):
    """Download dependencies from `$deps_dir/download-requires`.

    :param clear_cache: clear `$deps_dir/cache` dir (pip can work
                        incorrectly when it has a cache)
    :returns: list of files now present in the download directory
    """
    # Start each run from an empty download directory.
    sh.deldir(self.download_dir)
    sh.mkdir(self.download_dir, recurse=True)
    requires_path = sh.joinpths(self.deps_dir, "download-requires")
    raw_requires = self.filter_download_requires()
    requirements = []
    for entry in raw_requires:
        if entry.strip():
            requirements.append(
                pkg_resources.Requirement.parse(str(entry.strip())))
    sh.write_file(requires_path,
                  "\n".join(str(req) for req in requirements))
    if not requirements:
        return []
    scratch_dir = sh.joinpths(self.deps_dir, "pip")
    fetch_dir = sh.joinpths(scratch_dir, "download")
    build_dir = sh.joinpths(scratch_dir, "build")
    cache_dir = sh.joinpths(scratch_dir, "cache")
    if clear_cache:
        sh.deldir(cache_dir)
    failures = []
    total = len(requirements)
    for attempt in xrange(self.MAX_PIP_DOWNLOAD_ATTEMPTS):
        # NOTE(aababilov): pip has issues with already downloaded files
        sh.deldir(fetch_dir)
        sh.mkdir(fetch_dir, recurse=True)
        sh.deldir(build_dir)
        header = ("Downloading %s python dependencies "
                  "(attempt %s)" % (total, attempt))
        utils.log_iterable(sorted(raw_requires), logger=LOG, header=header)
        try:
            self._try_download_dependencies(attempt, requirements,
                                            fetch_dir, cache_dir, build_dir)
            failures = []
            break
        except exc.ProcessExecutionError as e:
            LOG.exception("Failed downloading python dependencies")
            failures.append(e)
    if failures:
        # Every attempt errored out; re-raise the most recent error.
        raise failures[-1]
    for fetched in sh.listdir(fetch_dir, files_only=True):
        sh.move(fetched, self.download_dir)
    return sh.listdir(self.download_dir, files_only=True)
def _unpack_tar(self, file_name, file_location, tmp_dir):
    """Extract the root image (and optional ramdisk/kernel) from a tarball."""
    (root_name, _) = os.path.splitext(file_name)
    members = self._filter_files(
        self._get_tar_file_members(file_location))
    (root_img_fn, ramdisk_fn, kernel_fn) = self._find_pieces(members,
                                                             file_location)
    if not root_img_fn:
        raise IOError("Tar file %r has no root image member" % (file_name))
    self._log_pieces_found('archive', root_img_fn, ramdisk_fn, kernel_fn)
    # Destinations stay None if the matching member never shows up.
    root_real_fn = None
    ramdisk_real_fn = None
    kernel_real_fn = None
    extract_dir = sh.mkdir(sh.joinpths(tmp_dir, root_name))
    with contextlib.closing(tarfile.open(file_location, 'r')) as tfh:
        for member in tfh.getmembers():
            name = member.name
            if name == root_img_fn:
                root_real_fn = sh.joinpths(extract_dir,
                                           sh.basename(root_img_fn))
                self._unpack_tar_member(tfh, member, root_real_fn)
            elif ramdisk_fn and name == ramdisk_fn:
                ramdisk_real_fn = sh.joinpths(extract_dir,
                                              sh.basename(ramdisk_fn))
                self._unpack_tar_member(tfh, member, ramdisk_real_fn)
            elif kernel_fn and name == kernel_fn:
                kernel_real_fn = sh.joinpths(extract_dir,
                                             sh.basename(kernel_fn))
                self._unpack_tar_member(tfh, member, kernel_real_fn)
    return self._describe(root_real_fn, ramdisk_real_fn, kernel_real_fn)
def __init__(self, distro, root_dir, instances, opts, group, prior_groups):
    """Set up dependency-handling state for a group of instances.

    :param distro: distro object used for dependency configuration lookups
    :param root_dir: root directory the ``deps`` directory lives under
    :param instances: component instances this handler operates on
    :param opts: optional dict of tunables (``pip_retries``,
                 ``pip_retry_delay``); may be ``None``
    :param group: group identifier used to namespace generated files
    :param prior_groups: groups processed before this one
    """
    self.distro = distro
    self.root_dir = root_dir
    self.instances = instances
    self.prior_groups = prior_groups
    self.opts = opts or {}
    self.group = group
    # BUGFIX: read retry settings via self.opts (always a dict) instead of
    # the raw ``opts`` parameter, which may be None and would raise
    # AttributeError on .get().
    self.retries = max(0, int(self.opts.get('pip_retries', self.RETRIES)))
    self.retry_delay = max(
        0, float(self.opts.get('pip_retry_delay', self.RETRY_DELAY)))
    # Various paths we will use while operating
    self.deps_dir = sh.joinpths(self.root_dir, "deps")
    self.download_dir = sh.joinpths(self.deps_dir, "download")
    self.log_dir = sh.joinpths(self.deps_dir, "output")
    sh.mkdir(self.log_dir, recurse=True)
    self.gathered_requires_filename = sh.joinpths(
        self.deps_dir, "pip-requires-group-%s" % group)
    self.forced_requires_filename = sh.joinpths(
        self.deps_dir, "forced-requires-group-%s" % group)
    self.download_requires_filename = sh.joinpths(
        self.deps_dir, "download-requires-group-%s" % group)
    self.multipip = multipip_helper.Helper()
    # List of requirements
    self.pips_to_install = []
    self.forced_pips = []
    # Instances to their app directory (with a setup.py inside)
    self.package_dirs = self._get_package_dirs(instances)
    # Track what file we create so they can be cleaned up on uninstall.
    trace_fn = tr.trace_filename(self.root_dir, 'deps')
    self.tracewriter = tr.TraceWriter(trace_fn, break_if_there=False)
    self.tracereader = tr.TraceReader(trace_fn)
    # Union of package names per requirement category, across instances.
    self.requirements = {}
    for key in ("build-requires", "requires", "conflicts"):
        req_set = set()
        for inst in self.instances:
            req_set |= set(pkg["name"]
                           for pkg in inst.get_option(key) or [])
        self.requirements[key] = req_set
    # Pip names we will ignore from being converted/analyzed...
    ignore_pips = set()
    ignore_distro_pips = self.distro.get_dependency_config(
        "ignoreable_pips", quiet=True)
    if ignore_distro_pips:
        ignore_pips.update(ignore_distro_pips)
    self.ignore_pips = ignore_pips
def __init__(self, distro, root_dir, instances, opts):
    """Set up dependency-handling state for the given instances."""
    self.distro = distro
    self.root_dir = root_dir
    self.instances = instances
    self.opts = opts or {}
    # Various paths we will use while operating
    self.deps_dir = sh.joinpths(self.root_dir, "deps")
    self.downloaded_flag_file = sh.joinpths(self.deps_dir, "pip-downloaded")
    self.download_dir = sh.joinpths(self.deps_dir, "download")
    self.log_dir = sh.joinpths(self.deps_dir, "output")
    sh.mkdir(self.log_dir, recurse=True)
    self.gathered_requires_filename = sh.joinpths(self.deps_dir,
                                                  "pip-requires")
    self.forced_requires_filename = sh.joinpths(self.deps_dir,
                                                "forced-requires")
    self.download_requires_filename = sh.joinpths(self.deps_dir,
                                                  "download-requires")
    self.multipip = multipip_helper.Helper()
    # Requirement lists (filled in later).
    self.pips_to_install = []
    self.forced_packages = []
    # Instances to their app directory (with a setup.py inside)
    self.package_dirs = self._get_package_dirs(instances)
    # Track what file we create so they can be cleaned up on uninstall.
    deps_trace_fn = tr.trace_filename(self.root_dir, 'deps')
    self.tracewriter = tr.TraceWriter(deps_trace_fn, break_if_there=False)
    self.tracereader = tr.TraceReader(deps_trace_fn)
    # Union of package names per requirement category, across instances.
    self.requirements = {}
    for category in ("build-requires", "requires", "conflicts"):
        gathered = set()
        for instance in self.instances:
            for pkg in instance.get_option(category) or []:
                gathered.add(pkg["name"])
        self.requirements[category] = gathered
    # These pip names we will ignore from being converted/analyzed...
    ignorable = self.distro.get_dependency_config("ignoreable_pips",
                                                  quiet=True)
    if ignorable:
        self.ignore_pips = set(ignorable)
    else:
        self.ignore_pips = set()
def __init__(self, distro, root_dir, instances, opts, group, prior_groups):
    """Set up dependency-handling state for a group of instances.

    :param distro: distro object used for dependency configuration lookups
    :param root_dir: root directory the ``deps`` directory lives under
    :param instances: component instances this handler operates on
    :param opts: optional dict of tunables (``pip_retries``,
                 ``pip_retry_delay``); may be ``None``
    :param group: group identifier used to namespace generated files
    :param prior_groups: groups processed before this one
    """
    self.distro = distro
    self.root_dir = root_dir
    self.instances = instances
    self.prior_groups = prior_groups
    self.opts = opts or {}
    self.group = group
    # BUGFIX: read retry settings via self.opts (always a dict) instead of
    # the raw ``opts`` parameter, which may be None and would raise
    # AttributeError on .get().
    self.retries = max(0, int(self.opts.get('pip_retries', self.RETRIES)))
    self.retry_delay = max(0, float(self.opts.get('pip_retry_delay',
                                                  self.RETRY_DELAY)))
    # Various paths we will use while operating
    self.deps_dir = sh.joinpths(self.root_dir, "deps")
    self.download_dir = sh.joinpths(self.deps_dir, "download")
    self.log_dir = sh.joinpths(self.deps_dir, "output")
    sh.mkdir(self.log_dir, recurse=True)
    self.gathered_requires_filename = sh.joinpths(
        self.deps_dir, "pip-requires-group-%s" % group)
    self.forced_requires_filename = sh.joinpths(
        self.deps_dir, "forced-requires-group-%s" % group)
    self.download_requires_filename = sh.joinpths(
        self.deps_dir, "download-requires-group-%s" % group)
    self.multipip = multipip_helper.Helper()
    # List of requirements
    self.pips_to_install = []
    self.forced_pips = []
    # Instances to their app directory (with a setup.py inside)
    self.package_dirs = self._get_package_dirs(instances)
    # Track what file we create so they can be cleaned up on uninstall.
    trace_fn = tr.trace_filename(self.root_dir, 'deps')
    self.tracewriter = tr.TraceWriter(trace_fn, break_if_there=False)
    self.tracereader = tr.TraceReader(trace_fn)
    # Union of package names per requirement category, across instances.
    self.requirements = {}
    for key in ("build-requires", "requires", "conflicts"):
        req_set = set()
        for inst in self.instances:
            req_set |= set(pkg["name"]
                           for pkg in inst.get_option(key) or [])
        self.requirements[key] = req_set
    # Pip names we will ignore from being converted/analyzed...
    ignore_pips = set()
    ignore_distro_pips = self.distro.get_dependency_config("ignoreable_pips",
                                                           quiet=True)
    if ignore_distro_pips:
        ignore_pips.update(ignore_distro_pips)
    self.ignore_pips = ignore_pips
def download_dependencies(download_dir, pips_to_download, output_filename):
    """Download the given pip requirements into ``download_dir``.

    Uses a persistent ``.cache`` subdirectory between runs and captures the
    pip command's output into ``output_filename``.
    """
    if not pips_to_download:
        return
    # NOTE(aababilov): pip has issues with already downloaded files
    if not sh.isdir(download_dir):
        sh.mkdir(download_dir)
    else:
        for leftover in sh.listdir(download_dir, files_only=True):
            sh.unlink(leftover)
    # Clean out any previous paths that we don't want around.
    scratch_build = sh.joinpths(download_dir, ".build")
    if sh.isdir(scratch_build):
        sh.deldir(scratch_build)
    sh.mkdir(scratch_build)
    # Ensure certain directories exist that we want to exist (but we don't
    # want to delete them run after run).
    persistent_cache = sh.joinpths(download_dir, ".cache")
    if not sh.isdir(persistent_cache):
        sh.mkdir(persistent_cache)
    cmd = [
        PIP_EXECUTABLE, '-v',
        'install', '-I', '-U',
        '--download', download_dir,
        '--build', scratch_build,
        '--download-cache', persistent_cache,
    ]
    # Don't download wheels...
    #
    # See: https://github.com/pypa/pip/issues/1439
    installed_pip = dist_version.StrictVersion(PIP_VERSION)
    if installed_pip >= dist_version.StrictVersion('1.5'):
        cmd.append("--no-use-wheel")
    cmd.extend(str(p) for p in pips_to_download)
    sh.execute_save_output(cmd, output_filename)
def install(self):
    """Install the image: fetch (or reuse the cache), unpack, then register."""
    url_fn = self._extract_url_fn()
    if not url_fn:
        raise IOError("Can not determine file name from url: %r" % (self.url))
    (cache_path, details_path) = self._cached_paths()
    if self._validate_cache(cache_path, details_path):
        # Cache hit: reload the previously saved unpack details.
        LOG.info("Found valid cached image + metadata at: %s",
                 colorizer.quote(cache_path))
        unpack_info = utils.load_yaml_text(sh.load_file(details_path))
    else:
        sh.mkdir(cache_path)
        if self._is_url_local():
            fetched_fn = self.url
        else:
            (fetched_fn, bytes_down) = down.UrlLibDownloader(
                self.url, sh.joinpths(cache_path, url_fn)).download()
            LOG.debug("For url %s we downloaded %s bytes to %s",
                      self.url, bytes_down, fetched_fn)
        unpack_info = Unpacker().unpack(url_fn, fetched_fn, cache_path)
        # Save the details so later runs can take the cache fast-path.
        sh.write_file(details_path, utils.prettify_yaml(unpack_info))
    tgt_image_name = self._generate_img_name(url_fn)
    img_id = self._register(tgt_image_name, unpack_info)
    return (tgt_image_name, img_id)
def get_archive_details(filename):
    """Return egg details for an archive file, memoized by name + size."""
    if not sh.isfile(filename):
        raise IOError("Can not detail non-existent file %s" % (filename))
    # Check if we already got the details of this file previously
    cache_key = "f:%s:%s" % (sh.basename(filename), sh.getsize(filename))
    try:
        return EGGS_DETAILED[cache_key]
    except KeyError:
        pass
    # Get pip to get us the egg-info.
    with utils.tempdir() as td:
        arch_fn = sh.copy(filename,
                          sh.joinpths(td, sh.basename(filename)))
        extract_to = sh.mkdir(sh.joinpths(td, 'build'))
        pip_util.unpack_file(arch_fn, extract_to, content_type='', link='')
        details = get_directory_details(extract_to)
        EGGS_DETAILED[cache_key] = details
        return details
def _unpack_tar(self, file_name, file_location, tmp_dir):
    """Unpack a tar image archive into ``tmp_dir``.

    Locates the root image member (required) plus optional ramdisk/kernel
    members, extracts each into a per-archive subdirectory and returns the
    description built by ``_describe``.

    :raises IOError: when the archive has no root image member
    """
    (root_name, _) = os.path.splitext(file_name)
    tar_members = self._filter_files(self._get_tar_file_members(file_location))
    (root_img_fn, ramdisk_fn, kernel_fn) = self._find_pieces(tar_members,
                                                             file_location)
    if not root_img_fn:
        msg = "Tar file %r has no root image member" % (file_name)
        raise IOError(msg)
    # These remain None when the matching member is absent from the archive.
    kernel_real_fn = None
    root_real_fn = None
    ramdisk_real_fn = None
    self._log_pieces_found('archive', root_img_fn, ramdisk_fn, kernel_fn)
    extract_dir = sh.mkdir(sh.joinpths(tmp_dir, root_name))
    with contextlib.closing(tarfile.open(file_location, 'r')) as tfh:
        for m in tfh.getmembers():
            if m.name == root_img_fn:
                root_real_fn = sh.joinpths(extract_dir,
                                           sh.basename(root_img_fn))
                self._unpack_tar_member(tfh, m, root_real_fn)
            elif ramdisk_fn and m.name == ramdisk_fn:
                ramdisk_real_fn = sh.joinpths(extract_dir,
                                              sh.basename(ramdisk_fn))
                self._unpack_tar_member(tfh, m, ramdisk_real_fn)
            elif kernel_fn and m.name == kernel_fn:
                kernel_real_fn = sh.joinpths(extract_dir,
                                             sh.basename(kernel_fn))
                self._unpack_tar_member(tfh, m, kernel_real_fn)
    return self._describe(root_real_fn, ramdisk_real_fn, kernel_real_fn)
def run(args):
    """Starts the execution after args have been parsed and logging has
    been setup.

    :param args: dict of parsed CLI arguments (mutated via ``pop`` below)
    :raises excp.OptionException: on bad action/persona/origin arguments
    """
    LOG.debug("CLI arguments are:")
    utils.log_object(args, logger=LOG, level=logging.DEBUG, item_max_len=128)
    # Keep the old args around so we have the full set to write out
    saved_args = dict(args)
    action = args.pop("action", '').strip().lower()
    # Easter egg: a "moo", "mooo", ... action silently does nothing.
    if re.match(r"^moo[o]*$", action):
        return
    try:
        runner_cls = actions.class_for(action)
    except Exception as ex:
        raise excp.OptionException(str(ex))
    if runner_cls.needs_sudo:
        ensure_perms()
    # Check persona file exists
    persona_fn = args.pop('persona_fn')
    if not persona_fn:
        raise excp.OptionException("No persona file name specified!")
    if not sh.isfile(persona_fn):
        raise excp.OptionException("Invalid persona file %r specified!"
                                   % (persona_fn))
    # Check origin file exists
    origins_fn = args.pop('origins_fn')
    if not origins_fn:
        raise excp.OptionException("No origin file name specified!")
    if not sh.isfile(origins_fn):
        raise excp.OptionException("Invalid origin file %r specified!"
                                   % (origins_fn))
    args['origins_fn'] = sh.abspth(origins_fn)
    # Determine the root directory...
    root_dir = sh.abspth(args.pop("dir"))
    (repeat_string, line_max_len) = utils.welcome()
    print(pprint.center_text("Action Runner", repeat_string, line_max_len))
    # !!
    # Here on out we should be using the logger (and not print)!!
    # !!
    # Ensure the anvil dirs are there if others are about to use it...
    if not sh.isdir(root_dir):
        LOG.info("Creating anvil root directory at path: %s", root_dir)
        sh.mkdir(root_dir)
    # Auxiliary directory creation is best-effort only (warn, don't die).
    try:
        for d in ANVIL_DIRS:
            if sh.isdir(d):
                continue
            LOG.info("Creating anvil auxiliary directory at path: %s", d)
            sh.mkdir(d)
    except OSError as e:
        LOG.warn("Failed ensuring auxiliary directories due to %s", e)
    # Load the origins...
    origins = _origins.load(args['origins_fn'],
                            patch_file=args.get('origins_patch'))
    # Load the distro/s
    possible_distros = distro.load(settings.DISTRO_DIR,
                                   distros_patch=args.get('distros_patch'))
    # Load + match the persona to the possible distros...
    try:
        persona_obj = persona.load(persona_fn)
    except Exception as e:
        raise excp.OptionException("Error loading persona file: %s due to %s"
                                   % (persona_fn, e))
    else:
        dist = persona_obj.match(possible_distros, origins)
        LOG.info('Persona selected distro: %s from %s possible distros',
                 colorizer.quote(dist.name), len(possible_distros))
    # Update the dist with any other info...
    dist.inject_platform_overrides(persona_obj.distro_updates,
                                   source=persona_fn)
    dist.inject_platform_overrides(origins, source=origins_fn)
    # Print it out...
    LOG.debug("Distro settings are:")
    for line in dist.pformat(item_max_len=128).splitlines():
        LOG.debug(line)
    # Get the object we will be running with...
    runner = runner_cls(distro=dist, root_dir=root_dir,
                        name=action, cli_opts=args)
    # Now that the settings are known to work, store them for next run
    store_current_settings(saved_args)
    LOG.info("Starting action %s on %s for distro: %s",
             colorizer.quote(action), colorizer.quote(utils.iso8601()),
             colorizer.quote(dist.name))
    LOG.info("Using persona: %s", colorizer.quote(persona_fn))
    LOG.info("Using origins: %s", colorizer.quote(origins_fn))
    LOG.info("In root directory: %s", colorizer.quote(root_dir))
    start_time = time.time()
    runner.run(persona_obj)
    end_time = time.time()
    pretty_time = utils.format_time(end_time - start_time)
    LOG.info("It took %s seconds or %s minutes to complete action %s.",
             colorizer.quote(pretty_time['seconds']),
             colorizer.quote(pretty_time['minutes']),
             colorizer.quote(action))
def run(args):
    """Starts the execution after args have been parsed and logging has
    been setup.

    :param args: dict of parsed CLI arguments (mutated via ``pop`` below)
    :raises excp.OptionException: on a bad action name, missing/invalid
        persona file, or a persona that fails to load/verify
    """
    LOG.debug("CLI arguments are:")
    utils.log_object(args, logger=LOG, level=logging.DEBUG, item_max_len=128)
    # Keep the old args around so we have the full set to write out
    saved_args = dict(args)
    action = args.pop("action", '').strip().lower()
    if action not in actions.names():
        raise excp.OptionException("Invalid action name %r specified!"
                                   % (action))
    # Determine + setup the root directory...
    # If not provided attempt to locate it via the environment control files
    args_root_dir = args.pop("dir")
    root_dir = env.get_key('INSTALL_ROOT')
    if not root_dir:
        root_dir = args_root_dir
    if not root_dir:
        # Last resort: a per-user default under the home directory.
        root_dir = sh.joinpths(sh.gethomedir(), 'openstack')
    root_dir = sh.abspth(root_dir)
    sh.mkdir(root_dir)
    persona_fn = args.pop('persona_fn')
    if not persona_fn:
        raise excp.OptionException("No persona file name specified!")
    if not sh.isfile(persona_fn):
        raise excp.OptionException("Invalid persona file %r specified!"
                                   % (persona_fn))
    # !!
    # Here on out we should be using the logger (and not print)!!
    # !!
    # Stash the dryrun value (if any)
    if 'dryrun' in args:
        env.set("ANVIL_DRYRUN", str(args['dryrun']))
    # Ensure the anvil etc dir is there if others are about to use it
    ensure_anvil_dir()
    # Load the distro
    dist = distro.load(settings.DISTRO_DIR)
    # Load + verify the person
    try:
        persona_obj = persona.load(persona_fn)
        persona_obj.verify(dist)
    except Exception as e:
        raise excp.OptionException("Error loading persona file: %s due to %s"
                                   % (persona_fn, e))
    # Get the object we will be running with...
    runner_cls = actions.class_for(action)
    runner = runner_cls(distro=dist, root_dir=root_dir,
                        name=action, cli_opts=args)
    (repeat_string, line_max_len) = utils.welcome()
    print(center_text("Action Runner", repeat_string, line_max_len))
    # Now that the settings are known to work, store them for next run
    store_current_settings(saved_args)
    LOG.info("Starting action %s on %s for distro: %s",
             colorizer.quote(action), colorizer.quote(utils.iso8601()),
             colorizer.quote(dist.name))
    LOG.info("Using persona: %s", colorizer.quote(persona_fn))
    LOG.info("In root directory: %s", colorizer.quote(root_dir))
    start_time = time.time()
    runner.run(persona_obj)
    end_time = time.time()
    pretty_time = utils.format_time(end_time - start_time)
    LOG.info("It took %s seconds or %s minutes to complete action %s.",
             colorizer.quote(pretty_time['seconds']),
             colorizer.quote(pretty_time['minutes']),
             colorizer.quote(action))
def ensure_anvil_dirs():
    """Ensure the system-wide anvil directories exist.

    Creation happens under ``sh.Rooted(True)`` since these are system
    paths that normally require root to create.
    """
    for d in ["/etc/anvil/", "/usr/share/anvil/"]:
        # BUGFIX: skip directories that already exist instead of
        # unconditionally acquiring root and re-running mkdir/adjust_suids
        # on them (matching the guard the other ensure_anvil_dirs variants
        # in this codebase use).
        if sh.isdir(d):
            continue
        with sh.Rooted(True):
            sh.mkdir(d, adjust_suids=True)
def run(args):
    """Starts the execution after args have been parsed and logging has
    been setup.

    :param args: dict of parsed CLI arguments (mutated via ``pop`` below)
    :raises excp.OptionException: on a bad action name, missing/invalid
        persona file, or a persona that fails to load/verify
    """
    LOG.debug("CLI arguments are:")
    utils.log_object(args, logger=LOG, level=logging.DEBUG, item_max_len=128)
    # Keep the old args around so we have the full set to write out
    saved_args = dict(args)
    action = args.pop("action", '').strip().lower()
    if action not in actions.names():
        raise excp.OptionException("Invalid action name %r specified!"
                                   % (action))
    # Determine + setup the root directory...
    # If not provided attempt to locate it via the environment control files
    args_root_dir = args.pop("dir")
    root_dir = env.get_key('INSTALL_ROOT')
    if not root_dir:
        root_dir = args_root_dir
    if not root_dir:
        # Last resort: a per-user default under the home directory.
        root_dir = sh.joinpths(sh.gethomedir(), 'openstack')
    root_dir = sh.abspth(root_dir)
    sh.mkdir(root_dir)
    persona_fn = args.pop('persona_fn')
    if not persona_fn:
        raise excp.OptionException("No persona file name specified!")
    if not sh.isfile(persona_fn):
        raise excp.OptionException("Invalid persona file %r specified!"
                                   % (persona_fn))
    # !!
    # Here on out we should be using the logger (and not print)!!
    # !!
    # Stash the dryrun value (if any)
    if 'dryrun' in args:
        env.set("ANVIL_DRYRUN", str(args['dryrun']))
    # Ensure the anvil dirs are there if others are about to use it...
    ensure_anvil_dirs()
    # Load the distro
    dist = distro.load(settings.DISTRO_DIR)
    # Load + verify the person
    try:
        persona_obj = persona.load(persona_fn)
        persona_obj.verify(dist)
    except Exception as e:
        raise excp.OptionException("Error loading persona file: %s due to %s"
                                   % (persona_fn, e))
    # Get the object we will be running with...
    runner_cls = actions.class_for(action)
    runner = runner_cls(distro=dist, root_dir=root_dir,
                        name=action, cli_opts=args)
    (repeat_string, line_max_len) = utils.welcome()
    print(center_text("Action Runner", repeat_string, line_max_len))
    # Now that the settings are known to work, store them for next run
    store_current_settings(saved_args)
    LOG.info("Starting action %s on %s for distro: %s",
             colorizer.quote(action), colorizer.quote(utils.iso8601()),
             colorizer.quote(dist.name))
    LOG.info("Using persona: %s", colorizer.quote(persona_fn))
    LOG.info("In root directory: %s", colorizer.quote(root_dir))
    start_time = time.time()
    runner.run(persona_obj)
    end_time = time.time()
    pretty_time = utils.format_time(end_time - start_time)
    LOG.info("It took %s seconds or %s minutes to complete action %s.",
             colorizer.quote(pretty_time['seconds']),
             colorizer.quote(pretty_time['minutes']),
             colorizer.quote(action))