def start(self):
    """Start all configured applications for this component.

    Returns the number of applications that were started.
    """
    # Anything to start?
    am_started = 0
    apps_to_start = self._get_apps_to_start()
    if not apps_to_start:
        return am_started
    # Select how we are going to start it
    run_type = self._fetch_run_type()
    starter = importer.import_entry_point(run_type)(self)
    for app_info in apps_to_start:
        app_name = app_info["name"]
        app_pth = app_info.get("path", app_name)
        app_dir = app_info.get("app_dir", self.get_option('app_dir'))
        # Adjust the program options now that we have real locations
        program_opts = utils.param_replace_list(self._get_app_options(app_name),
                                                self._get_param_map(app_name))
        # Start it with the given settings
        LOG.debug("Starting %r using %r", app_name, run_type)
        details_fn = starter.start(app_name,
                                   app_pth=app_pth,
                                   app_dir=app_dir,
                                   opts=program_opts)
        LOG.info("Started %s details are in %s",
                 colorizer.quote(app_name), colorizer.quote(details_fn))
        # This trace is used to locate details about what to stop
        self.tracewriter.app_started(app_name, details_fn, run_type)
        if app_info.get('sleep_time'):
            # Some applications need time to settle before the next one starts.
            LOG.info("%s requested a %s second sleep time, please wait...",
                     colorizer.quote(app_name), app_info.get('sleep_time'))
            sh.sleep(app_info.get('sleep_time'))
        am_started += 1
    return am_started
def main():
    """Starts the execution of without injecting variables into the global namespace.

    Ensures that logging is setup and that sudo access is available and in-use.

    Arguments: N/A
    Returns: 0 for success, 1 for failure and 2 for permission change failure.
    """
    # NOTE: the return-code documentation above was corrected to match the
    # actual code paths below (0 is success, 1 is failure).

    # Do this first so people can see the help message...
    args = opts.parse(load_previous_settings())

    # Configure logging levels
    log_level = logging.INFO
    if args['verbose'] or args['dryrun']:
        log_level = logging.DEBUG
    logging.setupLogging(log_level)
    LOG.debug("Log level is: %s" % (logging.getLevelName(log_level)))

    def print_exc(exc):
        # Print a cleaned-up, period-terminated version of the exception text.
        if not exc:
            return
        msg = str(exc).strip()
        if not msg:
            return
        if not (msg.endswith(".") or msg.endswith("!")):
            msg = msg + "."
        print(msg)

    def print_traceback():
        # See: http://docs.python.org/library/traceback.html
        # Use sys.exc_info() instead of the deprecated (and not thread-safe)
        # sys.exc_type/sys.exc_value/sys.exc_traceback attributes.
        (exc_type, exc_value, exc_traceback) = sys.exc_info()
        if log_level >= logging.INFO:
            # Only show detailed traceback info when running verbosely.
            exc_traceback = None
        tb.print_exception(exc_type, exc_value, exc_traceback, file=sys.stdout)

    try:
        ensure_perms()
    except excp.PermException as e:
        print_exc(e)
        print(("This program should be running via %s as it performs some root-only commands is it not?")
              % (colorizer.quote('sudo', quote_color='red')))
        return 2
    try:
        run(args)
        utils.goodbye(True)
        return 0
    except excp.OptionException as e:
        print_exc(e)
        print("Perhaps you should try %s" % (colorizer.quote('--help', quote_color='red')))
        return 1
    except Exception:
        utils.goodbye(False)
        print_traceback()
        return 1
def replace_forced_requirements(fn, forced_by_key):
    """Rewrite requirement lines in file ``fn`` using forced replacements.

    ``forced_by_key`` maps requirement keys to the forced requirement
    that must be used instead. Returns the number of lines altered.
    """
    old_lines = sh.load_file(fn).splitlines()
    new_lines = []
    alterations = []
    for line in old_lines:
        try:
            source_req = pip_helper.extract_requirement(line)
        except (ValueError, TypeError):
            # Not a parseable requirement line (comment, blank, ...); keep as-is.
            pass
        else:
            if source_req:
                validate_requirement(fn, source_req)
                try:
                    replace_req = forced_by_key[source_req.key]
                except KeyError:
                    # No forced replacement for this requirement; keep as-is.
                    pass
                else:
                    replace_req = str(replace_req)
                    source_req = str(source_req)
                    if replace_req != source_req:
                        line = replace_req
                        alterations.append("%s => %s"
                                           % (colorizer.quote(source_req),
                                              colorizer.quote(replace_req)))
        new_lines.append(line)
    if alterations:
        # Only rewrite (and back up) the file when something actually changed.
        contents = "# Cleaned on %s\n\n%s\n" % (utils.iso8601(), "\n".join(new_lines))
        sh.write_file_and_backup(fn, contents)
        utils.log_iterable(alterations, logger=LOG,
                           header="Replaced %s requirements in %s"
                                  % (len(alterations), fn),
                           color=None)
    return len(alterations)
def _quote_status(self, status):
    """Colorize a status value: green for healthy, yellow for unknown, red otherwise."""
    if status in (constants.STATUS_STARTED, constants.STATUS_INSTALLED):
        color = 'green'
    elif status == constants.STATUS_UNKNOWN:
        color = 'yellow'
    else:
        color = 'red'
    return colorizer.quote(status, quote_color=color)
def install(self, pkg):
    """Install ``pkg`` unless the exact same version was already installed.

    Warns when a (potentially) newer, older, or unspecified version was
    previously installed, then records the install in the registry.
    """
    name = pkg['name']
    version = pkg.get('version')
    skip_install = False
    if name in self.registry.installed:
        existing_version = self.registry.installed[name]
        if version == existing_version:
            LOG.debug("Skipping install of %r since it already happened.", name)
            skip_install = True
        else:
            if existing_version is not None:
                # Compare normalized versions to decide upgrade vs downgrade.
                if utils.versionize(existing_version) < utils.versionize(version):
                    LOG.warn(("A request has come in for a 'potentially' newer version of %s v(%s),"
                              " when v(%s) was previously installed!"),
                             colorizer.quote(name), version, existing_version)
                elif utils.versionize(existing_version) > utils.versionize(version):
                    LOG.warn(("A request has come in for a 'potentially' older version of %s v(%s), "
                              "when v(%s) was previously installed!"),
                             colorizer.quote(name), version, existing_version)
            else:
                LOG.warn(("A request has come in for a 'potentially' different version of %s v(%s),"
                          " when a unspecified version was previously installed!"),
                         colorizer.quote(name), version)
    if not skip_install:
        self._install(pkg)
        LOG.debug("Noting that %r - v(%s) was installed.", name, (version or "??"))
        self.registry.installed[name] = version
        # An install cancels out any earlier recorded removal.
        if name in self.registry.removed:
            del(self.registry.removed[name])
def pre_uninstall(self):
    """Reset the database password to a known base value before uninstalling."""
    dbtype = self.get_option("type")
    dbactions = self.distro.get_command_config(dbtype, quiet=True)
    try:
        if dbactions:
            LOG.info(
                ("Attempting to reset your db password to %s so"
                 " that we can set it the next time you install."),
                colorizer.quote(RESET_BASE_PW),
            )
            pwd_cmd = self.distro.get_command(dbtype, "set_pwd")
            if pwd_cmd:
                # The database must be running for the password command to work.
                LOG.info("Ensuring your database is started before we operate on it.")
                self.runtime.start()
                self.runtime.wait_active()
                params = {
                    "OLD_PASSWORD": dbhelper.get_shared_passwords(self)["pw"],
                    "NEW_PASSWORD": RESET_BASE_PW,
                    "USER": self.get_option("user", default_value="root"),
                }
                cmds = [{"cmd": pwd_cmd}]
                utils.execute_template(*cmds, params=params)
    except IOError:
        # Best-effort: failing here just means a manual reset is needed later.
        LOG.warn(
            (
                "Could not reset the database password. You might have to manually "
                "reset the password to %s before the next install"
            ),
            colorizer.quote(RESET_BASE_PW),
        )
def _filter_package_files(package_files):
    """Filter downloaded package files, dropping disallowed ones and ones
    that can already be satisfied by an RPM from a yum repository.

    NOTE(review): relies on ``self``, ``no_pips``, ``pips_keys`` and
    ``yum_map`` from the enclosing scope -- this is a nested helper.
    """
    package_reqs = []
    package_keys = []
    for filename in package_files:
        package_details = pip_helper.get_archive_details(filename)
        package_reqs.append(package_details['req'])
        package_keys.append(package_details['req'].key)
    package_rpm_names = self._convert_names_python2rpm(package_keys)
    filtered_files = []
    for (filename, req, rpm_name) in zip(package_files, package_reqs,
                                         package_rpm_names):
        if req.key in no_pips:
            LOG.info(("Dependency %s was downloaded additionally "
                      "but it is disallowed."), colorizer.quote(req))
            continue
        if req.key in pips_keys:
            filtered_files.append(filename)
            continue
        # See if pip tried to download it but we already can satisfy
        # it via yum and avoid building it in the first place...
        (_version, repo) = self._find_yum_match(yum_map, req, rpm_name)
        if not repo:
            filtered_files.append(filename)
        else:
            LOG.info(("Dependency %s was downloaded additionally "
                      "but it can be satisfied by %s from repository "
                      "%s instead."), colorizer.quote(req),
                     colorizer.quote(rpm_name), colorizer.quote(repo))
    return filtered_files
def install_start(instance):
    """Announce the beginning of an install for a component instance."""
    subsystems = set(list(instance.subsystems))
    if not subsystems:
        LOG.info("Installing %s.", colorizer.quote(instance.name))
    else:
        utils.log_iterable(sorted(subsystems), logger=LOG,
                           header='Installing %s using subsystems' % colorizer.quote(instance.name))
def run_tests(self):
    """Run the component's (optional) test setup command, then its tests.

    Test failures are swallowed (with a warning) when
    ``self.ignore_test_failures`` is set; otherwise they propagate.
    """
    app_dir = self.get_option('app_dir')
    if not sh.isdir(app_dir):
        LOG.warn("Unable to find application directory at %s, can not run %s tests.",
                 colorizer.quote(app_dir), colorizer.quote(self.name))
        return
    pre_cmd = self._get_pre_test_command()
    cmd = self._get_test_command()
    if not cmd:
        LOG.warn("Unable to determine test command for %s, can not run tests.",
                 colorizer.quote(self.name))
        return
    env = self._get_env()
    try:
        if pre_cmd:
            LOG.info("Running test setup via: %s",
                     utils.truncate_text(" ".join(pre_cmd), 80))
            sh.execute(pre_cmd, stdout_fh=sys.stdout, stderr_fh=sys.stdout,
                       cwd=app_dir, env_overrides=env)
        LOG.info("Running tests via: %s",
                 utils.truncate_text(" ".join(cmd), 80))
        sh.execute(cmd, stdout_fh=sys.stdout, stderr_fh=sys.stdout,
                   cwd=app_dir, env_overrides=env)
    except excp.ProcessExecutionError as e:
        if self.ignore_test_failures:
            LOG.warn("Ignoring test failure of component %s: %s",
                     colorizer.quote(self.name), e)
        else:
            raise
def _print_status(self, component, result):
    """Pretty-print the list of status entries reported for ``component``."""
    if not result:
        LOG.info("Status of %s is %s.", colorizer.quote(component.name),
                 self._quote_status(STATUS_UNKNOWN))
        return

    def details_printer(entry, spacing, max_len):
        # Truncate from the bottom so the most recent details are kept.
        det = utils.truncate_text(entry.details, max_len=max_len, from_bottom=True)
        for line in det.splitlines():
            line = line.replace("\t", "\\t")
            line = line.replace("\r", "\\r")
            line = utils.truncate_text(line, max_len=120)
            LOG.info("%s>> %s", (" " * spacing), line)

    if len(result) == 1:
        s = result[0]
        # Show the sub-status name only when it differs from the component name.
        if s.name and s.name != component.name:
            LOG.info(
                "Status of %s (%s) is %s.",
                colorizer.quote(component.name), s.name, self._quote_status(s.status)
            )
        else:
            LOG.info("Status of %s is %s.", colorizer.quote(component.name),
                     self._quote_status(s.status))
        if self.show_amount > 0 and s.details:
            details_printer(s, 2, self.show_amount)
    else:
        LOG.info("Status of %s is:", colorizer.quote(component.name))
        for s in result:
            LOG.info("|-- %s is %s.", s.name, self._quote_status(s.status))
            if self.show_amount > 0 and s.details:
                details_printer(s, 4, self.show_amount)
def _finish_package(self, component, where):
    """Report the outcome of packaging ``component``."""
    if where:
        LOG.info("Package created at %s for component %s.",
                 colorizer.quote(where), colorizer.quote(component.name))
    else:
        LOG.info("Component %s can not create a package.",
                 colorizer.quote(component.name))
def _run(self, persona, component_order, instances):
    """Run the pre-start, start and post-start phases over all components."""
    self._run_phase(
        PhaseFunctors(
            start=None,
            run=lambda i: i.pre_start(),
            end=None,
        ),
        component_order,
        instances,
        "pre-start",
    )
    self._run_phase(
        PhaseFunctors(
            start=lambda i: LOG.info('Starting %s.', i.name),
            run=lambda i: i.start(),
            # Fixed message tense + punctuation (was "Start %s applications").
            end=lambda i, result: LOG.info("Started %s applications.",
                                           colorizer.quote(result)),
        ),
        component_order,
        instances,
        "start",
        'stopped'
    )
    self._run_phase(
        PhaseFunctors(
            start=lambda i: LOG.info('Post-starting %s.', colorizer.quote(i.name)),
            run=lambda i: i.post_start(),
            end=None,
        ),
        component_order,
        instances,
        "post-start",
        'stopped'
    )
def _filter_package_files():
    """Split downloaded package files into files to keep and requirements
    that yum can already provide.

    Returns a ``(filtered_files, yum_provided)`` tuple.

    NOTE(review): relies on ``self``, ``package_reqs``, ``no_pips``,
    ``pips_keys`` and ``yum_map`` from the enclosing scope.
    """
    yum_provided = []
    req_names = [req.key for (filename, req) in package_reqs]
    package_rpm_names = self.py2rpm_helper.names_to_rpm_names(req_names)
    filtered_files = []
    for filename, req in package_reqs:
        rpm_name = package_rpm_names[req.key]
        if req.key in no_pips:
            LOG.info(("Dependency %s was downloaded additionally "
                      "but it is disallowed."), colorizer.quote(req))
            continue
        if req.key in pips_keys:
            filtered_files.append(filename)
            continue
        # See if pip tried to download it but we already can satisfy
        # it via yum and avoid building it in the first place...
        rpm_info = self._find_yum_match(yum_map, req, rpm_name)
        if not rpm_info:
            filtered_files.append(filename)
        else:
            yum_provided.append((req, rpm_info))
            LOG.info(("Dependency %s was downloaded additionally "
                      "but it can be satisfied by %s from repository "
                      "%s instead."), colorizer.quote(req),
                     colorizer.quote(rpm_name),
                     colorizer.quote(rpm_info['repo']))
    return (filtered_files, yum_provided)
def _register(self, image_name, location):
    """Upload an image (plus optional kernel and ramdisk) into glance.

    Returns the glance id of the uploaded root image.
    """
    # Upload the kernel, if we have one
    kernel = location.pop('kernel', None)
    kernel_id = ''
    if kernel:
        kernel_image_name = "%s-vmlinuz" % (image_name)
        self._check_name(kernel_image_name)
        LOG.info('Adding kernel %s to glance.', colorizer.quote(kernel_image_name))
        LOG.info("Please wait installing...")
        args = {
            'container_format': kernel['container_format'],
            'disk_format': kernel['disk_format'],
            'name': kernel_image_name,
            'is_public': self.is_public,
        }
        # Images are binary data: open in binary mode ('rb', not 'r') so no
        # newline translation can corrupt the upload.
        with open(kernel['file_name'], 'rb') as fh:
            resource = self.client.images.create(data=fh, **args)
            kernel_id = resource.id
    # Upload the ramdisk, if we have one
    initrd = location.pop('ramdisk', None)
    initrd_id = ''
    if initrd:
        ram_image_name = "%s-initrd" % (image_name)
        self._check_name(ram_image_name)
        LOG.info('Adding ramdisk %s to glance.', colorizer.quote(ram_image_name))
        LOG.info("Please wait installing...")
        args = {
            'container_format': initrd['container_format'],
            'disk_format': initrd['disk_format'],
            'name': ram_image_name,
            'is_public': self.is_public,
        }
        with open(initrd['file_name'], 'rb') as fh:
            resource = self.client.images.create(data=fh, **args)
            initrd_id = resource.id
    # Upload the root, we must have one...
    LOG.info('Adding image %s to glance.', colorizer.quote(image_name))
    self._check_name(image_name)
    args = {
        'name': image_name,
        'container_format': location['container_format'],
        'disk_format': location['disk_format'],
        'is_public': self.is_public,
        'properties': {},
    }
    # Link the root image to its kernel/ramdisk pieces (if any were uploaded).
    if kernel_id or initrd_id:
        if kernel_id:
            args['properties']['kernel_id'] = kernel_id
        if initrd_id:
            args['properties']['ramdisk_id'] = initrd_id
    LOG.info("Please wait installing...")
    with open(location['file_name'], 'rb') as fh:
        resource = self.client.images.create(data=fh, **args)
        img_id = resource.id
    return img_id
def run_tests(self):
    """Run this component's tests from inside its application directory."""
    where = self.get_option('app_dir')
    if not sh.isdir(where):
        LOG.warn("Unable to find application directory at %s, can not run %s tests.",
                 colorizer.quote(where), colorizer.quote(self.name))
        return
    env_overrides = self._get_env()
    test_command = self._get_test_command()
    sh.execute(*test_command, stdout_fh=None, stderr_fh=None,
               cwd=where, env_overrides=env_overrides)
def pre_uninstall(self):
    """Reset the rabbit-mq guest password to a known value before uninstall."""
    try:
        self.runtime.restart()
        LOG.info("Attempting to reset the rabbit-mq guest password to: %s",
                 colorizer.quote(RESET_BASE_PW))
        cmd = self.distro.get_command('rabbit-mq', 'change_password') + [RESET_BASE_PW]
        sh.execute(*cmd, run_as_root=True)
    except IOError:
        # Best-effort: failing here just means a manual reset is needed later.
        LOG.warn(("Could not reset the rabbit-mq password. You might have to manually "
                  "reset the password to %s before the next install"),
                 colorizer.quote(RESET_BASE_PW))
def _run(self, persona, component_order, instances):
    """Run the install phases: pre-install, package-install, configure,
    post-install.

    ``removals`` accumulates the phase files of opposing (uninstall)
    actions that must be cleared as each phase completes.
    """
    removals = ['pre-uninstall', 'post-uninstall']
    self._run_phase(
        action.PhaseFunctors(
            start=lambda i: LOG.info('Preinstalling %s.', colorizer.quote(i.name)),
            run=lambda i: i.pre_install(),
            end=None,
        ),
        component_order,
        instances,
        "pre-install",
        *removals
    )
    removals += ["package-uninstall", 'uninstall', "package-destroy"]
    dependency_handler_class = self.distro.dependency_handler_class
    dependency_handler = dependency_handler_class(self.distro,
                                                 self.root_dir,
                                                 instances.values())
    # Package installation happens once, via a single synthetic "general" component.
    general_package = "general"
    self._run_phase(
        action.PhaseFunctors(
            start=lambda i: LOG.info("Installing packages"),
            run=lambda i: dependency_handler.install(),
            end=None,
        ),
        [general_package],
        {general_package: instances[general_package]},
        "package-install",
        *removals
    )
    removals += ['unconfigure']
    self._run_phase(
        action.PhaseFunctors(
            start=lambda i: LOG.info('Configuring %s.', colorizer.quote(i.name)),
            run=lambda i: i.configure(),
            end=None,
        ),
        component_order,
        instances,
        "configure",
        *removals
    )
    self._run_phase(
        action.PhaseFunctors(
            start=lambda i: LOG.info('Post-installing %s.', colorizer.quote(i.name)),
            run=lambda i: i.post_install(),
            end=None
        ),
        component_order,
        instances,
        "post-install",
        *removals
    )
def run(self, persona):
    """Construct component instances for ``persona`` and execute the action."""
    groups = self._construct_instances(persona)
    LOG.info("Processing components for action %s.", colorizer.quote(self.name))
    for group in persona.matched_components:
        banner = ("Activating group %s in the following order"
                  % colorizer.quote(group.id))
        utils.log_iterable(group, header=banner, logger=LOG)
    self._on_start(persona, groups)
    self._run(persona, groups)
    self._on_finish(persona, groups)
def _run(self, persona, component_order, instances):
    """Run the install phases: pre-install, package-install, configure,
    post-install.

    ``removals`` accumulates the states reverted by each completed phase.
    """
    dependency_handler_class = self.distro.dependency_handler_class
    dependency_handler = dependency_handler_class(self.distro,
                                                  self.root_dir,
                                                  instances.values(),
                                                  self.cli_opts)
    removals = states.reverts("pre-install")
    self._run_phase(
        action.PhaseFunctors(
            start=lambda i: LOG.info('Preinstalling %s.', colorizer.quote(i.name)),
            run=lambda i: i.pre_install(),
            end=None,
        ),
        component_order,
        instances,
        "pre-install",
        *removals
    )
    removals.extend(states.reverts("package-install"))
    # Package installation happens once, via a single synthetic "general" component.
    general_package = "general"
    self._run_phase(
        action.PhaseFunctors(
            start=lambda i: LOG.info("Installing packages"),
            run=dependency_handler.install,
            end=None,
        ),
        [general_package],
        {general_package: instances[general_package]},
        "package-install",
        *removals
    )
    removals.extend(states.reverts("configure"))
    self._run_phase(
        action.PhaseFunctors(
            start=lambda i: LOG.info('Configuring %s.', colorizer.quote(i.name)),
            run=lambda i: i.configure(),
            end=None,
        ),
        component_order,
        instances,
        "configure",
        *removals
    )
    removals.extend(states.reverts("post-install"))
    self._run_phase(
        action.PhaseFunctors(
            start=lambda i: LOG.info('Post-installing %s.', colorizer.quote(i.name)),
            run=lambda i: i.post_install(),
            end=None
        ),
        component_order,
        instances,
        "post-install",
        *removals
    )
def _register(self, image_name, location): """Register image in glance.""" # Upload the kernel, if we have one kernel = location.pop('kernel', None) kernel_id = '' if kernel: kernel_image_name = "%s-vmlinuz" % (image_name) self._check_name(kernel_image_name) LOG.info('Adding kernel %s to glance.', colorizer.quote(kernel_image_name)) LOG.info("Please wait installing...") conf = { 'container_format': kernel['container_format'], 'disk_format': kernel['disk_format'], 'name': kernel_image_name, 'is_public': self._is_public, } kernel_id = self._create(kernel['file_name'], **conf) # Upload the ramdisk, if we have one initrd = location.pop('ramdisk', None) initrd_id = '' if initrd: ram_image_name = "%s-initrd" % (image_name) self._check_name(ram_image_name) LOG.info('Adding ramdisk %s to glance.', colorizer.quote(ram_image_name)) LOG.info("Please wait installing...") conf = { 'container_format': initrd['container_format'], 'disk_format': initrd['disk_format'], 'name': ram_image_name, 'is_public': self._is_public, } initrd_id = self._create(initrd['file_name'], **conf) # Upload the root, we must have one LOG.info('Adding image %s to glance.', colorizer.quote(image_name)) self._check_name(image_name) conf = { 'name': image_name, 'container_format': location['container_format'], 'disk_format': location['disk_format'], 'is_public': self._is_public, 'properties': {}, } if kernel_id or initrd_id: if kernel_id: conf['properties']['kernel_id'] = kernel_id if initrd_id: conf['properties']['ramdisk_id'] = initrd_id LOG.info("Please wait installing...") image_id = self._create(location['file_name'], **conf) return image_id
def pre_uninstall(self):
    """Delete any bridges this component created, before uninstalling."""
    bridges = self.get_option('bridges', default_value=[])
    if bridges:
        # Pass log arguments lazily (LOG.info(msg, args...)) instead of
        # eagerly %-formatting, consistent with the rest of the codebase.
        LOG.info("Attempting to delete %s bridges: %s.",
                 colorizer.quote(self.name), ", ".join(bridges))
        LOG.info("Ensuring %s service is started before we use it.",
                 colorizer.quote(self.name))
        # The service must be running to accept bridge-deletion commands.
        self.runtime.start()
        self.runtime.wait_active()
        for bridge in bridges:
            self._del_bridge(bridge)
def stop_app(self, program):
    """Attempt to stop ``program``; return True on success, False on failure."""
    LOG.info("Stopping program %s under component %s.",
             colorizer.quote(program), self.name)
    command = self.get_command("stop", program)
    stopped = True
    try:
        sh.execute(command, shell=True)
    except excp.ProcessExecutionError:
        LOG.error("Failed to stop program %s under component %s.",
                  colorizer.quote(program), self.name)
        stopped = False
    return stopped
def _print_status(self, component, result):
    """Pretty-print a status ``result`` (mapping, collection, or scalar)."""
    quoted_name = colorizer.quote(component.name)
    if isinstance(result, (dict)):
        LOG.info("Status of %s is:", quoted_name)
        for (name, status) in result.items():
            LOG.info("|-- %s --> %s.",
                     colorizer.quote(name, quote_color='blue'),
                     self._quote_status(status))
    elif isinstance(result, (list, set)):
        LOG.info("Status of %s is:", quoted_name)
        for status in result:
            LOG.info("|-- %s.", self._quote_status(status))
    else:
        LOG.info("Status of %s is %s.", quoted_name, self._quote_status(result))
def _log_pieces_found(self, src_type, root_fn, ramdisk_fn, kernel_fn):
    """Log a summary of the image pieces (root/ramdisk/kernel) located."""
    templates = [
        ("%s (root image)", root_fn),
        ("%s (ramdisk image)", ramdisk_fn),
        ("%s (kernel image)", kernel_fn),
    ]
    pieces = [template % (colorizer.quote(fn))
              for (template, fn) in templates if fn]
    if pieces:
        utils.log_iterable(pieces, logger=LOG,
                           header="Found %s images from a %s" % (len(pieces), src_type))
def _run(self, persona, component_order, instances):
    """Stop every component instance."""
    self._run_phase(
        PhaseFunctors(
            start=lambda i: LOG.info('Stopping %s.', colorizer.quote(i.name)),
            run=lambda i: i.stop(),
            end=lambda i, result: LOG.info("Stopped %s items.", colorizer.quote(result)),
        ),
        component_order,
        instances,
        # NOTE(review): phase name is capitalized here while other actions use
        # lowercase phase names -- confirm this matches the phase-file naming.
        "Stopped"
    )
def _run(self, persona, component_order, instances):
    """Run the uninstall phases: unconfigure, pre-uninstall, uninstall,
    post-uninstall.

    ``removals`` accumulates the phase files of opposing (install) actions
    that must be cleared as each phase completes.
    """
    removals = ['configure']
    self._run_phase(
        action.PhaseFunctors(
            start=lambda i: LOG.info('Unconfiguring %s.', colorizer.quote(i.name)),
            run=lambda i: i.unconfigure(),
            end=None,
        ),
        component_order,
        instances,
        'unconfigure',
        *removals
    )
    removals += ['post-install']
    self._run_phase(
        action.PhaseFunctors(
            start=None,
            run=lambda i: i.pre_uninstall(),
            end=None,
        ),
        component_order,
        instances,
        'pre-uninstall',
        *removals
    )
    # Package uninstall happens once, via a single synthetic "general" component.
    general_package = "general"
    dependency_handler = self.distro.dependency_handler_class(
        self.distro, self.root_dir, instances.values())
    self._run_phase(
        action.PhaseFunctors(
            start=lambda i: LOG.info("Uninstalling packages"),
            run=lambda i: dependency_handler.uninstall(),
            end=None,
        ),
        [general_package],
        {general_package: instances[general_package]},
        "uninstall",
        *removals
    )
    removals += ['install', 'download', 'configure', "download-patch",
                 'pre-install', 'post-install']
    self._run_phase(
        action.PhaseFunctors(
            start=lambda i: LOG.info('Post-uninstalling %s.', colorizer.quote(i.name)),
            run=lambda i: i.post_uninstall(),
            end=None,
        ),
        component_order,
        instances,
        'post-uninstall',
        *removals
    )
def clear_domains(self, virt_type, inst_prefix):
    """Destroy leftover libvirt domains whose names start with ``inst_prefix``."""
    libvirt = None
    try:
        # A late import is done since this code could be used before libvirt is actually
        # installed, and that will cause the top level python import to fail which will
        # make anvil not work, so import it dynamically to bypass the previous mechanism
        libvirt = importer.import_module('libvirt')
    except RuntimeError:
        # Unused exception binding removed; failure is handled just below.
        pass
    if not libvirt:
        LOG.warn(
            "Could not clear out libvirt domains, libvirt not available for python."
        )
        return
    virt_protocol = LIBVIRT_PROTOCOL_MAP.get(virt_type)
    if not virt_protocol:
        LOG.warn(
            "Could not clear out libvirt domains, no known protocol for virt type: %s",
            colorizer.quote(virt_type))
        return
    LOG.info(
        "Attempting to clear out leftover libvirt domains using protocol: %s",
        colorizer.quote(virt_protocol))
    try:
        self.restart_service()
        self.wait_active()
    except (excp.StartException, IOError) as e:
        LOG.warn("Could not restart the libvirt daemon due to: %s", e)
        return
    try:
        conn = libvirt.open(virt_protocol)
    except libvirt.libvirtError as e:
        LOG.warn(
            "Could not connect to libvirt using protocol %s due to: %s",
            colorizer.quote(virt_protocol), e)
        return
    with contextlib.closing(conn) as ch:
        try:
            defined_domains = ch.listDefinedDomains()
            kill_domains = list()
            for domain in defined_domains:
                if domain.startswith(inst_prefix):
                    kill_domains.append(domain)
            if kill_domains:
                utils.log_iterable(
                    kill_domains,
                    logger=LOG,
                    header="Found %s old domains to destroy" % (len(kill_domains)))
                for domain in sorted(kill_domains):
                    self._destroy_domain(libvirt, ch, domain)
        # Fixed Python-2-only "except X, e" syntax (inconsistent with the
        # "as e" handlers above and invalid on Python 3).
        except libvirt.libvirtError as e:
            LOG.warn("Could not clear out libvirt domains due to: %s", e)
def clear_domains(self, virt_type, inst_prefix):
    """Destroy leftover libvirt domains whose names start with ``inst_prefix``.

    Runs the cleanup with elevated (rooted) privileges.
    """
    libvirt = None
    try:
        # Late import: libvirt may not yet be installed when this code runs.
        libvirt = importer.import_module('libvirt')
    except RuntimeError:
        # Unused exception binding removed; failure is handled just below.
        pass
    if not libvirt:
        LOG.warn(
            "Could not clear out libvirt domains, libvirt not available for python."
        )
        return
    virt_protocol = LIBVIRT_PROTOCOL_MAP.get(virt_type)
    if not virt_protocol:
        LOG.warn(
            "Could not clear out libvirt domains, no known protocol for virt type: %s",
            colorizer.quote(virt_type))
        return
    with sh.Rooted(True):
        LOG.info(
            "Attempting to clear out leftover libvirt domains using protocol: %s",
            colorizer.quote(virt_protocol))
        try:
            self.restart_service()
            self.wait_active()
        except (excp.StartException, IOError) as e:
            LOG.warn("Could not restart the libvirt daemon due to: %s", e)
            return
        try:
            conn = libvirt.open(virt_protocol)
        except libvirt.libvirtError as e:
            LOG.warn(
                "Could not connect to libvirt using protocol %s due to: %s",
                colorizer.quote(virt_protocol), e)
            return
        with contextlib.closing(conn) as ch:
            try:
                defined_domains = ch.listDefinedDomains()
                kill_domains = list()
                for domain in defined_domains:
                    if domain.startswith(inst_prefix):
                        kill_domains.append(domain)
                if kill_domains:
                    utils.log_iterable(
                        kill_domains,
                        logger=LOG,
                        header="Found %s old domains to destroy" % (len(kill_domains)))
                    for domain in sorted(kill_domains):
                        self._destroy_domain(libvirt, ch, domain)
            # Fixed Python-2-only "except X, e" syntax (inconsistent with the
            # "as e" handlers above and invalid on Python 3).
            except libvirt.libvirtError as e:
                LOG.warn("Could not clear out libvirt domains due to: %s", e)
def post_install(self):
    """Create configured bridges after the base package install completes."""
    binstall.PkgInstallComponent.post_install(self)
    bridges = self.get_option('bridges', default_value=[])
    if bridges:
        # Pass log arguments lazily (LOG.info(msg, args...)) instead of
        # eagerly %-formatting, consistent with the rest of the codebase.
        LOG.info("Attempting to create %s bridges: %s.",
                 colorizer.quote(self.name), ", ".join(bridges))
        LOG.info("Ensuring %s service is started before we use it.",
                 colorizer.quote(self.name))
        # The service must be running to accept bridge-creation commands.
        self.runtime.start()
        self.runtime.wait_active()
        for bridge in bridges:
            self._add_bridge(bridge)
def _match_distro(distros):
    """Return the first distro supporting the current platform, or raise."""
    plt = platform.platform()
    for candidate in distros:
        if candidate.supports_platform(plt):
            LOG.info('Matched distro %s for platform %s',
                     colorizer.quote(candidate.name), colorizer.quote(plt))
            return candidate
    raise excp.ConfigException('No distro matched for platform %r' % plt)
def _run(self, persona, component_order, instances): self._run_phase( PhaseFunctors( start=lambda i: LOG.info('Stopping %s.', colorizer.quote(i.name)), run=lambda i: i.stop(), end=lambda i, result: LOG.info("Stopped %s items.", colorizer.quote(result)), ), component_order, instances, "Stopped" ) # Knock off and phase files that are connected to starting self._delete_phase_files(['start'])
def run_tests(self):
    """Run the component's tests, discarding stderr unless verbose mode is on."""
    app_dir = self.get_option('app_dir')
    if not sh.isdir(app_dir):
        LOG.warn("Unable to find application directory at %s, can not run %s tests.",
                 colorizer.quote(app_dir), colorizer.quote(self.name))
        return
    cmd = self._get_test_command()
    env = self._get_env()
    if self.get_bool_option("tests_verbose", default_value=False):
        # Verbose mode: inherit stderr. Also avoids the prior confusing
        # pattern of opening os.devnull and then rebinding null_fh to None
        # inside the with-block without ever using the open handle.
        sh.execute(*cmd, stdout_fh=None, stderr_fh=None,
                   cwd=app_dir, env_overrides=env)
    else:
        with open(os.devnull, 'wb') as null_fh:
            sh.execute(*cmd, stdout_fh=None, stderr_fh=null_fh,
                       cwd=app_dir, env_overrides=env)
def _run(self, persona, component_order, instances):
    """Stop all components, clearing the start-related phase files."""
    removals = ['pre-start', 'start', 'post-start']
    self._run_phase(
        action.PhaseFunctors(
            start=lambda i: LOG.info('Stopping %s.', colorizer.quote(i.name)),
            run=lambda i: i.stop(),
            end=lambda i, result: LOG.info("Stopped %s items.", colorizer.quote(result)),
        ),
        component_order,
        instances,
        "stopped",
        *removals
    )
def pre_uninstall(self):
    """Reset the rabbit-mq guest password to a known value before uninstall."""
    try:
        LOG.debug("Attempting to reset the rabbit-mq guest password to: %s",
                  colorizer.quote(RESET_BASE_PW))
        # rabbit-mq must be running before the password command can work.
        self.runtime.start()
        self.runtime.wait_active()
        cmd = self.distro.get_command('rabbit-mq', 'change_password') + [RESET_BASE_PW]
        sh.execute(cmd)
        # Restart so the new password actually takes effect.
        LOG.info("Restarting so that your rabbit-mq password is reflected.")
        self.runtime.restart()
        self.runtime.wait_active()
    except IOError:
        # Best-effort: failing here just means a manual reset is needed later.
        LOG.warn(("Could not reset the rabbit-mq password. You might have to manually "
                  "reset the password to %s before the next install"),
                 colorizer.quote(RESET_BASE_PW))
def run(self, persona):
    """Build component instances for ``persona``, then execute the action."""
    groups = self._construct_instances(persona)
    LOG.info("Processing components for action %s.", colorizer.quote(self.name))
    for group in persona.matched_components:
        utils.log_iterable(group, logger=LOG,
                           header=("Activating group %s in the following order"
                                   % colorizer.quote(group.id)))
    self._on_start(persona, groups)
    self._run(persona, groups)
    self._on_finish(persona, groups)
def download(self):
    """Clone or refresh the git repository named by ``self.uri``.

    Supports ``?branch=`` / ``?tag=`` query parameters on the uri and
    leaves the working tree checked out at the requested tag or branch.
    """
    branch = None
    tag = None
    uri = self.uri
    if uri.find("?") != -1:
        # If we use urlparser here it doesn't seem to work right??
        # TODO(harlowja), why??
        (uri, params) = uri.split("?", 1)
        params = parse_qs(params)
        if 'branch' in params:
            branch = params['branch'][0].strip()
        if 'tag' in params:
            tag = params['tag'][0].strip()
        uri = uri.strip()
    if not branch:
        branch = 'master'
    if tag:
        # Avoid 'detached HEAD state' message by moving to a
        # $tag-anvil branch for that tag
        new_branch = "%s-%s" % (tag, 'anvil')
        checkout_what = [tag, '-b', new_branch]
    else:
        # Set it up to track the remote branch correctly
        new_branch = branch
        checkout_what = ['-t', '-b', new_branch, 'origin/%s' % branch]
    if sh.isdir(self.store_where) and sh.isdir(sh.joinpths(self.store_where, '.git')):
        LOG.info("Existing git directory located at %s, leaving it alone.",
                 colorizer.quote(self.store_where))
        # do git clean -xdfq and git reset --hard to undo possible changes
        cmd = ["git", "clean", "-xdfq"]
        sh.execute(cmd, cwd=self.store_where)
        cmd = ["git", "reset", "--hard"]
        sh.execute(cmd, cwd=self.store_where)
    else:
        LOG.info("Downloading %s (%s) to %s.", colorizer.quote(uri),
                 branch, colorizer.quote(self.store_where))
        cmd = ["git", "clone", uri, self.store_where]
        sh.execute(cmd)
    if tag:
        LOG.info("Adjusting to tag %s.", colorizer.quote(tag))
    else:
        LOG.info("Adjusting branch to %s.", colorizer.quote(branch))
    # detach, drop new_branch if it exists, and checkout to new_branch
    # newer git allows branch resetting: git checkout -B $new_branch
    # so, all these are for compatibility with older RHEL git
    cmd = ["git", "rev-parse", "HEAD"]
    git_head = sh.execute(cmd, cwd=self.store_where)[0].strip()
    cmd = ["git", "checkout", git_head]
    sh.execute(cmd, cwd=self.store_where)
    cmd = ["git", "branch", "-D", new_branch]
    sh.execute(cmd, cwd=self.store_where, check_exit_code=False)
    cmd = ["git", "checkout"] + checkout_what
    sh.execute(cmd, cwd=self.store_where)
def _filter_download_requires(self):
    """Return the pip lines that still need downloading.

    Requirements already satisfiable as RPMs from a yum repository are
    excluded, logged per-repository, and recorded (as JSON lines) in
    ``self.yum_satisfies_filename``.
    """
    yum_map = self._get_known_yum_packages()
    # Remember the original requirement line for each key so that url-style
    # requirements can be passed through unchanged.
    pip_origins = {}
    for line in self.pips_to_install:
        req = pip_helper.extract_requirement(line)
        pip_origins[req.key] = line
    pips_to_download = []
    req_to_install = [
        pip_helper.extract_requirement(line)
        for line in self.pips_to_install
    ]
    requested_names = [req.key for req in req_to_install]
    rpm_names = self.py2rpm_helper.names_to_rpm_names(requested_names)
    satisfied_list = []
    for req in req_to_install:
        rpm_name = rpm_names[req.key]
        rpm_info = self._find_yum_match(yum_map, req, rpm_name)
        if not rpm_info:
            # We need the source requirement in case it's a url.
            pips_to_download.append(pip_origins[req.key])
        else:
            satisfied_list.append((req, rpm_name, rpm_info))
    yum_buff = six.StringIO()
    if satisfied_list:
        # Organize by repo
        repos = collections.defaultdict(list)
        for (req, rpm_name, rpm_info) in satisfied_list:
            repo = rpm_info['repo']
            rpm_found = '%s-%s' % (rpm_name, rpm_info['version'])
            repos[repo].append(
                "%s as %s" % (colorizer.quote(req), colorizer.quote(rpm_found)))
            dep_info = {
                'requirement': str(req),
                'rpm': rpm_info,
            }
            yum_buff.write(json.dumps(dep_info))
            yum_buff.write("\n")
        for r in sorted(repos.keys()):
            header = ("%s Python packages are already available "
                      "as RPMs from repository %s")
            header = header % (len(repos[r]), colorizer.quote(r))
            utils.log_iterable(sorted(repos[r]), logger=LOG, header=header,
                               color=None)
    sh.write_file(self.yum_satisfies_filename, yum_buff.getvalue())
    return pips_to_download
def run_tests(self):
    """Execute this component's test command from its application directory."""
    test_dir = self.get_option('app_dir')
    if not sh.isdir(test_dir):
        LOG.warn(
            "Unable to find application directory at %s, can not run %s tests.",
            colorizer.quote(test_dir), colorizer.quote(self.name))
        return
    test_cmd = self._get_test_command()
    test_env = self._get_env()
    sh.execute(*test_cmd, stdout_fh=None, stderr_fh=None,
               cwd=test_dir, env_overrides=test_env)
def _run(self, persona, component_order, instances):
    """Stop all components, clearing the states the "stopped" phase reverts."""
    removals = states.reverts("stopped")
    self._run_phase(
        action.PhaseFunctors(
            start=lambda i: LOG.info('Stopping %s.', colorizer.quote(i.name)),
            run=lambda i: i.stop(),
            end=lambda i, result: LOG.info("Stopped %s application(s).",
                                           colorizer.quote(result)),
        ),
        component_order,
        instances,
        "stopped",
        *removals
    )
def _locate_investigators(self, apps_started):
    """Pair each started application with an investigator for its run type.

    Returns a list of (app_name, investigator) tuples; applications whose
    investigator class cannot be loaded are skipped with a warning.
    """
    cached = {}
    pairs = []
    for (app_name, _trace_fn, run_type) in apps_started:
        # Reuse one investigator per run type instead of rebuilding it.
        investigator = cached.get(run_type)
        if investigator is None:
            try:
                investigator = importer.construct_entry_point(run_type, self)
            except RuntimeError as e:
                LOG.warn("Could not load class %s which should be used to investigate %s: %s",
                         colorizer.quote(run_type), colorizer.quote(app_name), e)
                continue
            cached[run_type] = investigator
        pairs.append((app_name, investigator))
    return pairs
def _run(self, persona, component_order, instances):
    """Unconfigure, pre-uninstall, then package-uninstall all components."""
    dependency_handler = self.distro.dependency_handler_class(
        self.distro, self.root_dir, instances.values(), self.cli_opts)
    # The removals list accumulates the states each phase reverts.
    removals = states.reverts("unconfigure")
    self._run_phase(
        action.PhaseFunctors(
            start=lambda i: LOG.info('Unconfiguring %s.', colorizer.quote(i.name)),
            run=lambda i: i.unconfigure(),
            end=None,
        ),
        component_order, instances, 'unconfigure', *removals)
    removals.extend(states.reverts('pre-uninstall'))
    self._run_phase(
        action.PhaseFunctors(
            start=None,
            run=lambda i: i.pre_uninstall(),
            end=None,
        ),
        component_order, instances, 'pre-uninstall', *removals)
    removals.extend(states.reverts("package-uninstall"))
    general_package = "general"
    self._run_phase(
        action.PhaseFunctors(
            start=lambda i: LOG.info("Uninstalling packages"),
            run=lambda i: dependency_handler.uninstall(),
            end=None,
        ),
        [general_package], {general_package: instances[general_package]},
        "package-uninstall", *removals)
def _filter_download_requires(self):
    """Split requested pips into ones to download and ones RPMs satisfy.

    Returns the original requirement strings (so url-style requirements
    survive untouched) for everything that must still be fetched via pip;
    requirements already provided by repository RPMs are only logged.
    """
    yum_map = self._get_known_yum_packages()
    # Parse each pip line exactly once (previously each line was extracted
    # twice); keep the raw line around so url-style requirements can be
    # re-emitted untouched.
    req_to_install = [pip_helper.extract_requirement(line)
                      for line in self.pips_to_install]
    pip_origins = dict(zip([req.key for req in req_to_install],
                           self.pips_to_install))
    requested_names = [req.key for req in req_to_install]
    rpm_to_install = self._convert_names_python2rpm(requested_names)
    pips_to_download = []
    satisfied_list = []
    for (req, rpm_name) in zip(req_to_install, rpm_to_install):
        (version, repo) = self._find_yum_match(yum_map, req, rpm_name)
        if not repo:
            # We need the source requirement in case it's a url.
            pips_to_download.append(pip_origins[req.key])
        else:
            satisfied_list.append((req, rpm_name, version, repo))
    if satisfied_list:
        # Organize by repo
        repos = collections.defaultdict(list)
        for (req, rpm_name, version, repo) in satisfied_list:
            repos[repo].append("%s as %s-%s" % (req, rpm_name, version))
        for r in sorted(repos.keys()):
            header = ("%s Python packages are already available "
                      "as RPMs from repository %s")
            header = header % (len(repos[r]), colorizer.quote(r))
            utils.log_iterable(sorted(repos[r]), logger=LOG, header=header)
    return pips_to_download
def install(self):
    """Process image installation."""
    url_fn = self._extract_url_fn()
    if not url_fn:
        raise IOError("Can not determine file name from url: %r" % self._url)
    if self._cache.is_valid:
        # Cache hit: reuse the previously unpacked image + metadata.
        LOG.info("Found valid cached image+metadata at: %s",
                 colorizer.quote(self._cache.path))
        image_details = self._cache.load_details()
    else:
        sh.mkdir(self._cache.path)
        if self._is_url_local():
            fetched_fn = self._url
        else:
            (fetched_fn, bytes_down) = down.UrlLibDownloader(
                self._url, sh.joinpths(self._cache.path, url_fn)).download()
            LOG.debug("For url %s we downloaded %s bytes to %s",
                      self._url, bytes_down, fetched_fn)
        image_details = Unpacker().unpack(url_fn, fetched_fn, self._cache.path)
        self._cache.save_details(image_details)
    image_name = self._generate_image_name(url_fn)
    image_id = self._register(image_name, image_details)
    return (image_name, image_id)
def _connect_roles(self, users, roles_made, tenants_made, users_made):
    """Attach previously created roles to users on their tenants.

    Each user entry's ``roles`` list holds "role:tenant" strings; both
    halves must name objects created earlier or a RuntimeError is raised.
    """
    roles_attached = set()
    for info in users:
        name = info['name']
        if name in roles_attached:
            LOG.warn("Already attached roles to user %s", colorizer.quote(name))
        roles_attached.add(name)
        user = users_made[name]
        for role_entry in info['roles']:
            # Role:Tenant
            (role_name, _sep, tenant_name) = role_entry.partition(":")
            if not role_name or not tenant_name:
                raise RuntimeError(
                    "Role or tenant name missing for user %s" % (name))
            # PEP 8 idiom: 'x not in y' rather than 'not x in y'.
            if role_name not in roles_made:
                raise RuntimeError(
                    "Role %s not previously created for user %s" %
                    (role_name, name))
            if tenant_name not in tenants_made:
                raise RuntimeError(
                    "Tenant %s not previously created for user %s" %
                    (tenant_name, name))
            user_role = {
                'user': user,
                'role': roles_made[role_name],
                'tenant': tenants_made[tenant_name],
            }
            self.client.roles.add_user_role(**user_role)
def _run(self, persona, component_order, instances):
    """Unconfigure, pre-uninstall, then package-uninstall all components."""
    # The removals list accumulates which prior states each phase undoes.
    removals = ['configure']
    self._run_phase(
        action.PhaseFunctors(
            start=lambda i: LOG.info('Unconfiguring %s.', colorizer.quote(i.name)),
            run=lambda i: i.unconfigure(),
            end=None,
        ),
        component_order, instances, 'unconfigure', *removals)
    removals.append('post-install')
    self._run_phase(
        action.PhaseFunctors(
            start=None,
            run=lambda i: i.pre_uninstall(),
            end=None,
        ),
        component_order, instances, 'pre-uninstall', *removals)
    removals.append('package-install')
    general_package = "general"
    dependency_handler = self.distro.dependency_handler_class(
        self.distro, self.root_dir, instances.values())
    self._run_phase(
        action.PhaseFunctors(
            start=lambda i: LOG.info("Uninstalling packages"),
            run=lambda i: dependency_handler.uninstall(),
            end=None,
        ),
        [general_package], {general_package: instances[general_package]},
        "package-uninstall", *removals)
def _configure_symlinks(self):
    """Create all configured symlinks; returns how many were made."""
    links = self.symlinks
    if not links:
        return 0
    # Reverse-sorted so that the shallower paths (ie /etc/blah) are linked
    # before the deeper ones (ie /etc/blah/blah); the order might not
    # matter, but this keeps it deterministic either way.
    link_srcs = sorted(links.keys(), reverse=True)
    link_nice = ["%s => %s" % (link, source)
                 for source in link_srcs
                 for link in links[source]]
    utils.log_iterable(link_nice, logger=LOG,
                       header="Creating %s sym-links" % (len(link_nice)))
    links_made = 0
    for source in link_srcs:
        for link in links[source]:
            try:
                LOG.debug("Symlinking %s to %s.", link, source)
                sh.symlink(source, link, tracewriter=self.tracewriter)
                links_made += 1
            except (IOError, OSError) as e:
                # Best-effort: a failed link is warned about, not fatal.
                LOG.warn("Symlinking %s to %s failed: %s",
                         colorizer.quote(link), colorizer.quote(source), e)
    return links_made
def _run(self, persona, component_order, instances):
    """Install all dependency packages, then run each component's tests."""
    dependency_handler = self.distro.dependency_handler_class(
        self.distro, self.root_dir, instances.values(), self.cli_opts)
    general_package = "general"
    self._run_phase(
        action.PhaseFunctors(
            start=lambda i: LOG.info("Installing packages"),
            run=lambda i: dependency_handler.install_all_deps(),
            end=None,
        ),
        [general_package], {general_package: instances[general_package]},
        "package-install-all-deps", *states.reverts("package-install-all-deps"))
    self._run_phase(
        action.PhaseFunctors(
            start=lambda i: LOG.info('Running tests of component %s.',
                                     colorizer.quote(i.name)),
            run=lambda i: i.run_tests(),
            end=None,
        ),
        component_order, instances, None)
def _verify_components(self, groups):
    """Ask every component instance in every group to verify itself."""
    for group, instances in groups:
        LOG.info("Verifying that the components of group %s are ready"
                 " to rock-n-roll.", colorizer.quote(group))
        for _name, instance in six.iteritems(instances):
            instance.verify()
def _configure_db_confs(self):
    """Adjust the distro mysql configuration file so it suits our needs."""
    LOG.info("Fixing up %s mysql configs.", colorizer.quote(self.distro.name))
    my_cnf = ini_parser.RewritableConfigParser(fns=[DBInstaller.MYSQL_CONF])
    my_cnf.remove_option('mysqld', 'skip-grant-tables')
    for (option, value) in (('default-storage-engine', 'InnoDB'),
                            ('bind-address', '0.0.0.0')):
        my_cnf.set('mysqld', option, value)
    sh.write_file_and_backup(DBInstaller.MYSQL_CONF, my_cnf.stringify())
def post_start(self):
    # One-time keystone initialization: runs only when the init marker
    # file is absent and the 'do-init' option is enabled.
    if not sh.isfile(self.init_fn) and self.get_bool_option('do-init'):
        self.wait_active()
        LOG.info("Running commands to initialize keystone.")
        (fn, contents) = utils.load_template(self.name, INIT_WHAT_FN)
        LOG.debug("Initializing with contents of %s", fn)
        # Collect shared params (endpoints, passwords...) from each service's
        # helper so the init template can reference them.
        params = {}
        params['keystone'] = khelper.get_shared_params(**utils.merge_dicts(self.options, khelper.get_shared_passwords(self)))
        params['glance'] = ghelper.get_shared_params(ip=self.get_option('ip'), **self.get_option('glance'))
        params['nova'] = nhelper.get_shared_params(ip=self.get_option('ip'), **self.get_option('nova'))
        params['neutron'] = net_helper.get_shared_params(ip=self.get_option('ip'), **self.get_option('neutron'))
        params['cinder'] = chelper.get_shared_params(ip=self.get_option('ip'), **self.get_option('cinder'))
        wait_urls = [
            params['keystone']['endpoints']['admin']['uri'],
            params['keystone']['endpoints']['public']['uri'],
        ]
        # Don't attempt initialization until both keystone endpoints respond.
        for url in wait_urls:
            utils.wait_for_url(url)
        # Expand the yaml template against the collected service params.
        init_what = utils.load_yaml_text(contents)
        init_what = utils.expand_template_deep(init_what, params)
        try:
            init_how = khelper.Initializer(params['keystone']['service_token'],
                                           params['keystone']['endpoints']['admin']['uri'])
            init_how.initialize(**init_what)
        except RuntimeError:
            LOG.exception("Failed to initialize keystone, is the keystone client library available?")
        else:
            # Writing this makes sure that we don't init again
            sh.write_file(self.init_fn, utils.prettify_yaml(init_what))
            LOG.info("If you wish to re-run initialization, delete %s",
                     colorizer.quote(self.init_fn))
def wait_for_url(url, max_attempts=5):
    """Poll ``url`` until it responds, sleeping with exponential backoff.

    Re-raises the last collected exception if the url never became
    active within ``max_attempts`` attempts.
    """
    LOG.info("Waiting for url %s to become active (max_attempts=%s)",
             colorizer.quote(url), max_attempts)

    def waiter(sleep_secs):
        LOG.info("Sleeping for %s seconds, %s is still not active.", sleep_secs,
                 colorizer.quote(url))
        sh.sleep(sleep_secs)

    def success(attempts):
        LOG.info("Url %s became active after %s attempts!",
                 colorizer.quote(url), attempts)

    excps = []
    attempts = 0
    for sleep_time in ExponentialBackoff(attempts=max_attempts):
        attempts += 1
        try:
            with contextlib.closing(urllib2.urlopen(urllib2.Request(url))) as req:
                req.read()
                success(attempts)
                return
        except urllib2.HTTPError as e:
            # NOTE(review): range(200, 600) covers essentially every HTTP
            # status a server can return, so the else branch below looks
            # effectively dead and any HTTP error (even 5xx) counts as
            # "active" — confirm this is intended; a sibling wait_for_url
            # in this file accepts only 200-498 and 501.
            if e.code in range(200, 600):
                # Should be ok, at least its responding...
                # although potentially incorrectly...
                success(attempts)
                return
            else:
                excps.append(e)
                waiter(sleep_time)
        except IOError as e:
            # Connection refused / reset etc.; remember it and retry.
            excps.append(e)
            waiter(sleep_time)
    if excps:
        raise excps[-1]
def _start_app(self, program, starter):
    """Start one program via the given starter and trace it for later stopping."""
    working_dir = program.working_dir
    if not working_dir:
        working_dir = self.get_option('app_dir')
    # Un-templatize whatever argv (program options) the program has specified
    # with whatever program params were retrieved to create the 'real' set
    # of program options (if applicable)
    params = self.app_params(program)
    if params:
        argv = [utils.expand_template(arg, params) for arg in program.argv]
    else:
        argv = program.argv
    LOG.debug("Starting %r using a %r", program.name, starter)
    # TODO(harlowja): clean this function params up (should just take a program)
    details_path = starter.start(program.name,
                                 app_pth=program.path,
                                 app_dir=working_dir,
                                 opts=argv)
    # This trace is used to locate details about what/how to stop
    LOG.info("Started program %s under component %s.",
             colorizer.quote(program.name), self.name)
    self.tracewriter.app_started(program.name, details_path, starter.name)
def _sync_db(self):
    """Run the keystone manage db_sync command against its database."""
    LOG.info("Syncing keystone to database: %s", colorizer.quote(DB_NAME))
    cmds = [{
        'cmd': MANAGE_CMD + ['db_sync'],
        'run_as_root': True,
    }]
    utils.execute_template(*cmds, cwd=self.bin_dir,
                           params=self.config_params(None))
def wait_for_url(url, max_attempts=3, wait_between=5):
    """Poll ``url`` until it responds, trying up to ``max_attempts`` times.

    A fixed ``wait_between`` second sleep separates attempts; if the url
    never responds, the last collected exception is re-raised.
    """
    excps = []
    LOG.info(
        "Waiting for url %s to become active (max_attempts=%s, seconds_between=%s)",
        colorizer.quote(url), max_attempts, wait_between)

    def waiter():
        LOG.info("Sleeping for %s seconds, %s is still not active.", wait_between,
                 colorizer.quote(url))
        sh.sleep(wait_between)

    def success(attempts):
        LOG.info("Url %s became active after %s attempts!",
                 colorizer.quote(url), attempts)

    for i in range(0, max_attempts):
        try:
            with contextlib.closing(urllib2.urlopen(
                    urllib2.Request(url))) as req:
                req.read()
                success(i + 1)
                return
        except urllib2.HTTPError as e:
            # Direct comparisons instead of membership in xrange(...)/[...]
            # avoid building a 299-element sequence per error; acceptance
            # set is unchanged: 200-498 or 501 counts as responding.
            if 200 <= e.code < 499 or e.code == 501:
                # Should be ok, at least its responding...
                success(i + 1)
                return
            else:
                excps.append(e)
                waiter()
        except IOError as e:
            # Connection refused / reset etc.; remember it and retry.
            excps.append(e)
            waiter()
    if excps:
        raise excps[-1]
def _do_network_init(self):
    """Create the initial nova fixed/floating networks (only once)."""
    # Skip entirely when already initialized or when disabled by option.
    if sh.isfile(self.net_init_fn) or not self.get_bool_option('do-network-init'):
        return
    # Figure out the commands to run
    cmds = []
    mp = {}
    if self.get_bool_option('enable_fixed'):
        # Create a fixed network
        mp['FIXED_NETWORK_SIZE'] = self.get_option('fixed_network_size',
                                                   default_value='256')
        mp['FIXED_RANGE'] = self.get_option('fixed_range',
                                            default_value='10.0.0.0/24')
        cmds.extend(FIXED_NET_CMDS)
    if self.get_bool_option('enable_floating'):
        # Create a floating network + test floating pool
        cmds.extend(FLOATING_NET_CMDS)
        mp['FLOATING_RANGE'] = self.get_option('floating_range',
                                               default_value='172.24.4.224/28')
        mp['TEST_FLOATING_RANGE'] = self.get_option('test_floating_range',
                                                    default_value='192.168.253.0/29')
        mp['TEST_FLOATING_POOL'] = self.get_option('test_floating_pool',
                                                   default_value='test')
    # Anything to run??
    if cmds:
        LOG.info("Creating your nova network to be used with instances.")
        utils.execute_template(*cmds, params=mp)
    # Writing this makes sure that we don't init again
    cmd_mp = {
        'cmds': cmds,
        'replacements': mp,
    }
    sh.write_file(self.net_init_fn, utils.prettify_yaml(cmd_mp))
    LOG.info("If you wish to re-run network initialization, delete %s",
             colorizer.quote(self.net_init_fn))
def _sync_db(self):
    """Run the glance db sync command against its database."""
    LOG.info("Syncing glance to database: %s",
             colorizer.quote(self.configurator.DB_NAME))
    sync_cmds = [{'cmd': SYNC_DB_CMD}]
    utils.execute_template(*sync_cmds, cwd=self.bin_dir,
                           params=self.config_params(None))
def _find_pieces(self, files, files_location):
    """
    Match files against the patterns in KERNEL_CHECKS,
    RAMDISK_CHECKS, and ROOT_CHECKS to determine which files
    contain which image parts.
    """
    utils.log_iterable(
        files, logger=LOG,
        header=
        "Looking at %s files from %s to find the kernel/ramdisk/root images" %
        (len(files), colorizer.quote(files_location)))
    # Each entry: (slot name, pattern set, debug message); first match wins
    # per file, later files overwrite earlier matches for the same slot.
    checks = [
        ('kernel', KERNEL_CHECKS, "Found kernel: %r"),
        ('ramdisk', RAMDISK_CHECKS, "Found ram disk: %r"),
        ('root', ROOT_CHECKS, "Found root image: %r"),
    ]
    found = {
        'kernel': None,
        'ramdisk': None,
        'root': None,
    }
    for fn in files:
        for (slot, patterns, message) in checks:
            if self._pat_checker(fn, patterns):
                found[slot] = fn
                LOG.debug(message % (fn))
                break
        else:
            LOG.debug("Unknown member %r - skipping" % (fn))
    return (found['root'], found['ramdisk'], found['kernel'])