def setUp(self):
    """Build a fresh InstallEngine, DOC subtree and CPIO transfer checkpoint."""
    # Drop any cached singleton so state from earlier tests cannot leak in.
    InstallEngine._instance = None
    InstallEngine()
    engine = InstallEngine.get_instance()
    self.engine = engine
    self.doc = engine.data_object_cache.volatile

    # DOC layout the checkpoint consumes: Software node holding a CPIOSpec.
    software = Software("CPIO_Transfer", "CPIO")
    spec = CPIOSpec()
    software.insert_children([spec])
    self.soft_node = software
    self.tr_node = spec
    self.doc.insert_children([software])

    # Checkpoint under test.
    self.tr_cpio = TransferCPIO("CPIO_Transfer")
def setUp(self):
    """Build a fresh InstallEngine and a P5I Software node with an IPS image."""
    # Reset the engine singleton before creating a new instance.
    InstallEngine._instance = None
    InstallEngine()
    engine = InstallEngine.get_instance()
    self.engine = engine
    self.doc = engine.data_object_cache.volatile

    # Software -> (P5ISpec, Destination -> Image) subtree in the DOC.
    software = Software("P5I transfer")
    spec = P5ISpec()
    destination = Destination()
    image = Image(IPS_IMG_DIR, "create")
    destination.insert_children([image])
    software.insert_children([spec, destination])
    self.doc.insert_children([software])

    self.soft_node = software
    self.tr_node = spec
    self.ips_image = image
def setUp(self):
    """Build a fresh InstallEngine, an IPS DOC subtree and the IPS checkpoint."""
    # Reset the engine singleton before creating a new instance.
    InstallEngine._instance = None
    InstallEngine()
    engine = InstallEngine.get_instance()
    self.engine = engine
    self.doc = engine.data_object_cache.volatile

    # Software -> (IPSSpec, Destination -> Image -> ImType) subtree.
    software = Software("IPS transfer")
    spec = IPSSpec()
    destination = Destination()
    image = Image(self.IPS_IMG_DIR, "create")
    image_type = ImType("full")
    image.insert_children([image_type])
    destination.insert_children([image])
    software.insert_children([spec, destination])
    self.doc.insert_children([software])

    self.soft_node = software
    self.tr_node = spec
    self.ips_image = image

    # Checkpoint under test.
    self.tr_ips = TransferIPS("IPS transfer")
def setUp(self): InstallEngine._instance = None InstallEngine() self.engine = InstallEngine.get_instance() self.doc = self.engine.data_object_cache.volatile self.soft_node = Software("SVR4Transfer", "SVR4") self.tr_node = SVR4Spec() self.soft_node.insert_children([self.tr_node]) self.doc.insert_children([self.soft_node]) self.tr_svr4 = TransferSVR4("SVR4Transfer") self.make_dummy_pkg(self.TEST_SRC_DIR + "/SUNWpkg1") self.make_dummy_pkg(self.TEST_SRC_DIR + "/SUNWpkg2") self.make_dummy_pkg(self.TEST_SRC_DIR + "/SUNWpkg3") if not os.path.isdir(AbstractSVR4.ADMIN_FILE_DIR): os.makedirs(AbstractSVR4.ADMIN_FILE_DIR, 0755)
def _prepare_engine(options):
    '''Initialize the InstallEngine'''
    InstallEngine(default_log=options.logname, loglevel=options.log_level,
                  debug=options.debug)

    # Don't set the global LOGGER until we're certain that logging
    # is up and running, so the main() except clause can figure out
    # if exception data can be written to the log or if it needs to
    # dump to stdout
    global LOGGER
    LOGGER = logging.getLogger(INSTALL_LOGGER_NAME)

    terminalui.init_logging(INSTALL_LOGGER_NAME)

    # if no directory in output profile path, explicitly provide the
    # default directory for the manifest writer
    if not os.path.dirname(options.profile):
        options.profile = './' + options.profile

    register_checkpoint(sc_profile=options.profile)
print indent_str(indent, str(slc)) indent -= 1 indent -= 1 indent -= 1 indent -= 1 # parse command line arguments options, args = parse_args() if options.libdiskmgt: print_libdiskmgt() if not options.require_td: sys.exit() # set up Target Discovery and execute it, finding the entire system InstallEngine() TD = TargetDiscovery("Test TD") # set dry_run to True so we don't label any drives on SPARC TD.execute(dry_run=True) if options.disk: print_disk(TD.doc) if options.partition: print_partition(TD.doc) if options.slc: print_slice(TD.doc) if options.zpool: print_zpool(TD.doc) if options.xml: print TD.doc.get_xml_tree_str()
def main():
    """Set up needed patches for 'archiveadm create' to achieve a more
    suitable 'Oracle clean' version of a distributable archive, then invoke
    the create.
    """
    argd = parse_cli()

    # Patch the image prep checkpoint. This is where we can do a more thorough
    # cleanup of the image and unset all publishers in the image as well. If
    # replacement publishers were passed by the caller, we can set them here,
    # after the image is prepared.
    orig_prepare_image = PrepareArchiveImage.execute

    @monkeypatch_method(PrepareArchiveImage)
    def execute(instance, dry_run=False):
        # Start by calling the original PrepareArchiveImage.execute method
        orig_prepare_image(instance, dry_run)

        # Now run our tactical nuke to deep clean the image
        clean_image(instance)

        # Modify all global zone publishers. Unset all by default, set
        # replacement ones if passed.
        set_pubs = argd.get('set_publishers', [])
        reset_publishers(instance, set_pubs)
        return

    # Patch the descriptor creation checkpoint. Here we can swap the archived
    # zone names with fakes if we were passed a mapping. Also, set in a mock
    # source hostname if provided. Rather than save off the original and add
    # to it as we did with PrepareArchiveImage, we just override this one.
    @monkeypatch_method(CreateArchiveDescriptor)
    def execute(instance, dry_run=False):
        # Do the same as the stock checkpoint to get things started
        instance._parse_doc()
        instance.logger.debug("CreateArchiveDescriptor: UnifiedArchive [%s]",
                              instance.ua.uuid)
        instance.ua.generate_descriptor()

        # Now we've run generate_descriptor(), we have the descriptor XML
        # ready in memory. Before we commit it out to disk in the OVF file,
        # modify it as needed.

        # First mock the source host name in the descriptor. If one was not
        # provided, use a default of 'solaris'.
        set_source_host(instance, argd.get('source_host', 'solaris'))

        # If provided, mock up the zonenames with the ones passed in.
        # FIX: removed the unused 'zonemap = {}' local -- the mapping is
        # built inline and passed straight to set_zonenames().
        if argd.get('zone_mappings'):
            # Keys are original zonenames, values are the mocks
            # ('orig:mock' pairs on the command line).
            orig = [s.split(':')[0] for s in argd.get('zone_mappings')]
            mock = [s.split(':')[1] for s in argd.get('zone_mappings')]
            set_zonenames(instance, dict(zip(orig, mock)))

        # Finally, commit the descriptor with the modified state.
        instance.ua.commit_descriptor()
        return

    # Begin the process of archive creation
    from solaris_install.archive import cli
    from solaris_install.archive.archive_operations import \
        create_unified_archive

    # Since this operation is outside of the archiveadm framework, all of the
    # preliminary set up has to be done.
    install_engine = InstallEngine(LOGFILE)
    install_engine.doc.volatile.insert_children(
        ApplicationData("archive-cleanuar", logname=LOGFILE))

    # FIX: was 'print("...") % LOGFILE', which only works because Python 2
    # parses this as a print *statement* applied to the whole '%' expression;
    # make the formatting explicit and unambiguous.
    print("\nLogging to %s\n" % LOGFILE)

    subcommand, options, args = cli._parse_input()

    # obtain the path, check it, and make it an absolute path, if necessary.
    path = args[0]
    uri = urlparse(path, scheme='file')
    if uri.scheme != 'file':
        raise SystemExit(_("archiveadm create: file-based path required"))
    path = os.path.abspath(path)

    # For the time being, we will call the archive api directly.
    create_unified_archive(path, zones=options.zones,
                           exclude_zones=options.exclude_zones,
                           exclude_ds=options.exclude_ds,
                           recovery=options.recovery,
                           skip_check=options.skip_check,
                           exclude_media=options.exclude_media,
                           root_only=options.root_only)
def setUp(self):
    """Give each test a pristine engine and a handle on the volatile DOC."""
    # Drop the cached singleton, then create and fetch a new engine.
    InstallEngine._instance = None
    InstallEngine()
    engine = InstallEngine.get_instance()
    self.engine = engine
    self.doc = engine.data_object_cache.volatile
def main():
    """ primary execution function for distro_const

    Parses the command line, sets up logging, validates the manifest's
    target section, builds the dataset hierarchy and registers/executes
    the checkpoints.  Returns 0 on success (including --list-checkpoints
    and an immediate pause), 1 on any error.
    """
    # clear the error service to be sure that we start with a clean slate
    errsvc.clear_error_list()

    options, args = parse_args()
    manifest = args[-1]
    pause_checkpoint = None
    resume_checkpoint = None

    verbose = options.verbose
    list_cps = options.list_checkpoints

    try:
        # We initialize the Engine with stop_on_error set so that if there are
        # errors during manifest parsing, the processing stops
        eng = InstallEngine(debug=False, stop_on_error=True)

        global DC_LOGGER
        DC_LOGGER = logging.getLogger(INSTALL_LOGGER_NAME)

        # set the logfile name (timestamped, plus detail/simple variants)
        log_name = "log.%s" % time.strftime("%Y-%m-%d.%H:%M")
        detail_log_name = "detail-%s" % log_name
        simple_log_name = "simple-%s" % log_name

        # create an additional FileHandler for a simple log
        base, logfile = os.path.split(DEFAULTLOG)
        simple_logname = os.path.join(base, "simple-" + logfile)
        simple_fh = FileHandler(simple_logname)
        simple_fh.setLevel(logging.INFO)
        DC_LOGGER.addHandler(simple_fh)

        if options.resume_checkpoint:
            resume_checkpoint = options.resume_checkpoint
            DC_LOGGER.info("distro_const will resume from: " +
                           resume_checkpoint)
        if options.pause_checkpoint:
            pause_checkpoint = options.pause_checkpoint
            DC_LOGGER.info("distro_const will pause at: " + pause_checkpoint)

        # create a simple StreamHandler to output messages to the screen
        set_stream_handler(DC_LOGGER, list_cps, verbose)

        base_dataset = None

        parse_manifest(manifest)

        # get a reference to the data object cache
        doc = eng.data_object_cache

        # validate the target section of the manifest
        zpool_name, base_dataset, base_action, base_dataset_mp = \
            validate_target()

        if list_cps:
            # set the execute flag of setup_build_dataset to 'False'
            # to prevent any actions from occurring.  The TI checkpoint
            # needs to be registered with the engine for list_checkpoints
            # to work correctly.
            setup_build_dataset(zpool_name, base_dataset, base_action,
                                base_dataset_mp, resume_checkpoint,
                                execute=False)

            # set the InstallEngine.dataset property to enable snapshots
            eng.dataset = os.path.join(zpool_name, base_dataset,
                                       "build_data")

            list_checkpoints(DC_LOGGER)
        else:
            (base_dataset_mp, build_data_mp, logs_mp, media_mp) = \
                setup_build_dataset(zpool_name, base_dataset, base_action,
                                    base_dataset_mp, resume_checkpoint)

            # update the DOC with actual directory values
            update_doc_paths(build_data_mp)

            # lock the dataset for the duration of the build
            with Lockfile(os.path.join(base_dataset_mp, DC_LOCKFILE)):
                # output the log file path to the screen and transfer the logs
                new_detaillog = os.path.join(logs_mp, detail_log_name)
                new_simplelog = os.path.join(logs_mp, simple_log_name)
                DC_LOGGER.info("Simple log: %s" % new_simplelog)
                DC_LOGGER.info("Detail Log: %s" % new_detaillog)
                DC_LOGGER.transfer_log(destination=new_detaillog)
                simple_fh.transfer_log(destination=new_simplelog)

                # set the http_proxy if one is specified in the manifest
                dc_set_http_proxy(DC_LOGGER)

                # reset the InstallEngine.dataset property to enable snapshots
                eng.dataset = os.path.join(zpool_name, base_dataset,
                                           "build_data")

                # register each checkpoint listed in the execution section
                registered_checkpoints = register_checkpoints(DC_LOGGER)

                # now populate the DOC with the common information needed
                # by the various checkpoints -- pkg_img_path, etc
                doc_dict = {"pkg_img_path": os.path.join(build_data_mp,
                                                         "pkg_image"),
                            "ba_build": os.path.join(build_data_mp,
                                                     "boot_archive"),
                            "tmp_dir": os.path.join(build_data_mp, "tmp"),
                            "media_dir": media_mp}
                doc.volatile.insert_children(
                    DataObjectDict(DC_LABEL, doc_dict, generate_xml=True))

                # if we're trying to pause at the very first checkpoint,
                # there's nothing to execute, so return 0
                if pause_checkpoint == registered_checkpoints[0][0]:
                    return 0

                execute_checkpoint(new_detaillog, resume_checkpoint,
                                   pause_checkpoint)

    # catch any errors and log them.
    # NOTE(review): BaseException also catches KeyboardInterrupt and
    # SystemExit -- presumably intentional so every abort gets logged;
    # confirm before narrowing to Exception.
    except BaseException as msg:
        if DC_LOGGER is not None:
            DC_LOGGER.exception(msg)
        else:
            # DC_LOGGER hasn't even been setup and we ran into an error
            print msg
        return 1
    finally:
        if DC_LOGGER is not None:
            DC_LOGGER.close()

    return 0
def prepare_engine(options):
    ''' Instantiate the engine, setup logging, and register all the
        checkpoints to be used for doing the install.
    '''
    eng = InstallEngine(debug=options.debug)

    # setup_logging() must be called after the engine is initialized.
    setup_logging(options.logname, options.log_level)

    terminalui.init_logging(INSTALL_LOGGER_NAME)

    # Information regarding checkpoints used for the Text Installer.
    # Each (name, module path, class/function) triple below is passed
    # straight to register_checkpoint; if any checkpoint's function
    # signature changes, these values need to change with it.
    # Registration order matters -- it is the execution order.
    pre_sysconfig = (
        (TARGET_DISCOVERY,
         "solaris_install/target/discovery", "TargetDiscovery"),
        (TRANSFER_PREP,
         "solaris_install/transfer/media_transfer",
         "init_prepare_media_transfer"),
        (VARSHARE_DATASET,
         "solaris_install/target/varshare", "VarShareDataset"),
        (TARGET_INIT,
         "solaris_install/target/instantiation", "TargetInstantiation"),
        # The following 3 are transfer checkpoints
        (TRANSFER_ROOT, "solaris_install/transfer/cpio", "TransferCPIO"),
        (TRANSFER_MISC, "solaris_install/transfer/cpio", "TransferCPIO"),
        (TRANSFER_MEDIA, "solaris_install/transfer/cpio", "TransferCPIO"),
    )
    for cp_name, cp_module, cp_value in pre_sysconfig:
        eng.register_checkpoint(cp_name, cp_module, cp_value)

    # sys config checkpoint must be registered after transfer checkpoints
    sysconfig.register_checkpoint()

    # rest of the checkpoints are for finishing up the install process
    post_sysconfig = (
        (CLEANUP_CPIO_INSTALL,
         "solaris_install/ict/cleanup_cpio_install", "CleanupCPIOInstall"),
        (INIT_SMF,
         "solaris_install/ict/initialize_smf", "InitializeSMF"),
        (BOOT_CONFIG,
         "solaris_install/boot/boot", "SystemBootMenu"),
        (DUMP_ADMIN,
         "solaris_install/ict/update_dumpadm", "UpdateDumpAdm"),
        (DEVICE_CONFIG,
         "solaris_install/ict/device_config", "DeviceConfig"),
        (APPLY_SYSCONFIG,
         "solaris_install/ict/apply_sysconfig", "ApplySysConfig"),
        (BOOT_ARCHIVE,
         "solaris_install/ict/boot_archive", "BootArchive"),
    )
    for cp_name, cp_module, cp_value in post_sysconfig:
        eng.register_checkpoint(cp_name, cp_module, cp_value)

    # Build up list of files to be added to DataObjectCache for transfer
    # to new boot environment.
    tf_dict = dict()
    tf_dict['/var/adm/messages'] = post_install_logs_path('messages')
    add_transfer_files_to_doc(TRANSFER_FILES, tf_dict)

    eng.register_checkpoint(TRANSFER_FILES,
                            "solaris_install/ict/transfer_files",
                            "TransferFiles")

    eng.register_checkpoint(CREATE_SNAPSHOT,
                            "solaris_install/ict/create_snapshot",
                            "CreateSnapshot")
def main():
    '''Main routine for the gui-install-er

    Enforces the root and single-instance startup requirements, parses
    command-line options, creates the InstallEngine and sets up logging.
    Exits via sys.exit()/parser.error() on startup failures.
    '''
    _init_locale()

    # This is needed or InstallEngine threading won't work
    gtk.gdk.threads_init()

    # check we are running as root
    if os.getuid() != 0:
        sys.exit(
            _("The %s installer must be run as "
              "root. Quitting.") % RELEASE)

    # refuse to start if another installer instance already holds the lock
    if other_instance_is_running():
        modal_dialog(
            _("Installer Startup Terminated"),
            _("Only one instance of this Installer is allowed. "
              "Another instance is already running."))
        sys.exit(
            _("Only one instance of this Installer is allowed. "
              "Another instance is already running."))
    write_pid_file()

    usage = "usage: %prog [-l FILE] [-v LEVEL] [-d]"
    parser = OptionParser(usage=usage, version="%prog 1.1")
    parser.add_option("-l", "--log-location", dest="logname",
                      help=_("Set log location to FILE (default: %default)"),
                      metavar="FILE", default=DEFAULT_LOG_LOCATION)
    parser.add_option("-v", "--log-level", dest="log_level",
                      default=None,
                      help=_("Set log verbosity to LEVEL. In order of "
                             "increasing verbosity, valid values are 'error' "
                             "'warn' 'info' 'debug' or 'input'\n[default:"
                             " %default]"),
                      choices=["error", "warn", "info", "debug", "input"],
                      metavar="LEVEL")
    parser.add_option("-d", "--debug", action="store_true", dest="debug",
                      default=False,
                      help=_("Enable debug mode. Sets "
                             "logging level to 'input' and enables CTRL-C for "
                             "killing the program\n"))
    options, args = parser.parse_args()

    # -d implies the most verbose log level unless -v was given explicitly
    if options.log_level is None:
        if options.debug:
            options.log_level = "debug"
        else:
            options.log_level = DEFAULT_LOG_LEVEL

    # NOTE(review): debug is hard-coded to True here rather than
    # options.debug -- looks unintentional given the -d option above;
    # confirm before changing.
    engine = InstallEngine(loglevel=options.log_level, debug=True)

    try:
        logger = setup_logging(options.logname, options.log_level)
    except IOError, err:
        parser.error("%s '%s'" % (err.strerror, err.filename))