def _unpack(self):
    """Unpack the downloaded image and locate its kernel/initrd/root pieces.

    Sets image_name/image (and kernel/initrd for tarballs) on self.
    Raises IOError when the download has an unrecognized extension.
    """
    parts = self.download_name.split('.')
    if self.download_name.endswith('.tgz') \
            or self.download_name.endswith('.tar.gz'):
        LOG.info('Extracting %s', self.download_file_name)
        self.image_name = self.download_name\
            .replace('.tgz', '').replace('.tar.gz', '')
        self.tmp_folder = shell.joinpths(Image.tmpdir, parts[0])
        shell.mkdir(self.tmp_folder)
        # Close the tar handle when done (the original leaked it).
        tar = tarfile.open(self.download_file_name)
        try:
            tar.extractall(self.tmp_folder)
        finally:
            tar.close()
        # Classify the extracted members by their file names.
        for file_ in shell.listdir(self.tmp_folder):
            if file_.find('vmlinuz') != -1:
                self.kernel = shell.joinpths(self.tmp_folder, file_)
            elif file_.find('initrd') != -1:
                self.initrd = shell.joinpths(self.tmp_folder, file_)
            elif file_.endswith('.img'):
                self.image = shell.joinpths(self.tmp_folder, file_)
    elif self.download_name.endswith('.img') \
            or self.download_name.endswith('.img.gz'):
        # Raw images need no extraction at all.
        self.image_name = self.download_name.split('.img')[0]
        self.image = self.download_file_name
    else:
        raise IOError('Unknown image format for download %s'
                      % (self.download_name))
def __init__(self, desired_subsystems, subsystem_info, runner,
             component_dir, all_instances, options, name, *args, **kargs):
    """Wire up a component with runner context and its directory layout."""
    # What the user asked for, plus this component's identity/options.
    self.desired_subsystems = desired_subsystems
    self.instances = all_instances
    self.component_name = name
    self.subsystem_info = subsystem_info
    self.options = options
    # The runner has a reference to us, so use a weakref here to
    # avoid breaking garbage collection.
    self.runner = weakref.proxy(runner)
    # Parts of the global runner context that we use
    self.cfg = runner.cfg
    self.pw_gen = runner.pw_gen
    self.distro = runner.distro
    # Required component directories
    self.component_dir = component_dir
    self.trace_dir = sh.joinpths(self.component_dir, settings.COMPONENT_TRACE_DIR)
    self.app_dir = sh.joinpths(self.component_dir, settings.COMPONENT_APP_DIR)
    self.cfg_dir = sh.joinpths(self.component_dir, settings.COMPONENT_CONFIG_DIR)
def _unpack_tar(self, file_name, file_location, tmp_dir):
    """Extract an ami-style tarball and describe its kernel/ramdisk/root parts."""
    (root_name, _) = os.path.splitext(file_name)
    (root_img_fn, ramdisk_fn, kernel_fn) = self._find_pieces(file_location)
    if not root_img_fn:
        msg = "Image %r has no root image member" % (file_name)
        raise RuntimeError(msg)
    extract_dir = sh.joinpths(tmp_dir, root_name)
    sh.mkdir(extract_dir)
    LOG.info("Extracting %r to %r", file_location, extract_dir)
    with contextlib.closing(tarfile.open(file_location, 'r')) as tfh:
        tfh.extractall(extract_dir)
    # Root image details always go in; kernel/ramdisk only when present.
    info = {
        'FILE_NAME': sh.joinpths(extract_dir, root_img_fn),
        'DISK_FORMAT': 'ami',
        'CONTAINER_FORMAT': 'ami',
    }
    if kernel_fn:
        info['kernel'] = {
            'FILE_NAME': sh.joinpths(extract_dir, kernel_fn),
            'DISK_FORMAT': 'aki',
            'CONTAINER_FORMAT': 'aki',
        }
    if ramdisk_fn:
        info['ramdisk'] = {
            'FILE_NAME': sh.joinpths(extract_dir, ramdisk_fn),
            'DISK_FORMAT': 'ari',
            'CONTAINER_FORMAT': 'ari',
        }
    return info
def _setup_binaries(self): sh.move(sh.joinpths(self.cfg_dir, SWIFT_MAKERINGS), self.makerings_file) sh.chmod(self.makerings_file, 0777) self.tracewriter.file_touched(self.makerings_file) sh.move(sh.joinpths(self.cfg_dir, SWIFT_STARTMAIN), self.startmain_file) sh.chmod(self.startmain_file, 0777) self.tracewriter.file_touched(self.startmain_file)
def _unpack_tar(self, file_name, file_location, tmp_dir):
    """Extract a tarball, classifying members as kernel/ramdisk/root image.

    Returns a dict of extracted member locations (always contains
    'image'); raises RuntimeError when no root image member exists.
    """
    (root_name, _) = os.path.splitext(file_name)
    kernel_fn = None
    ramdisk_fn = None
    root_img_fn = None
    # Open the tar only once: scan members, then extract with the same
    # handle (the original opened and parsed the file twice).
    with contextlib.closing(tarfile.open(file_location, "r")) as tfh:
        for tmemb in tfh.getmembers():
            fn = tmemb.name
            if KERNEL_FN_MATCH.match(fn):
                kernel_fn = fn
                LOG.debug("Found kernel: %r" % (fn))
            elif RAMDISK_FN_MATCH.match(fn):
                ramdisk_fn = fn
                LOG.debug("Found ram disk: %r" % (fn))
            elif IMAGE_FN_MATCH.match(fn):
                root_img_fn = fn
                LOG.debug("Found root image: %r" % (fn))
            else:
                LOG.debug("Unknown member %r - skipping" % (fn))
        if not root_img_fn:
            msg = "Image %r has no root image member" % (file_name)
            raise RuntimeError(msg)
        extract_dir = sh.joinpths(tmp_dir, root_name)
        sh.mkdir(extract_dir)
        LOG.info("Extracting %r to %r", file_location, extract_dir)
        tfh.extractall(extract_dir)
    locations = dict()
    if kernel_fn:
        locations["kernel"] = sh.joinpths(extract_dir, kernel_fn)
    if ramdisk_fn:
        locations["ramdisk"] = sh.joinpths(extract_dir, ramdisk_fn)
    locations["image"] = sh.joinpths(extract_dir, root_img_fn)
    return locations
def _get_param_map(self, app_name):
    """Add the quantum config-file location for the given app."""
    params = comp.ProgramRuntime._get_param_map(self, app_name)
    # Map each known app to the param key + conf file it needs.
    conf_by_app = {
        APP_Q_AGENT: ("OVS_CONFIG_FILE", AGENT_CONF),
        APP_Q_SERVER: ("QUANTUM_CONFIG_FILE", QUANTUM_CONF),
    }
    if app_name in conf_by_app:
        (param_key, conf_fn) = conf_by_app[app_name]
        params[param_key] = sh.joinpths(self.cfg_dir, conf_fn)
    return params
def _get_param_map(self, app_name):
    """Add quantum config-file locations (agent conf is buried in the checkout)."""
    params = comp.ProgramRuntime._get_param_map(self, app_name)
    if app_name == APP_Q_AGENT:
        params['OVS_CONFIG_FILE'] = sh.joinpths(*([self.appdir] + AGENT_LOC + [AGENT_CONF]))
    elif app_name == APP_Q_SERVER:
        params['QUANTUM_CONFIG_FILE'] = sh.joinpths(self.appdir, CONFIG_DIR, QUANTUM_CONF)
    return params
def _create_nodes(self):
    """Create device dirs, data symlinks and configs for the 4 swift nodes."""
    for node_num in range(1, 5):
        self.tracewriter.make_dir(sh.joinpths(self.fs_dev, '%d/node' % node_num))
        self.tracewriter.symlink(sh.joinpths(self.fs_dev, str(node_num)),
                                 sh.joinpths(self.datadir, str(node_num)))
        # Each node gets a 5-port window starting at 6010.
        self._create_node_config(node_num, 6010 + (node_num - 1) * 5)
    self._delete_templates()
def _get_target_config_name(self, config_name):
    """Map horizon config names to their on-disk target locations."""
    if config_name == HORIZON_PY_CONF:
        return sh.joinpths(self.dash_dir, *HORIZON_PY_CONF_TGT)
    if config_name == HORIZON_APACHE_CONF:
        # This may require sudo of the whole program to be able to write here.
        return sh.joinpths(*HORIZON_APACHE_TGT)
    return comp.PythonInstallComponent._get_target_config_name(self, config_name)
def _fix_quantum(self):
    """Create a stub quantum package when the real client is not enabled."""
    if utils.service_enabled(settings.QUANTUM_CLIENT, self.instances, False):
        return
    # Make the fake quantum (apparently needed so imports don't fail???)
    # TODO remove this...
    quantum_dir = sh.joinpths(self.dash_dir, 'quantum')
    if not sh.isdir(quantum_dir):
        self.tracewriter.make_dir(quantum_dir)
        self.tracewriter.touch_file(sh.joinpths(quantum_dir, '__init__.py'))
        self.tracewriter.touch_file(sh.joinpths(quantum_dir, 'client.py'))
def _fix_quantum(self):
    """Create a stub quantum package so horizon's imports succeed."""
    if utils.service_enabled(settings.QUANTUM_CLIENT, self.instances, False):
        return
    # Make the fake quantum (apparently needed so imports don't fail???)
    # TODO remove this...
    quantum_dir = sh.joinpths(self.dash_dir, 'quantum')
    if not sh.isdir(quantum_dir):
        self.tracewriter.dirs_made(*sh.mkdirslist(quantum_dir))
        for stub_fn in FAKE_QUANTUM_FILES:
            self.tracewriter.file_touched(sh.touch_file(sh.joinpths(quantum_dir, stub_fn)))
def _create_nodes(self):
    """Build device dirs, data symlinks and per-node configs for 4 swift nodes."""
    for i in range(1, 5):
        # BUG FIX: mkdirslist() returns a list of created dirs and must be
        # unpacked into dirs_made() (matching every other dirs_made caller).
        self.tracewriter.dirs_made(*sh.mkdirslist(sh.joinpths(self.fs_dev, '%d/node' % i)))
        link_tgt = sh.joinpths(self.datadir, str(i))
        sh.symlink(sh.joinpths(self.fs_dev, str(i)), link_tgt)
        self.tracewriter.symlink_made(link_tgt)
        # Each node gets a 5-port window starting at 6010.
        start_port = (6010 + (i - 1) * 5)
        self._create_node_config(i, start_port)
    self._delete_templates()
def _get_target_config_name(self, config_fn):
    """Resolve plugin/agent configs to their buried checkout locations."""
    loc_by_conf = {
        PLUGIN_CONF: PLUGIN_LOC,
        AGENT_CONF: AGENT_LOC,
    }
    if config_fn in loc_by_conf:
        return sh.joinpths(*([self.appdir] + loc_by_conf[config_fn] + [config_fn]))
    return comp.PkgInstallComponent._get_target_config_name(self, config_fn)
def _get_source_config(self, config_fn):
    """Return (path, contents) for configs sourced from the config dir."""
    # Logging conf lives under a different source file name.
    src_names = {POLICY_JSON: POLICY_JSON, LOGGING_CONF: LOGGING_SOURCE_FN}
    if config_fn in src_names:
        fn = sh.joinpths(self.cfgdir, src_names[config_fn])
        return (fn, sh.load_file(fn))
    return comp.PythonInstallComponent._get_source_config(self, config_fn)
def _get_symlinks(self):
    """Extend the base symlink set with the apache conf + optional quantum link."""
    links = comp.PythonInstallComponent._get_symlinks(self)
    links[self._get_target_config_name(HORIZON_APACHE_CONF)] = APACHE_CONF_TARGETS[self.distro]
    if utils.service_enabled(settings.QUANTUM_CLIENT, self.instances, False):
        # TODO remove this junk, blah, puke that we have to do this
        qc = self.instances[settings.QUANTUM_CLIENT]
        links[sh.joinpths(qc.appdir, 'quantum')] = sh.joinpths(self.dash_dir, 'quantum')
    return links
def _create_node_config(self, node_number, port):
    """Write object/container/account server configs for one node.

    Port increments per server type within the node's port window.
    """
    for server_type in ['object', 'container', 'account']:
        replacements = {
            '%NODE_PATH%': sh.joinpths(self.datadir, str(node_number)),
            '%BIND_PORT%': str(port),
            '%LOG_FACILITY%': str(2 + node_number),
        }
        sh.copy_replace_file(sh.joinpths(self.cfg_dir, '%s-server.conf' % server_type),
                             sh.joinpths(self.cfg_dir, '%s-server/%d.conf' % (server_type, node_number)),
                             replacements)
        port += 1
def __init__(self, *args, **kargs):
    """Set up nova install dirs and figure out which subsystems are wanted."""
    comp.PythonInstallComponent.__init__(self, *args, **kargs)
    self.bin_dir = sh.joinpths(self.app_dir, BIN_DIR)
    self.cfg_dir = sh.joinpths(self.app_dir, CONFIG_DIR)
    self.paste_conf_fn = self._get_target_config_name(PASTE_CONF)
    # Subsystem toggles come straight from what was requested.
    self.volumes_enabled = (NVOL in self.desired_subsystems)
    self.xvnc_enabled = (NXVNC in self.desired_subsystems)
def __init__(self, *args, **kargs):
    """Set up nova dirs; no component opts means every subsystem is on."""
    comp.PythonInstallComponent.__init__(self, TYPE, *args, **kargs)
    self.bindir = sh.joinpths(self.appdir, BIN_DIR)
    self.cfgdir = sh.joinpths(self.appdir, CONFIG_DIR)
    self.paste_conf_fn = self._get_target_config_name(PASTE_CONF)
    # Empty opts == enable everything; otherwise check membership.
    self.volumes_enabled = (not self.component_opts or NVOL in self.component_opts)
    self.xvnc_enabled = (not self.component_opts or NXVNC in self.component_opts)
def _get_symlinks(self):
    """Build the symlink map: apache conf plus an optional quantum source link."""
    links = {
        self._get_target_config_name(HORIZON_APACHE_CONF): APACHE_CONF_TARGETS[self.distro],
    }
    if settings.QUANTUM_CLIENT in self.instances:
        # TODO remove this junk, blah, puke that we have to do this
        qc = self.instances[settings.QUANTUM_CLIENT]
        src_pth = sh.joinpths(qc.appdir, 'quantum')
        if sh.isdir(src_pth):
            links[src_pth] = sh.joinpths(self.dash_dir, 'quantum')
    return links
def __create_node_config(self, node_number, port):
    """Generate the three per-node server configs, one port per server type."""
    for server_type in ['object', 'container', 'account']:
        src_fn = sh.joinpths(self.cfgdir, '%s-server.conf' % server_type)
        tgt_fn = sh.joinpths(self.cfgdir, '%s-server/%d.conf' % (server_type, node_number))
        sh.copy_replace_file(src_fn, tgt_fn, {
            '%NODE_PATH%': sh.joinpths(self.datadir, str(node_number)),
            '%BIND_PORT%': str(port),
            '%LOG_FACILITY%': str(2 + node_number),
        })
        port += 1
def _fake_quantum(self):
    """Stub out a quantum module so horizon's unconditional import passes."""
    # Horizon currently imports quantum even if you aren't using it.
    # Instead of installing quantum we can create a simple module
    # that will pass the initial imports.
    if settings.QUANTUM in self.instances:
        return
    # Make the fake quantum
    quantum_dir = sh.joinpths(self.dash_dir, 'quantum')
    self.tracewriter.make_dir(quantum_dir)
    self.tracewriter.touch_file(sh.joinpths(quantum_dir, '__init__.py'))
    self.tracewriter.touch_file(sh.joinpths(quantum_dir, 'client.py'))
def _get_source_config(self, config_fn):
    """Pull policy/logging configs straight out of the checkout's etc dir."""
    # FIXME, maybe we shouldn't be sucking these from the checkout??
    etc_names = {POLICY_JSON: POLICY_JSON, LOGGING_CONF: LOGGING_SOURCE_FN}
    if config_fn in etc_names:
        fn = sh.joinpths(self.app_dir, 'etc', etc_names[config_fn])
        return (fn, sh.load_file(fn))
    return comp.PythonInstallComponent._get_source_config(self, config_fn)
def __init__(self, component_name, **kargs):
    """Capture shared runner state and compute this component's directories."""
    self.cfg = kargs.get("config")
    self.packager = kargs.get("packager")
    self.distro = kargs.get("distro")
    self.root = kargs.get("root")
    self.instances = kargs.get("instances")
    self.component_opts = kargs.get('opts')
    self.component_name = component_name
    # Standard per-component directory layout under the root.
    self.component_root = sh.joinpths(self.root, component_name)
    self.tracedir = sh.joinpths(self.component_root, settings.COMPONENT_TRACE_DIR)
    self.appdir = sh.joinpths(self.component_root, settings.COMPONENT_APP_DIR)
    self.cfgdir = sh.joinpths(self.component_root, settings.COMPONENT_CONFIG_DIR)
def _get_source_config(self, config_fn):
    """Load plugin/agent configs from their spots inside the checkout."""
    if config_fn == PLUGIN_CONF:
        srcfn = sh.joinpths(self.app_dir, "etc", config_fn)
    elif config_fn == AGENT_CONF:
        # WHY U SO BURIED....
        srcfn = sh.joinpths(self.app_dir, "etc", "quantum", "plugins",
                            "openvswitch", config_fn)
    else:
        return comp.PkgInstallComponent._get_source_config(self, config_fn)
    return (srcfn, sh.load_file(srcfn))
def _get_apps_to_start(self):
    """List quantum apps (server/agent) to launch based on enabled services."""
    apps = comp.ProgramRuntime._get_apps_to_start(self)
    if self.q_vswitch_service:
        apps.append({
            "name": APP_Q_SERVER,
            "path": sh.joinpths(self.app_dir, "bin", APP_Q_SERVER),
        })
    if self.q_vswitch_agent:
        # WHY U SO BURIED....
        agent_pth = sh.joinpths(self.app_dir, "quantum", "plugins",
                                "openvswitch", "agent", APP_Q_AGENT)
        apps.append({"name": APP_Q_AGENT, "path": agent_pth})
    return apps
def _get_source_config(self, config_fn):
    """Read plugin/agent config contents from their checkout locations."""
    loc_by_conf = {PLUGIN_CONF: PLUGIN_LOC, AGENT_CONF: AGENT_LOC}
    if config_fn in loc_by_conf:
        srcfn = sh.joinpths(*([self.appdir] + loc_by_conf[config_fn] + [config_fn]))
        return (srcfn, sh.load_file(srcfn))
    return comp.PkgInstallComponent._get_source_config(self, config_fn)
def _get_apps_to_start(self):
    """Collect the quantum server/agent app descriptors to be started."""
    apps = comp.ProgramRuntime._get_apps_to_start(self)
    if self.q_vswitch_service:
        apps.append({
            'name': APP_Q_SERVER,
            'path': sh.joinpths(self.appdir, BIN_DIR, APP_Q_SERVER),
        })
    if self.q_vswitch_agent:
        apps.append({
            'name': APP_Q_AGENT,
            'path': sh.joinpths(*([self.appdir] + AGENT_BIN_LOC + [APP_Q_AGENT])),
        })
    return apps
def _get_apps_to_start(self):
    """Describe the quantum server/agent apps to be launched."""
    apps = comp.ProgramRuntime._get_apps_to_start(self)
    if self.q_vswitch_service:
        apps.append({
            'name': APP_Q_SERVER,
            'path': sh.joinpths(self.app_dir, 'bin', APP_Q_SERVER),
        })
    if self.q_vswitch_agent:
        # WHY U SO BURIED....
        agent_pth = sh.joinpths(self.app_dir, "quantum", "plugins",
                                "openvswitch", 'agent', APP_Q_AGENT)
        apps.append({'name': APP_Q_AGENT, 'path': agent_pth})
    return apps
def _get_source_config(self, config_fn):
    """Serve the root conf from the config dir; defer everything else."""
    if config_fn != ROOT_CONF:
        return comp.PythonInstallComponent._get_source_config(self, config_fn)
    srcfn = sh.joinpths(self.cfgdir, config_fn)
    return (srcfn, sh.load_file(srcfn))
def _get_param_map(self, config_fn):
    """Build replacement params for horizon's apache conf (or the default set)."""
    mp = dict()
    if config_fn == HORIZON_APACHE_CONF:
        (user, group) = self._get_apache_user_group()
        mp['USER'] = user
        mp['GROUP'] = group
        mp['ACCESS_LOG'] = sh.joinpths(self.log_dir, APACHE_ACCESS_LOG_FN)
        mp['ERROR_LOG'] = sh.joinpths(self.log_dir, APACHE_ERROR_LOG_FN)
        mp['HORIZON_DIR'] = self.app_dir
        mp['HORIZON_PORT'] = self.cfg.getdefaulted('horizon', 'port', APACHE_DEF_PORT)
        mp['VPN_DIR'] = sh.joinpths(self.app_dir, "vpn")
    else:
        mp['OPENSTACK_HOST'] = self.cfg.get('host', 'ip')
    return mp
def _fix_log_dir(self): # This seems needed... # # Due to the following: # <<< Restarting rabbitmq-server: RabbitMQ is not running # <<< sh: /var/log/rabbitmq/startup_log: Permission denied # <<< FAILED - check /var/log/rabbitmq/startup_{log, _err} # # See: http://lists.rabbitmq.com/pipermail/rabbitmq-discuss/2011-March/011916.html # This seems like a bug, since we are just using service init and service restart... # And not trying to run this service directly... base_dir = sh.joinpths("/", 'var', 'log', 'rabbitmq') if sh.isdir(base_dir): with sh.Rooted(True): # Seems like we need root perms to list that directory... for fn in sh.listdir(base_dir): if re.match("(.*?)(err|log)$", fn, re.I): sh.chmod(sh.joinpths(base_dir, fn), 0666)
def _setup_cleaner(self): LOG.info("Configuring cleaner template %r", CLEANER_DATA_CONF) (_, contents) = utils.load_template(self.component_name, CLEANER_DATA_CONF) # FIXME, stop placing in checkout dir... tgt_fn = sh.joinpths(self.bin_dir, CLEANER_DATA_CONF) sh.write_file(tgt_fn, contents) sh.chmod(tgt_fn, 0755) self.tracewriter.file_touched(tgt_fn)
def _setup_network_initer(self): LOG.info("Configuring nova network initializer template %s.", NET_INIT_CONF) (_, contents) = utils.load_template(self.component_name, NET_INIT_CONF) params = self._get_param_map(NET_INIT_CONF) contents = utils.param_replace(contents, params, True) tgt_fn = sh.joinpths(self.bin_dir, NET_INIT_CONF) sh.write_file(tgt_fn, contents) sh.chmod(tgt_fn, 0755) self.tracewriter.file_touched(tgt_fn)
def _get_apps_to_start(self):
    """Describe one startable app per desired subsystem."""
    return [{
        'name': SUB_COMPONENT_NAME_MAP[subsys],
        'path': sh.joinpths(self.bin_dir, SUB_COMPONENT_NAME_MAP[subsys]),
    } for subsys in self.desired_subsystems]
def _get_param_map(self, config_fn):
    """Fill config/cmd params with host, dirs and shared keystone values."""
    mp = comp.PythonInstallComponent._get_param_map(self, config_fn)
    mp.update({
        'SERVICE_HOST': self.cfg.get('host', 'ip'),
        'DEST': self.app_dir,
        'BIN_DIR': self.bin_dir,
        'CONFIG_FILE': sh.joinpths(self.cfg_dir, ROOT_CONF),
    })
    mp.update(get_shared_params(self.cfg, self.pw_gen))
    return mp
def _get_param_map(self, app_name):
    """Point the vnc proxy at nova's config file when nova is around."""
    params = comp.ProgramRuntime._get_param_map(self, app_name)
    if app_name == VNC_PROXY_APP and 'nova' in self.options:
        nova_name = self.options['nova']
        if nova_name in self.instances:
            # FIXME: Have to reach into the nova conf (puke)
            nova_runtime = self.instances[nova_name]
            params['NOVA_CONF'] = sh.joinpths(nova_runtime.cfg_dir, nova.API_CONF)
    return params
def _get_source_config(self, config_fn):
    """Load configs from the checkout's etc dir, except the catalog conf."""
    if config_fn == CATALOG_CONF:
        return comp.PythonInstallComponent._get_source_config(self, config_fn)
    # Logging conf is stored under a different source file name.
    real_fn = LOGGING_SOURCE_FN if config_fn == LOGGING_CONF else config_fn
    fn = sh.joinpths(self.app_dir, 'etc', real_fn)
    return (fn, sh.load_file(fn))
def _configure_network_settings(self, nova_conf):
    """Add networking options (quantum/melange or classic nova-network) to nova_conf."""
    # TODO this might not be right....
    if 'quantum' in self.options:
        nova_conf.add('network_manager', QUANTUM_MANAGER)
        hostip = self.cfg.get('host', 'ip')
        nova_conf.add('quantum_connection_host',
                      self.cfg.getdefaulted('quantum', 'q_host', hostip))
        nova_conf.add('quantum_connection_port',
                      self.cfg.getdefaulted('quantum', 'q_port', '9696'))
        if self.cfg.get('quantum', 'q_plugin') == 'openvswitch':
            for (key, value) in QUANTUM_OPENSWITCH_OPS.items():
                nova_conf.add(key, value)
        # NOTE(review): melange handling appears nested under the quantum
        # branch (IPAM via melange only makes sense with quantum) — confirm.
        if 'melange' in self.options:
            nova_conf.add('quantum_ipam_lib', QUANTUM_IPAM_LIB)
            nova_conf.add('use_melange_mac_generation', True)
            nova_conf.add('melange_host',
                          self.cfg.getdefaulted('melange', 'm_host', hostip))
            nova_conf.add('melange_port',
                          self.cfg.getdefaulted('melange', 'm_port', '9898'))
    else:
        nova_conf.add('network_manager',
                      NET_MANAGER_TEMPLATE % (self._getstr('network_manager', DEF_NET_MANAGER)))
    # Configs dhcp bridge stuff???
    # TODO: why is this the same as the nova.conf?
    nova_conf.add('dhcpbridge_flagfile', sh.joinpths(self.cfg_dir, API_CONF))
    # Network prefix for the IP network that all the projects for future VM guests reside on. Example: 192.168.0.0/12
    nova_conf.add('fixed_range', self._getstr('fixed_range'))
    # The value for vlan_interface may default to the the current value
    # of public_interface. We'll grab the value and keep it handy.
    public_interface = self._getstr('public_interface')
    vlan_interface = self._getstr('vlan_interface', public_interface)
    # Do a little check to make sure actually have that interface/s
    if not utils.is_interface(public_interface):
        msg = "Public interface %s is not a known interface" % (public_interface)
        raise exceptions.ConfigException(msg)
    if not utils.is_interface(vlan_interface):
        msg = "VLAN interface %s is not a known interface" % (vlan_interface)
        raise exceptions.ConfigException(msg)
    nova_conf.add('public_interface', public_interface)
    nova_conf.add('vlan_interface', vlan_interface)
    # This forces dnsmasq to update its leases table when an instance is terminated.
    nova_conf.add('force_dhcp_release', True)
def __init__(self, *args, **kargs):
    """Establish the swift directory layout and helper-script locations."""
    comp.PythonInstallComponent.__init__(self, *args, **kargs)
    self.bin_dir = sh.joinpths(self.app_dir, BIN_DIR)
    self.cfg_dir = sh.joinpths(self.app_dir, CONFIG_DIR)
    # Data location is configurable; everything else hangs off it.
    self.datadir = sh.joinpths(self.app_dir,
                               self.cfg.getdefaulted('swift', 'data_location', 'data'))
    self.logdir = sh.joinpths(self.datadir, LOG_DIR)
    self.fs_dev = sh.joinpths(self.datadir, DEVICE_PATH)
    self.fs_image = sh.joinpths(self.datadir, SWIFT_IMG)
    self.startmain_file = sh.joinpths(self.bin_dir, SWIFT_STARTMAIN)
    self.makerings_file = sh.joinpths(self.bin_dir, SWIFT_MAKERINGS)
    self.auth_server = AUTH_SERVICE
def _get_source_config(self, config_fn):
    """Fetch nova config sources; the paste conf comes from its template."""
    if config_fn == PASTE_CONF:
        # Return the paste api template
        return comp.PythonInstallComponent._get_source_config(self, PASTE_SOURCE_FN)
    # Logging conf lives under a different source file name.
    name = LOGGING_SOURCE_FN if config_fn == LOGGING_CONF else config_fn
    srcfn = sh.joinpths(self.cfg_dir, "nova", name)
    return (srcfn, sh.load_file(srcfn))
def _setup_initer(self): LOG.info("Configuring keystone initializer template %s.", MANAGE_DATA_CONF) (_, contents) = utils.load_template(self.component_name, MANAGE_DATA_CONF) params = self._get_param_map(MANAGE_DATA_CONF) contents = utils.param_replace(contents, params, True) tgt_fn = sh.joinpths(self.bin_dir, MANAGE_DATA_CONF) sh.write_file(tgt_fn, contents) sh.chmod(tgt_fn, 0755) self.tracewriter.file_touched(tgt_fn)
def _ensure_db_access(self):
    """Make the dashboard's local dir writeable by the apache runtime user."""
    # Need db access:
    # openstack-dashboard/local needs to be writeable by the runtime user
    # since currently its storing the sql-lite databases there (TODO fix that)
    local_dir = sh.joinpths(self.dash_dir, 'local')
    if not sh.isdir(local_dir):
        return
    (user, group) = self._get_apache_user_group()
    LOG.debug("Changing ownership (recursively) of %r so that it can be used by %r/%r",
              local_dir, group, user)
    sh.chown_r(local_dir, sh.getuid(user), sh.getgid(group))
def __init__(self, *args, **kargs):
    """Set up nova runtime dirs and the conf/volume configurator helpers."""
    comp.PythonInstallComponent.__init__(self, *args, **kargs)
    self.bin_dir = sh.joinpths(self.app_dir, BIN_DIR)
    self.paste_conf_fn = self._get_target_config_name(PASTE_CONF)
    # NOTE(review): kept for interface compatibility; nothing in this
    # block assigns it a real value (volume_maker is used instead).
    self.volume_configurator = None
    # Removed the dead `self.volumes_enabled = False` store that was
    # immediately overwritten by the membership test below.
    self.volumes_enabled = NVOL in self.desired_subsystems
    self.xvnc_enabled = NXVNC in self.desired_subsystems
    self.volume_maker = None
    if self.volumes_enabled:
        self.volume_maker = NovaVolumeConfigurator(self)
    self.conf_maker = NovaConfConfigurator(self)
def _get_param_map(self, config_fn):
    """Build keystone param replacements for the given config file."""
    mp = {
        'SERVICE_HOST': self.cfg.get('host', 'ip'),
        'DEST': self.app_dir,
        'BIN_DIR': self.bin_dir,
        'CONFIG_FILE': sh.joinpths(self.cfg_dir, ROOT_CONF),
    }
    if config_fn == ROOT_CONF:
        mp['SQL_CONN'] = db.fetch_dbdsn(self.cfg, self.pw_gen, DB_NAME)
        mp['KEYSTONE_DIR'] = self.app_dir
        mp.update(get_shared_params(self.cfg, self.pw_gen))
    elif config_fn == MANAGE_DATA_CONF:
        mp.update(get_shared_params(self.cfg, self.pw_gen))
    return mp
def _clean_it(self):
    """Run the nova cleaner script (if present) with the needed environment."""
    # These environment additions are important
    # in that they eventually affect how this script runs
    env = {
        'ENABLED_SERVICES': ",".join(self.desired_subsystems),
        'BIN_DIR': self.bin_dir,
        'VOLUME_NAME_PREFIX': self.cfg.getdefaulted('nova', 'volume_name_prefix', DEF_VOL_PREFIX),
    }
    cleaner_fn = sh.joinpths(self.bin_dir, CLEANER_DATA_CONF)
    if sh.isfile(cleaner_fn):
        LOG.info("Cleaning up your system by running nova cleaner script %r" % (cleaner_fn))
        sh.execute(*(CLEANER_CMD_ROOT + [cleaner_fn]), run_as_root=True, env_overrides=env)
def _get_param_map(self, config_fn):
    """Add nova file locations and (for net-init) network range defaults."""
    mp = comp.PythonInstallComponent._get_param_map(self, config_fn)
    mp['CFG_FILE'] = sh.joinpths(self.cfg_dir, API_CONF)
    mp['BIN_DIR'] = self.bin_dir
    if config_fn == NET_INIT_CONF:
        # (param key, config option, default) triples for the net ranges.
        for (param, opt, df) in (
                ('FLOATING_RANGE', 'floating_range', '172.24.4.224/28'),
                ('TEST_FLOATING_RANGE', 'test_floating_range', '192.168.253.0/29'),
                ('TEST_FLOATING_POOL', 'test_floating_pool', 'test'),
                ('FIXED_NETWORK_SIZE', 'fixed_network_size', '256'),
                ('FIXED_RANGE', 'fixed_range', '10.0.0.0/24')):
            mp[param] = self.cfg.getdefaulted('nova', opt, df)
    return mp
def post_start(self):
    """After keystone starts, run (then disarm) its first-time init script."""
    init_fn = sh.joinpths(self.bin_dir, MANAGE_DATA_CONF)
    if not sh.is_executable(init_fn):
        return
    # If its still there, run it
    # these environment additions are important
    # in that they eventually affect how this script runs
    LOG.info("Waiting %s seconds so that keystone can start up before running first time init." % (self.wait_time))
    sh.sleep(self.wait_time)
    env = {
        'ENABLED_SERVICES': ",".join(self.instances.keys()),
        'BIN_DIR': self.bin_dir,
    }
    setup_cmd = MANAGE_CMD_ROOT + [init_fn]
    LOG.info("Running %r command to initialize keystone." % (" ".join(setup_cmd)))
    sh.execute(*setup_cmd, env_overrides=env, run_as_root=False)
    utils.mark_unexecute_file(init_fn, env)
def _do_upstart_configure(self, app_name, runtime_info): (app_pth, _, program_args) = runtime_info # TODO FIXME symlinks won't work. Need to copy the files there. # https://bugs.launchpad.net/upstart/+bug/665022 cfg_fn = sh.joinpths(CONF_ROOT, app_name + CONF_EXT) if sh.isfile(cfg_fn): LOG.debug("Upstart config file already exists: %s" % (cfg_fn)) return LOG.debug("Loading upstart template to be used by: %s" % (cfg_fn)) (_, contents) = utils.load_template('general', UPSTART_CONF_TMPL) params = self._get_upstart_conf_params(app_pth, app_name, *program_args) adjusted_contents = utils.param_replace(contents, params) LOG.debug("Generated up start config for %s: %s" % (app_name, adjusted_contents)) with sh.Rooted(True): sh.write_file(cfg_fn, adjusted_contents) sh.chmod(cfg_fn, 0666)
def _config_fixups(self):
    """Point apache's wsgi socket somewhere writable and set its run user/group."""
    (user, group) = self._get_apache_user_group()
    # This is recorded so it gets cleaned up during uninstall
    self.tracewriter.file_touched(SOCKET_CONF)
    LOG.info("Fixing up %r and %r files" % (SOCKET_CONF, HTTPD_CONF))
    with sh.Rooted(True):
        # Fix the socket prefix to someplace we can use
        sh.write_file(SOCKET_CONF,
                      "WSGISocketPrefix %s" % (sh.joinpths(self.log_dir, "wsgi-socket")))
        # Now adjust the run user and group (of httpd.conf)
        adjusted_lines = []
        for line in sh.load_file(HTTPD_CONF).splitlines():
            if line.startswith("User "):
                line = "User %s" % (user)
            elif line.startswith("Group "):
                line = "Group %s" % (group)
            adjusted_lines.append(line)
        sh.write_file(HTTPD_CONF, utils.joinlinesep(*adjusted_lines))
def _setup_network_init(self):
    """Run nova's one-time network init script if it is still armed."""
    init_fn = sh.joinpths(self.bin_dir, NET_INIT_CONF)
    if not sh.is_executable(init_fn):
        return
    LOG.info("Creating your nova network to be used with instances.")
    # If still there, run it
    # these environment additions are important
    # in that they eventually affect how this script runs
    if 'quantum' in self.options:
        LOG.info("Waiting %s seconds so that quantum can start up before running first time init." % (self.wait_time))
        sh.sleep(self.wait_time)
    env = {'ENABLED_SERVICES': ",".join(self.options)}
    setup_cmd = NET_INIT_CMD_ROOT + [init_fn]
    LOG.info("Running %r command to initialize nova's network." % (" ".join(setup_cmd)))
    sh.execute(*setup_cmd, env_overrides=env, run_as_root=False)
    utils.mark_unexecute_file(init_fn, env)
def _get_param_map(self, config_fn):
    """Params for the network init template, or shared keystone params otherwise."""
    mp = dict()
    if config_fn == NET_INIT_CONF:
        mp['NOVA_DIR'] = self.app_dir
        mp['CFG_FILE'] = sh.joinpths(self.cfg_dir, API_CONF)
        # (param key, config option, default) triples for the net ranges.
        for (param, opt, df) in (
                ('FLOATING_RANGE', 'floating_range', '172.24.4.224/28'),
                ('TEST_FLOATING_RANGE', 'test_floating_range', '192.168.253.0/29'),
                ('TEST_FLOATING_POOL', 'test_floating_pool', 'test'),
                ('FIXED_NETWORK_SIZE', 'fixed_network_size', '256'),
                ('FIXED_RANGE', 'fixed_range', '10.0.0.0/24')):
            mp[param] = self.cfg.getdefaulted('nova', opt, df)
    else:
        mp.update(keystone.get_shared_params(self.cfg, self.pw_gen, 'nova'))
    return mp
def _setup_vol_groups(self):
    """Ensure the LVM volume group backing nova-volume exists.

    Creates a sparse backing file and a loop device when the group is
    missing, then builds the needed logical volumes and restarts tgt.
    """
    LOG.info("Attempting to setup volume groups for nova volume management.")
    mp = dict()
    backing_file = self.cfg.getdefaulted('nova', 'volume_backing_file',
                                         sh.joinpths(self.app_dir, 'nova-volumes-backing-file'))
    vol_group = self.cfg.getdefaulted('nova', 'volume_group', 'nova-volumes')
    backing_file_size = utils.to_bytes(self.cfg.getdefaulted('nova', 'volume_backing_file_size', '2052M'))
    mp['VOLUME_GROUP'] = vol_group
    mp['VOLUME_BACKING_FILE'] = backing_file
    mp['VOLUME_BACKING_FILE_SIZE'] = backing_file_size
    try:
        utils.execute_template(*VG_CHECK_CMD, params=mp)
        LOG.warn("Volume group already exists: %r" % (vol_group))
    except exceptions.ProcessExecutionError as err:
        # Check that the error from VG_CHECK is an expected error
        # (exit code 5 presumably means "group not found" — TODO confirm).
        if err.exit_code != 5:
            raise
        LOG.info("Need to create volume group: %r" % (vol_group))
        sh.touch_file(backing_file, die_if_there=False, file_size=backing_file_size)
        vg_dev_result = utils.execute_template(*VG_DEV_CMD, params=mp)
        if vg_dev_result and vg_dev_result[0]:
            LOG.debug("VG dev result: %s" % (vg_dev_result))
            # Strip the newlines out of the stdout (which is in the first
            # element of the first (and only) tuple in the response
            (sysout, _) = vg_dev_result[0]
            mp['DEV'] = sysout.replace('\n', '')
            utils.execute_template(*VG_CREATE_CMD, params=mp)
    # One way or another, we should have the volume group, Now check the
    # logical volumes
    self._process_lvs(mp)
    # Finish off by restarting tgt, and ignore any errors
    cmdrestart = self.distro.get_command('iscsi', 'restart', quiet=True)
    if cmdrestart:
        sh.execute(*cmdrestart, run_as_root=True, check_exit_code=False)
def _construct_instances(self, persona, action, root_dir):
    """Create component instances for the given persona/action.

    Each component class gets a runner-provided kwargs bundle merged
    with its distro-provided info; distro keys never override the
    runner-provided ones.
    """
    components = persona.wanted_components
    desired_subsystems = persona.wanted_subsystems or dict()
    component_opts = persona.component_options or dict()
    instances = dict()
    for c in components:
        (cls, my_info) = self.distro.extract_component(c, action)
        LOG.debug("Constructing class %s" % (cls))
        cls_kvs = dict()
        cls_kvs['runner'] = self
        cls_kvs['component_dir'] = sh.joinpths(root_dir, c)
        cls_kvs['subsystem_info'] = my_info.get('subsystems') or dict()
        # The shared registry is handed to every instance as it is built,
        # so later components can see earlier ones.
        cls_kvs['all_instances'] = instances
        cls_kvs['name'] = c
        cls_kvs['keep_old'] = self.keep_old
        cls_kvs['desired_subsystems'] = desired_subsystems.get(c) or set()
        cls_kvs['options'] = component_opts.get(c) or dict()
        # The above is not overrideable...
        for (k, v) in my_info.items():
            if k not in cls_kvs:
                cls_kvs[k] = v
        instances[c] = cls(**cls_kvs)
    return instances
def download(self):
    """Download each configured source location into the app directory.

    Returns the number of locations processed; raises ConfigException
    when a required uri/branch config entry is missing.
    """
    locations = self._get_download_locations()
    base_dir = self.app_dir
    for location_info in locations:
        uri_tuple = location_info["uri"]
        branch_tuple = location_info.get("branch")
        sub_dir = location_info.get("subdir")
        target_loc = base_dir
        if sub_dir:
            target_loc = sh.joinpths(base_dir, sub_dir)
        # Branch is optional; when configured it must resolve to a value.
        branch = None
        if branch_tuple:
            (cfg_section, cfg_key) = branch_tuple
            branch = self.cfg.get(cfg_section, cfg_key)
            if not branch:
                msg = "No branch entry found at config location [%s]" % \
                    (cfg_helpers.make_id(cfg_section, cfg_key))
                raise excp.ConfigException(msg)
        (cfg_section, cfg_key) = uri_tuple
        uri = self.cfg.get(cfg_section, cfg_key)
        if not uri:
            msg = "No uri entry found at config location [%s]" % \
                (cfg_helpers.make_id(cfg_section, cfg_key))
            raise excp.ConfigException(msg)
        # Activate da download!
        self.tracewriter.download_happened(target_loc, uri)
        dirs_made = self._do_download(uri, target_loc, branch)
        # Here we ensure this is always added so that
        # if a keep old happens then this of course
        # won't be recreated, but if u uninstall without keeping old
        # then this won't be deleted this time around
        # adding it in is harmless and will make sure its removed.
        if target_loc not in dirs_made:
            dirs_made.append(target_loc)
        self.tracewriter.dirs_made(*dirs_made)
    return len(locations)
def __init__(self, *args, **kargs):
    """Record the bin and config directories under the checkout."""
    comp.PythonInstallComponent.__init__(self, *args, **kargs)
    self.cfg_dir = sh.joinpths(self.app_dir, *CFG_LOC)
    self.bin_dir = sh.joinpths(self.app_dir, BIN_DIR)
def __init__(self, *args, **kargs):
    """Record the horizon/dashboard subdirs and this component's log dir."""
    comp.PythonInstallComponent.__init__(self, *args, **kargs)
    self.log_dir = sh.joinpths(self.component_dir, LOGS_DIR)
    self.horizon_dir = sh.joinpths(self.app_dir, ROOT_HORIZON)
    self.dash_dir = sh.joinpths(self.app_dir, ROOT_DASH)
def _setup_blackhole(self):
    """Create the empty directory apache will use as its docroot."""
    made_dirs = sh.mkdirslist(sh.joinpths(self.app_dir, BLACKHOLE_DIR))
    self.tracewriter.dirs_made(*made_dirs)
def _get_target_config_name(self, config_name):
    """Resolve horizon's local settings target; defer other configs."""
    if config_name != HORIZON_PY_CONF:
        return comp.PythonInstallComponent._get_target_config_name(self, config_name)
    return sh.joinpths(self.dash_dir, *HORIZON_PY_CONF_TGT)
def _get_param_map(self, app_name):
    """Add the melange root config-file path to the base params."""
    params = comp.PythonRuntime._get_param_map(self, app_name)
    params['CFG_FILE'] = sh.joinpths(self.cfg_dir, ROOT_CONF_REAL_NAME)
    return params
def _sync_db(self):
    """Run melange's db-sync command against its root config."""
    LOG.info("Syncing the database with melange.")
    params = {
        'BIN_DIR': self.bin_dir,
        'CFG_FILE': sh.joinpths(self.cfg_dir, ROOT_CONF_REAL_NAME),
    }
    utils.execute_template(*DB_SYNC_CMD, params=params)