def _do_network_init(self):
    """Run the one-time nova network creation commands.

    Does nothing when the marker file already exists or when the
    'do-network-init' option is disabled; writes the marker afterwards
    so that a re-run is a no-op until the marker is deleted.
    """
    if sh.isfile(self.net_init_fn) or not self.get_bool_option('do-network-init'):
        return
    # Figure out the commands to run
    commands = []
    replacements = {}
    if self.get_bool_option('enable_fixed'):
        # Create a fixed network
        replacements['FIXED_NETWORK_SIZE'] = self.get_option(
            'fixed_network_size', default_value='256')
        replacements['FIXED_RANGE'] = self.get_option(
            'fixed_range', default_value='10.0.0.0/24')
        commands.extend(FIXED_NET_CMDS)
    if self.get_bool_option('enable_floating'):
        # Create a floating network + test floating pool
        commands.extend(FLOATING_NET_CMDS)
        replacements['FLOATING_RANGE'] = self.get_option(
            'floating_range', default_value='172.24.4.224/28')
        replacements['TEST_FLOATING_RANGE'] = self.get_option(
            'test_floating_range', default_value='192.168.253.0/29')
        replacements['TEST_FLOATING_POOL'] = self.get_option(
            'test_floating_pool', default_value='test')
    # Anything to run??
    if commands:
        LOG.info("Creating your nova network to be used with instances.")
        utils.execute_template(*commands, params=replacements)
    # Writing this makes sure that we don't init again
    sh.write_file(self.net_init_fn,
                  utils.prettify_yaml({'cmds': commands,
                                       'replacements': replacements}))
    LOG.info("If you wish to re-run network initialization, delete %s",
             colorizer.quote(self.net_init_fn))
def build_all_srpms(self, package_files, tracewriter, jobs):
    """Render the deps makefile and build all SRPM packages via make."""
    template = utils.load_template(sh.joinpths("packaging", "makefiles"),
                                   "source.mk")[1]
    scripts_dir = sh.abspth(sh.joinpths(settings.TEMPLATE_DIR,
                                        "packaging", "scripts"))
    # Everything after the executable itself becomes py2rpm flags.
    py2rpm_flags = self._start_cmdline(escape_values=True)[1:] + [
        "--scripts-dir", scripts_dir,
        "--source-only",
        "--rpm-base", self._rpmbuild_dir,
    ]
    py2rpm_executable = " ".join(self._start_cmdline()[0:1])
    template_params = {
        "DOWNLOADS_DIR": self._download_dir,
        "LOGS_DIR": self._log_dir,
        "PY2RPM": py2rpm_executable,
        "PY2RPM_FLAGS": " ".join(py2rpm_flags),
    }
    marks_dir = sh.joinpths(self._deps_dir, "marks-deps")
    if not sh.isdir(marks_dir):
        sh.mkdirslist(marks_dir, tracewriter=tracewriter)
    makefile_path = sh.joinpths(self._deps_dir, "deps.mk")
    sh.write_file(makefile_path,
                  utils.expand_template(template, template_params),
                  tracewriter=tracewriter)
    utils.log_iterable(package_files,
                       header="Building %s SRPM packages using %s jobs" %
                              (len(package_files), jobs),
                       logger=LOG)
    self._execute_make(makefile_path, marks_dir, jobs)
def install(self):
    """Fetch (or reuse a cached) image, unpack it, register it with glance.

    Returns a (image_name, image_id) tuple.
    """
    url_fn = self._extract_url_fn()
    if not url_fn:
        raise IOError("Can not determine file name from url: %r" % (self.url))
    (cache_path, details_path) = self._cached_paths()
    if self._validate_cache(cache_path, details_path):
        LOG.info("Found valid cached image + metadata at: %s",
                 colorizer.quote(cache_path))
        unpack_info = utils.load_yaml_text(sh.load_file(details_path))
    else:
        sh.mkdir(cache_path)
        if self._is_url_local():
            fetched_fn = self.url
        else:
            (fetched_fn, bytes_down) = down.UrlLibDownloader(
                self.url, sh.joinpths(cache_path, url_fn)).download()
            LOG.debug("For url %s we downloaded %s bytes to %s",
                      self.url, bytes_down, fetched_fn)
        unpack_info = Unpacker().unpack(url_fn, fetched_fn, cache_path)
        # Remember the unpack metadata so the next run can reuse the cache.
        sh.write_file(details_path, utils.prettify_yaml(unpack_info))
    tgt_image_name = self._generate_img_name(url_fn)
    img_id = self._register(tgt_image_name, unpack_info)
    return (tgt_image_name, img_id)
def install(self):
    """Install the dependency packages with yum from the anvil repo."""
    super(YumDependencyHandler, self).install()
    repo_filename = sh.joinpths(self.YUM_REPO_DIR, self.REPO_FN)
    # Ensure we copy the local repo file name to the main repo so that
    # yum will find it when installing packages.
    sh.write_file(repo_filename, sh.load_file(self.anvil_repo_filename),
                  tracewriter=self.tracewriter)
    # Erase it if its been previously installed.
    to_erase = []
    if self.helper.is_installed(self.OPENSTACK_DEPS_PACKAGE_NAME):
        to_erase.append(self.OPENSTACK_DEPS_PACKAGE_NAME)
    to_erase.extend(p for p in self.nopackages if self.helper.is_installed(p))
    if to_erase:
        sh.execute(["yum", "erase", "-y"] + to_erase,
                   stdout_fh=sys.stdout, stderr_fh=sys.stderr)
    sh.execute(["yum", "clean", "all"])
    sh.execute(["yum", "install", "-y", self.OPENSTACK_DEPS_PACKAGE_NAME],
               stdout_fh=sys.stdout, stderr_fh=sys.stderr)
    rpm_names = self._convert_names_python2rpm(self.python_names)
    if rpm_names:
        sh.execute(["yum", "install", "-y"] + rpm_names,
                   stdout_fh=sys.stdout, stderr_fh=sys.stderr)
def build_all_srpms(self, package_files, tracewriter, jobs):
    """Build SRPMs for every package file through a generated makefile."""
    (_fn, content) = utils.load_template(
        sh.joinpths("packaging", "makefiles"), "source.mk")
    scripts_dir = sh.abspth(
        sh.joinpths(settings.TEMPLATE_DIR, "packaging", "scripts"))
    flags = self._start_cmdline(escape_values=True)[1:]
    flags += [
        "--scripts-dir", scripts_dir,
        "--source-only",
        "--rpm-base", self._rpmbuild_dir,
        "--debug",
    ]
    executable = " ".join(self._start_cmdline()[0:1])
    params = {
        "DOWNLOADS_DIR": self._download_dir,
        "LOGS_DIR": self._log_dir,
        "PY2RPM": executable,
        "PY2RPM_FLAGS": " ".join(flags),
    }
    marks_dir = sh.joinpths(self._deps_dir, "marks-deps")
    if not sh.isdir(marks_dir):
        sh.mkdirslist(marks_dir, tracewriter=tracewriter)
    makefile_path = sh.joinpths(self._deps_dir, "deps.mk")
    sh.write_file(makefile_path, utils.expand_template(content, params),
                  tracewriter=tracewriter)
    utils.log_iterable(package_files,
                       header="Building %s SRPM packages using %s jobs" %
                              (len(package_files), jobs),
                       logger=LOG)
    self._execute_make(makefile_path, marks_dir, jobs)
def post_start(self):
    """Initialize keystone once it is active (first run only)."""
    if sh.isfile(self.init_fn) or not self.get_bool_option("do-init"):
        return
    self.wait_active()
    LOG.info("Running commands to initialize keystone.")
    (fn, contents) = utils.load_template(self.name, INIT_WHAT_FN)
    LOG.debug("Initializing with contents of %s", fn)
    params = {
        "keystone": khelper.get_shared_params(
            **utils.merge_dicts(self.options,
                                khelper.get_shared_passwords(self))),
        "glance": ghelper.get_shared_params(ip=self.get_option("ip"),
                                            **self.get_option("glance")),
        "nova": nhelper.get_shared_params(ip=self.get_option("ip"),
                                          **self.get_option("nova")),
    }
    # Make sure keystone is actually answering before initializing it.
    for url in (params["keystone"]["endpoints"]["admin"]["uri"],
                params["keystone"]["endpoints"]["public"]["uri"]):
        utils.wait_for_url(url)
    init_what = utils.expand_template_deep(
        self._filter_init(utils.load_yaml_text(contents)), params)
    khelper.Initializer(
        params["keystone"]["service_token"],
        params["keystone"]["endpoints"]["admin"]["uri"]).initialize(**init_what)
    # Writing this makes sure that we don't init again
    sh.write_file(self.init_fn, utils.prettify_yaml(init_what))
    LOG.info("If you wish to re-run initialization, delete %s",
             colorizer.quote(self.init_fn))
def download_dependencies(self):
    """Download dependencies from `$deps_dir/download-requires`."""
    # NOTE(aababilov): do not drop download_dir - it can be reused
    sh.mkdirslist(self.download_dir, tracewriter=self.tracewriter)
    pips_to_download = self._filter_download_requires()
    sh.write_file(self.download_requires_filename,
                  "\n".join(str(req) for req in pips_to_download))
    if not pips_to_download:
        return ([], [])
    # NOTE(aababilov): user could have changed persona, so,
    # check that all requirements are downloaded....
    if self._requirements_satisfied(pips_to_download, self.download_dir):
        LOG.info("All python dependencies have been already downloaded")
    else:
        def on_download_finish(time_taken):
            LOG.info("Took %0.2f seconds to download...", time_taken)

        def try_download(attempt):
            LOG.info("Downloading %s dependencies with pip (attempt %s)...",
                     len(pips_to_download), attempt)
            output_filename = sh.joinpths(
                self.log_dir, "pip-download-attempt-%s.log" % (attempt))
            LOG.info("Please wait this may take a while...")
            LOG.info("Check %s for download activity details...",
                     colorizer.quote(output_filename))
            utils.time_it(on_download_finish,
                          pip_helper.download_dependencies,
                          self.download_dir, pips_to_download,
                          output_filename)

        utils.retry(self.MAX_PIP_DOWNLOAD_ATTEMPTS,
                    self.PIP_DOWNLOAD_DELAY, try_download)
    pips_downloaded = [pip_helper.extract_requirement(p)
                       for p in pips_to_download]
    what_downloaded = self._examine_download_dir(pips_downloaded,
                                                 self.download_dir)
    return (pips_downloaded, what_downloaded)
def _write_spec_file(self, instance, rpm_name, template_name, params):
    """Render an rpm spec file for ``rpm_name`` and return its path."""
    requires = params.get('requires', [])
    test_requires = params.get('test_requires', [])
    egg_info = getattr(instance, 'egg_info', None)
    if egg_info:
        def rpm_requires_for(key):
            # A missing key simply means no requirements of that kind.
            try:
                python_reqs = [str(req) for req in egg_info[key]]
            except KeyError:
                return []
            return self.py2rpm_helper.names_to_rpm_requires(python_reqs)

        requires.extend(rpm_requires_for('dependencies'))
        test_requires.extend(rpm_requires_for('test_dependencies'))
    params["requires"] = requires
    params["test_requires"] = test_requires
    params["epoch"] = self.OPENSTACK_EPOCH
    params["part_fn"] = lambda filename: sh.joinpths(
        settings.TEMPLATE_DIR, self.SPEC_TEMPLATE_DIR, filename)
    params.update(self._make_spec_functors(
        pkg_resources.parse_version(params["version"])))
    content = utils.load_template(self.SPEC_TEMPLATE_DIR, template_name)[1]
    spec_filename = sh.joinpths(self.rpmbuild_dir, "SPECS",
                                "%s.spec" % rpm_name)
    sh.write_file(spec_filename, utils.expand_template(content, params),
                  tracewriter=self.tracewriter)
    return spec_filename
def _install_node_repo(self): repo_url = self.get_option('nodejs_repo') if not repo_url: return # Download the said url and install it so that we can actually install # the node.js requirement which seems to be needed by horizon for css compiling?? repo_basename = sh.basename(repo_url) (_fn, fn_ext) = os.path.splitext(repo_basename) fn_ext = fn_ext.lower().strip() if fn_ext not in ['.rpm', '.repo']: LOG.warn("Unknown node.js repository configuration extension %s (we only support .rpm or .repo)!", colorizer.quote(fn_ext)) return with NamedTemporaryFile(suffix=fn_ext) as temp_fh: LOG.info("Downloading node.js repository configuration from %s to %s.", repo_url, temp_fh.name) down.UrlLibDownloader(repo_url, temp_fh.name).download() temp_fh.flush() if fn_ext == ".repo": # Just write out the repo file after downloading it... repo_file_name = sh.joinpths("/etc/yum.repos.d", repo_basename) if not sh.exists(repo_file_name): with sh.Rooted(True): sh.write_file(repo_file_name, sh.load_file(temp_fh.name), tracewriter=self.tracewriter) sh.chmod(repo_file_name, 0644) elif fn_ext == ".rpm": # Install it instead from said rpm (which likely is a # file that contains said repo location)... packager = yum.YumPackager(self.distro).direct_install(temp_fh.name)
def _do_network_init(self):
    """Run first-time nova network creation (guarded by a marker file).

    Writes the executed commands + replacements to the marker file so a
    subsequent run is a no-op until the marker is deleted.
    """
    ran_fn = self.net_init_fn
    if not sh.isfile(ran_fn) and self.get_bool_option('do-network-init'):
        # Figure out the commands to run
        cmds = []
        # BUGFIX: BIN_DIR was previously assigned twice (once in the dict
        # literal and once immediately after); the redundant duplicate
        # assignment has been removed.
        mp = {
            'CFG_FILE': self.config_path,
            'BIN_DIR': self.bin_dir,
        }
        if self.get_bool_option('enable_fixed'):
            # Create a fixed network
            mp['FIXED_NETWORK_SIZE'] = self.get_option('fixed_network_size', default_value='256')
            mp['FIXED_RANGE'] = self.get_option('fixed_range', default_value='10.0.0.0/24')
            cmds.extend(FIXED_NET_CMDS)
        if self.get_bool_option('enable_floating'):
            # Create a floating network + test floating pool
            cmds.extend(FLOATING_NET_CMDS)
            mp['FLOATING_RANGE'] = self.get_option('floating_range', default_value='172.24.4.224/28')
            mp['TEST_FLOATING_RANGE'] = self.get_option('test_floating_range', default_value='192.168.253.0/29')
            mp['TEST_FLOATING_POOL'] = self.get_option('test_floating_pool', default_value='test')
        # Anything to run??
        if cmds:
            LOG.info("Creating your nova network to be used with instances.")
            utils.execute_template(*cmds, params=mp)
        # Writing this makes sure that we don't init again
        cmd_mp = {
            'cmds': cmds,
            'replacements': mp,
        }
        sh.write_file(ran_fn, utils.prettify_yaml(cmd_mp))
        LOG.info("If you wish to re-run network initialization, delete %s",
                 colorizer.quote(ran_fn))
def _create_repo(self, repo_name):
    """Create bin + src yum repos for ``repo_name`` and publish its .repo file."""
    repo_dir = sh.joinpths(self.anvil_repo_dir, repo_name)
    src_repo_dir = sh.joinpths(self.anvil_repo_dir, self.SRC_REPOS[repo_name])
    for a_dir in (repo_dir, src_repo_dir):
        if not sh.isdir(a_dir):
            sh.mkdirslist(a_dir, tracewriter=self.tracewriter)
        LOG.info("Creating repo at %s", a_dir)
        sh.execute(["createrepo", a_dir])
    repo_filename = sh.joinpths(self.anvil_repo_dir, "%s.repo" % repo_name)
    LOG.info("Writing %s", repo_filename)
    content = utils.load_template("packaging", "common.repo")[1]
    repo_params = {
        "repo_name": repo_name,
        "baseurl_bin": "file://%s" % repo_dir,
        "baseurl_src": "file://%s" % src_repo_dir,
    }
    sh.write_file(repo_filename, utils.expand_template(content, repo_params),
                  tracewriter=self.tracewriter)
    # NOTE(harlowja): Install *.repo file so that anvil deps will be available
    # when building openstack core project packages.
    system_repo_filename = sh.joinpths(self.YUM_REPO_DIR,
                                       "%s.repo" % repo_name)
    sh.copy(repo_filename, system_repo_filename,
            tracewriter=self.tracewriter)
    LOG.info("Copied to %s", system_repo_filename)
def _create_package(self):
    """Render this package's spec template and tar it into the sources dir."""
    gathered_files = self._gather_files()
    template_params = {
        'files': gathered_files,
        'requires': self._requirements(),
        'obsoletes': self._obsoletes(),
        'conflicts': self._conflicts(),
        'defines': self._defines(),
        'undefines': self._undefines(),
        'build': self._build_details(),
        'who': sh.getuser(),
        'date': utils.iso8601(),
        'details': self.details,
    }
    template = utils.load_template('packaging', 'spec.tmpl')[1]
    base_name = self._make_fn("spec")
    spec_path = sh.joinpths(self.build_paths['specs'], base_name)
    LOG.debug("Creating spec file %s with params:", spec_path)
    # The spec tarball itself is listed as one of the package sources.
    gathered_files['sources'].append("%s.tar.gz" % (base_name))
    utils.log_object(template_params, logger=LOG, level=logging.DEBUG)
    sh.write_file(spec_path, utils.expand_template(template, template_params))
    tar_it(sh.joinpths(self.build_paths['sources'],
                       "%s.tar.gz" % (base_name)),
           base_name, wkdir=self.build_paths['specs'])
def _do_network_init(self):
    """Create nova's fixed/floating networks on first run only.

    A marker file records what ran; delete it to force re-initialization.
    """
    ran_fn = self.net_init_fn
    if not sh.isfile(ran_fn) and self.get_bool_option("do-network-init"):
        # Figure out the commands to run
        cmds = []
        # BUGFIX: BIN_DIR was assigned both in the dict literal and again
        # immediately afterwards; the redundant assignment was dropped.
        mp = {"CFG_FILE": self.config_path, "BIN_DIR": self.bin_dir}
        if self.get_bool_option("enable_fixed"):
            # Create a fixed network
            mp["FIXED_NETWORK_SIZE"] = self.get_option("fixed_network_size", default_value="256")
            mp["FIXED_RANGE"] = self.get_option("fixed_range", default_value="10.0.0.0/24")
            cmds.extend(FIXED_NET_CMDS)
        if self.get_bool_option("enable_floating"):
            # Create a floating network + test floating pool
            cmds.extend(FLOATING_NET_CMDS)
            mp["FLOATING_RANGE"] = self.get_option("floating_range", default_value="172.24.4.224/28")
            mp["TEST_FLOATING_RANGE"] = self.get_option("test_floating_range", default_value="192.168.253.0/29")
            mp["TEST_FLOATING_POOL"] = self.get_option("test_floating_pool", default_value="test")
        # Anything to run??
        if cmds:
            LOG.info("Creating your nova network to be used with instances.")
            utils.execute_template(*cmds, params=mp)
        # Writing this makes sure that we don't init again
        cmd_mp = {"cmds": cmds, "replacements": mp}
        sh.write_file(ran_fn, utils.prettify_yaml(cmd_mp))
        LOG.info("If you wish to re-run network initialization, delete %s",
                 colorizer.quote(ran_fn))
def post_start(self):
    """Run keystone's one-time initialization after it becomes active.

    On a RuntimeError during initialization the marker file is NOT
    written, so the next run retries.
    """
    if sh.isfile(self.init_fn) or not self.get_bool_option('do-init'):
        return
    self.wait_active()
    LOG.info("Running commands to initialize keystone.")
    (fn, contents) = utils.load_template(self.name, INIT_WHAT_FN)
    LOG.debug("Initializing with contents of %s", fn)
    params = {}
    params['keystone'] = khelper.get_shared_params(
        **utils.merge_dicts(self.options, khelper.get_shared_passwords(self)))
    # Gather the shared params of every service referenced by the template.
    for (svc, helper) in (('glance', ghelper), ('nova', nhelper),
                          ('neutron', net_helper), ('cinder', chelper)):
        params[svc] = helper.get_shared_params(ip=self.get_option('ip'),
                                               **self.get_option(svc))
    endpoints = params['keystone']['endpoints']
    for url in (endpoints['admin']['uri'], endpoints['public']['uri']):
        utils.wait_for_url(url)
    init_what = utils.expand_template_deep(utils.load_yaml_text(contents),
                                           params)
    try:
        init_how = khelper.Initializer(params['keystone']['service_token'],
                                       endpoints['admin']['uri'])
        init_how.initialize(**init_what)
    except RuntimeError:
        LOG.exception("Failed to initialize keystone, is the keystone client library available?")
    else:
        # Writing this makes sure that we don't init again
        sh.write_file(self.init_fn, utils.prettify_yaml(init_what))
        LOG.info("If you wish to re-run initialization, delete %s",
                 colorizer.quote(self.init_fn))
def _copy_startup_scripts(self, spec_filename):
    """Generate an init script for each ``*.init`` source listed in the spec."""
    common_init_content = utils.load_template("packaging", "common.init")[1]
    # Daemon arguments keyed by binary name; anything not listed gets none.
    daemon_args_by_bin = {
        "quantum-server": ("'--config-file=/etc/quantum/plugin.ini"
                           " --config-file=/etc/quantum/quantum.conf'"),
        "quantum-l3-agent": ("'--config-file=/etc/quantum/l3_agent.ini"
                             " --config-file=/etc/quantum/quantum.conf'"),
        "quantum-dhcp-agent": ("'--config-file=/etc/quantum/dhcp_agent.ini"
                               " --config-file=/etc/quantum/quantum.conf'"),
    }
    for src in rpm.spec(spec_filename).sources:
        script = sh.basename(src[0])
        if not script.endswith(".init"):
            continue
        target_filename = sh.joinpths(self.rpm_sources_dir, script)
        if sh.isfile(target_filename):
            # Already generated previously; leave it alone.
            continue
        bin_name = utils.strip_prefix_suffix(script, "openstack-", ".init")
        params = {
            "bin": bin_name,
            "package": bin_name.split("-", 1)[0],
            "daemon_args": daemon_args_by_bin.get(bin_name, ""),
        }
        sh.write_file(target_filename,
                      utils.expand_template(common_init_content, params))
def _create_repo(self, repo_name):
    """Build binary and source repos for ``repo_name`` and register them."""
    repo_dir = sh.joinpths(self.anvil_repo_dir, repo_name)
    src_repo_dir = sh.joinpths(self.anvil_repo_dir, self.SRC_REPOS[repo_name])
    for directory in (repo_dir, src_repo_dir):
        if not sh.isdir(directory):
            sh.mkdirslist(directory, tracewriter=self.tracewriter)
        LOG.info("Creating repo at %s", directory)
        sh.execute(["createrepo", directory])
    repo_filename = sh.joinpths(self.anvil_repo_dir, "%s.repo" % repo_name)
    LOG.info("Writing %s", repo_filename)
    (_fn, template) = utils.load_template("packaging", "common.repo")
    template_params = {
        "repo_name": repo_name,
        "baseurl_bin": "file://%s" % repo_dir,
        "baseurl_src": "file://%s" % src_repo_dir,
    }
    sh.write_file(repo_filename,
                  utils.expand_template(template, template_params),
                  tracewriter=self.tracewriter)
    # Install *.repo file so that anvil deps will be available
    # when building OpenStack
    system_repo_filename = sh.joinpths(self.YUM_REPO_DIR,
                                       "%s.repo" % repo_name)
    sh.copy(repo_filename, system_repo_filename)
    LOG.info("Copying to %s", system_repo_filename)
    self.tracewriter.file_touched(system_repo_filename)
def _create_package(self):
    """Fill in the spec template and tarball it alongside the other sources."""
    files = self._gather_files()
    params = {
        "files": files,
        "requires": self._requirements(),
        "obsoletes": self._obsoletes(),
        "conflicts": self._conflicts(),
        "defines": self._defines(),
        "undefines": self._undefines(),
        "build": self._build_details(),
        "who": sh.getuser(),
        "date": utils.iso8601(),
        "patches": self._patches(),
        "details": self.details,
    }
    template = utils.load_template("packaging", "spec.tmpl")[1]
    spec_base = self._make_fn("spec")
    spec_fn = sh.joinpths(self.build_paths["specs"], spec_base)
    LOG.debug("Creating spec file %s with params:", spec_fn)
    # Record the spec tarball itself as one of the sources.
    files["sources"].append("%s.tar.gz" % (spec_base))
    utils.log_object(params, logger=LOG, level=logging.DEBUG)
    sh.write_file(spec_fn, utils.expand_template(template, params))
    tar_it(sh.joinpths(self.build_paths["sources"],
                       "%s.tar.gz" % (spec_base)),
           spec_base,
           wkdir=self.build_paths["specs"])
def post_start(self):
    """Initialize keystone once it is running (first run only)."""
    if sh.isfile(self.init_fn) or not self.get_bool_option('do-init'):
        return
    self.wait_active()
    LOG.info("Running commands to initialize keystone.")
    (fn, contents) = utils.load_template(self.name, INIT_WHAT_FN)
    LOG.debug("Initializing with contents of %s", fn)
    params = {}
    params['keystone'] = khelper.get_shared_params(
        **utils.merge_dicts(self.options, khelper.get_shared_passwords(self)))
    # Collect the shared params of the other services the template needs.
    for (svc, helper) in (('glance', ghelper), ('nova', nhelper),
                          ('quantum', qhelper), ('cinder', chelper)):
        params[svc] = helper.get_shared_params(ip=self.get_option('ip'),
                                               **self.get_option(svc))
    endpoints = params['keystone']['endpoints']
    for url in (endpoints['admin']['uri'], endpoints['public']['uri']):
        utils.wait_for_url(url)
    init_what = utils.expand_template_deep(
        self._filter_init(utils.load_yaml_text(contents)), params)
    khelper.Initializer(params['keystone']['service_token'],
                        endpoints['admin']['uri']).initialize(**init_what)
    # Writing this makes sure that we don't init again
    sh.write_file(self.init_fn, utils.prettify_yaml(init_what))
    LOG.info("If you wish to re-run initialization, delete %s",
             colorizer.quote(self.init_fn))
def _do_network_init(self):
    """Create nova's fixed/floating networks on first run.

    Skipped when the marker file exists or networking init is disabled;
    afterwards the executed commands are recorded in the marker file so
    that a re-run is a no-op until it is deleted. Floating IP creation
    is skipped when quantum is enabled (not supported there).
    """
    ran_fn = sh.joinpths(self.get_option("trace_dir"), NET_INITED_FN)
    if not sh.isfile(ran_fn) and self.net_enabled:
        LOG.info("Creating your nova network to be used with instances.")
        # Figure out the commands to run
        mp = {}
        cmds = []
        mp["CFG_FILE"] = sh.joinpths(self.get_option("cfg_dir"), API_CONF)
        mp["BIN_DIR"] = sh.joinpths(self.get_option("app_dir"), BIN_DIR)
        if self.cfg.getboolean("nova", "enable_fixed"):
            # Create a fixed network
            mp["FIXED_NETWORK_SIZE"] = self.cfg.getdefaulted("nova", "fixed_network_size", "256")
            mp["FIXED_RANGE"] = self.cfg.getdefaulted("nova", "fixed_range", "10.0.0.0/24")
            cmds.extend(FIXED_NET_CMDS)
        if not self.get_option("quantum"):
            if self.cfg.getboolean("nova", "enable_floating"):
                # Create a floating network + test floating pool
                cmds.extend(FLOATING_NET_CMDS)
                mp["FLOATING_RANGE"] = self.cfg.getdefaulted("nova", "floating_range", "172.24.4.224/28")
                mp["TEST_FLOATING_RANGE"] = self.cfg.getdefaulted("nova", "test_floating_range", "192.168.253.0/29")
                mp["TEST_FLOATING_POOL"] = self.cfg.getdefaulted("nova", "test_floating_pool", "test")
        else:
            LOG.info("Not creating floating IPs (not supported by quantum server)")
            # BUGFIX: pass the value lazily to the logger instead of eagerly
            # formatting with "%" inside the call.
            LOG.info("Waiting %s seconds so that quantum can start up before running first time init.",
                     self.wait_time)
            sh.sleep(self.wait_time)
        # Anything to run??
        if cmds:
            utils.execute_template(*cmds, params=mp)
        # Writing this makes sure that we don't init again
        cmd_mp = {"cmds": cmds, "replacements": mp}
        sh.write_file(ran_fn, utils.prettify_yaml(cmd_mp))
        LOG.info("If you wish to re-run initialization, delete %s",
                 colorizer.quote(ran_fn))
def _write_spec_file(self, instance, rpm_name, template_name, params):
    """Expand the spec template for ``rpm_name`` and write it out.

    Returns the path of the written spec file.
    """
    requires_what = params.get('requires', [])
    test_requires_what = params.get('test_requires', [])
    egg_info = getattr(instance, 'egg_info', None)
    if egg_info:
        def egg_requires(key):
            try:
                names = [str(req) for req in egg_info[key]]
            except KeyError:
                # No requirements of this kind were recorded.
                return []
            else:
                return self.py2rpm_helper.names_to_rpm_requires(names)

        requires_what.extend(egg_requires('dependencies'))
        test_requires_what.extend(egg_requires('test_dependencies'))
    params["requires"] = requires_what
    params["test_requires"] = test_requires_what
    params["epoch"] = self.OPENSTACK_EPOCH
    params["part_fn"] = lambda filename: sh.joinpths(
        settings.TEMPLATE_DIR, self.SPEC_TEMPLATE_DIR, filename)
    parsed_version = pkg_resources.parse_version(params["version"])
    params.update(self._make_spec_functors(parsed_version))
    content = utils.load_template(self.SPEC_TEMPLATE_DIR, template_name)[1]
    spec_filename = sh.joinpths(self.rpmbuild_dir, "SPECS",
                                "%s.spec" % rpm_name)
    sh.write_file(spec_filename, utils.expand_template(content, params),
                  tracewriter=self.tracewriter)
    return spec_filename
def download_dependencies(self):
    """Download dependencies from `$deps_dir/download-requires`."""
    # NOTE(aababilov): do not drop download_dir - it can be reused
    sh.mkdirslist(self.download_dir, tracewriter=self.tracewriter)
    pips_to_download = self._filter_download_requires()
    sh.write_file(self.download_requires_filename,
                  "\n".join(str(req) for req in pips_to_download))
    if not pips_to_download:
        return ([], [])
    # NOTE(aababilov): user could have changed persona, so,
    # check that all requirements are downloaded
    already_downloaded = (
        sh.isfile(self.downloaded_flag_file) and
        self._requirements_satisfied(pips_to_download, self.download_dir))
    if already_downloaded:
        LOG.info("All python dependencies have been already downloaded")
    else:
        def attempt_download(attempt):
            LOG.info("Downloading %s dependencies with pip (attempt %s)...",
                     len(pips_to_download), attempt)
            log_filename = sh.joinpths(
                self.log_dir, "pip-download-attempt-%s.log" % (attempt))
            pip_helper.download_dependencies(self.download_dir,
                                             pips_to_download, log_filename)

        utils.retry(self.MAX_PIP_DOWNLOAD_ATTEMPTS,
                    self.PIP_DOWNLOAD_DELAY, attempt_download)
        # NOTE(harlowja): Mark that we completed downloading successfully
        sh.touch_file(self.downloaded_flag_file, die_if_there=False,
                      quiet=True, tracewriter=self.tracewriter)
    pips_downloaded = [pip_helper.extract_requirement(p)
                       for p in pips_to_download]
    what_downloaded = self._examine_download_dir(pips_downloaded,
                                                 self.download_dir)
    return (pips_downloaded, what_downloaded)
def _setup_cleaner(self): LOG.info("Configuring cleaner template: %s", colorizer.quote(CLEANER_DATA_CONF)) (_fn, contents) = utils.load_template(self.name, CLEANER_DATA_CONF) # FIXME(harlowja), stop placing in checkout dir... cleaner_fn = sh.joinpths(sh.joinpths(self.get_option('app_dir'), BIN_DIR), CLEANER_DATA_CONF) sh.write_file(cleaner_fn, contents) sh.chmod(cleaner_fn, 0755) self.tracewriter.file_touched(cleaner_fn)
def _config_fix_wsgi(self):
    """Write a WSGISocketPrefix override so wsgi sockets land in log_dir."""
    # DRY: the config path used to be repeated three times inline.
    conf_path = "/etc/httpd/conf.d/wsgi-socket-prefix.conf"
    # This is recorded so it gets cleaned up during uninstall
    self.tracewriter.file_touched(conf_path)
    LOG.info("Fixing up: %s", colorizer.quote(conf_path))
    contents = "WSGISocketPrefix %s" % (sh.joinpths(self.log_dir, "wsgi-socket"))
    with sh.Rooted(True):
        # The name seems to need to come after wsgi.conf (so thats what we are doing)
        sh.write_file(conf_path, contents)
def _gather_pips_to_install(self, requires_files, extra_pips=None):
    """Analyze requires_files and extra_pips.

    Updates `self.forced_packages` and `self.pips_to_install`.
    Writes requirements to `self.gathered_requires_filename`.

    :raises exc.DependencyException: when multipip reports no
        dependencies at all.
    """
    extra_pips = extra_pips or []
    cmdline = [
        self.multipip_executable,
        "--skip-requirements-regex", "python.*client",
        "--pip", self.pip_executable,
    ]
    cmdline = cmdline + extra_pips + ["-r"] + requires_files
    cmdline.extend(["--ignore-package"])
    cmdline.extend(OPENSTACK_PACKAGES)
    cmdline.extend(SKIP_PACKAGE_NAMES)
    cmdline.extend(self.python_names)
    stdout, stderr = sh.execute(cmdline, check_exit_code=False)
    self.pips_to_install = list(utils.splitlines_not_empty(stdout))
    sh.write_file(self.gathered_requires_filename,
                  "\n".join(self.pips_to_install))
    utils.log_iterable(sorted(self.pips_to_install), logger=LOG,
                       header="Full known python dependency list")
    # Multipip reports incompatibilities on stderr as a header line
    # "<name>: incompatible requirements" followed by detail lines.
    incompatibles = collections.defaultdict(list)
    if stderr:
        current_name = ''
        for line in stderr.strip().splitlines():
            if line.endswith(": incompatible requirements"):
                current_name = line.split(":", 1)[0].lower().strip()
                # setdefault replaces a redundant membership check that
                # defaultdict already made unnecessary; it still ensures an
                # (possibly empty) entry exists for every header line seen.
                incompatibles.setdefault(current_name, [])
            else:
                incompatibles[current_name].append(line)
    for (name, lines) in incompatibles.items():
        if not name:
            continue
        LOG.warn("Incompatible requirements found for %s",
                 colorizer.quote(name, quote_color='red'))
        for line in lines:
            LOG.warn(line)
    if not self.pips_to_install:
        # BUGFIX: the two literals were previously concatenated without a
        # separating space, producing "found.Something went wrong...".
        LOG.error("No dependencies for OpenStack found. "
                  "Something went wrong. Please check:")
        LOG.error("'%s'" % "' '".join(cmdline))
        raise exc.DependencyException("No dependencies for OpenStack found")
    # Translate those that we altered requirements for into a set of forced
    # requirements file (and associated list).
    self.forced_packages = []
    for req in [pip_helper.extract_requirement(line)
                for line in self.pips_to_install]:
        if req.key in incompatibles:
            self.forced_packages.append(req)
    sh.write_file(self.forced_requires_filename,
                  "\n".join([str(req) for req in self.forced_packages]))
def build_binary(self):
    """Build binary RPMs from previously-built SRPMs, one repo at a time.

    For every repo in ``self.REPOS``: gathers its ``*.src.rpm`` files,
    generates a ``binary-<repo>.mk`` makefile from a template, runs make
    inside a scratch rpmbuild tree, moves the produced RPMs into the
    repo directory and (re)creates the yum repo metadata.
    """
    def is_src_rpm(path):
        # Accept only existing files whose name ends in '.src.rpm'.
        if not path:
            return False
        if not sh.isfile(path):
            return False
        if not path.lower().endswith('.src.rpm'):
            return False
        return True

    def list_src_rpms(path):
        # Sorted list of source rpm files under `path` (empty when absent).
        path_files = []
        if sh.isdir(path):
            path_files = sh.listdir(path, filter_func=is_src_rpm)
        return sorted(path_files)

    build_requirements = self.requirements.get("build-requires")
    if build_requirements:
        utils.log_iterable(build_requirements,
                           header="Installing build requirements",
                           logger=LOG)
        self.helper.transaction(install_pkgs=build_requirements,
                                tracewriter=self.tracewriter)
    for repo_name in self.REPOS:
        src_repo_dir = sh.joinpths(self.anvil_repo_dir,
                                   self.SRC_REPOS[repo_name])
        src_repo_files = list_src_rpms(src_repo_dir)
        if not src_repo_files:
            # Nothing to build for this repo.
            continue
        utils.log_iterable(src_repo_files,
                           header=('Building %s RPM packages from their'
                                   ' SRPMs for repo %s using %s jobs') %
                                  (len(src_repo_files),
                                   self.SRC_REPOS[repo_name], self._jobs),
                           logger=LOG)
        makefile_path = sh.joinpths(self.deps_dir, "binary-%s.mk" % repo_name)
        marks_dir = sh.joinpths(self.deps_dir, "marks-binary")
        if not sh.isdir(marks_dir):
            sh.mkdirslist(marks_dir, tracewriter=self.tracewriter)
        rpmbuild_flags = "--rebuild"
        if self.opts.get("usr_only", False):
            # Restrict installation prefixes to /usr only.
            rpmbuild_flags += " --define 'usr_only 1'"
        params = {
            "SRC_REPO_DIR": src_repo_dir,
            "RPMBUILD_FLAGS": rpmbuild_flags,
            "LOGS_DIR": self.log_dir,
            'RPMTOP_DIR': self.rpmbuild_dir,
        }
        (_fn, content) = utils.load_template(
            sh.joinpths("packaging", "makefiles"), "binary.mk")
        sh.write_file(makefile_path,
                      utils.expand_template(content, params),
                      tracewriter=self.tracewriter)
        # Build inside a scratch rpmbuild tree that is removed before and
        # after the build, then collect the produced RPMs.
        with sh.remove_before_after(self.rpmbuild_dir):
            self._create_rpmbuild_subdirs()
            self._execute_make(makefile_path, marks_dir)
            repo_dir = sh.joinpths(self.anvil_repo_dir, repo_name)
            for d in sh.listdir(self.rpmbuild_dir, dirs_only=True):
                self._move_rpm_files(sh.joinpths(d, "RPMS"), repo_dir)
            self._move_rpm_files(sh.joinpths(self.rpmbuild_dir, "RPMS"),
                                 repo_dir)
        self._create_repo(repo_name)
def download_dependencies(self):
    """Download dependencies from `$deps_dir/download-requires`."""
    # NOTE(aababilov): do not drop download_dir - it can be reused
    sh.mkdirslist(self.download_dir, tracewriter=self.tracewriter)
    download_requires_filename = sh.joinpths(self.deps_dir,
                                             "download-requires")
    raw_pips_to_download = self.filter_download_requires()
    sh.write_file(download_requires_filename,
                  "\n".join(str(req) for req in raw_pips_to_download))
    if not raw_pips_to_download:
        return ([], [])
    downloaded_flag_file = sh.joinpths(self.deps_dir, "pip-downloaded")
    # NOTE(aababilov): user could have changed persona, so,
    # check that all requirements are downloaded
    if sh.isfile(downloaded_flag_file) and self._requirements_satisfied(
            raw_pips_to_download, self.download_dir):
        LOG.info("All python dependencies have been already downloaded")
    else:
        pip_dir = sh.joinpths(self.deps_dir, "pip")
        pip_download_dir = sh.joinpths(pip_dir, "download")
        pip_build_dir = sh.joinpths(pip_dir, "build")
        # NOTE(aababilov): do not clean the cache, it is always useful
        pip_cache_dir = sh.joinpths(self.deps_dir, "pip-cache")
        pip_failures = []
        for attempt in xrange(self.MAX_PIP_DOWNLOAD_ATTEMPTS):
            # NOTE(aababilov): pip has issues with already downloaded files
            sh.deldir(pip_dir)
            sh.mkdir(pip_download_dir, recurse=True)
            utils.log_iterable(sorted(raw_pips_to_download), logger=LOG,
                               header="Downloading %s python dependencies "
                                      "(attempt %s)" %
                                      (len(raw_pips_to_download), attempt))
            try:
                self._try_download_dependencies(attempt,
                                                raw_pips_to_download,
                                                pip_download_dir,
                                                pip_cache_dir, pip_build_dir)
            except exc.ProcessExecutionError as e:
                LOG.exception("Failed downloading python dependencies")
                pip_failures.append(e)
            else:
                pip_failures = []
                break
        for filename in sh.listdir(pip_download_dir, files_only=True):
            sh.move(filename, self.download_dir, force=True)
        sh.deldir(pip_dir)
        if pip_failures:
            # Every attempt failed; surface the last failure.
            raise pip_failures[-1]
        # Leave a marker so the next run can skip re-downloading.
        with open(downloaded_flag_file, "w"):
            pass
    pips_downloaded = [pip_helper.extract_requirement(p)
                       for p in raw_pips_to_download]
    self._examine_download_dir(pips_downloaded, self.download_dir)
    what_downloaded = sh.listdir(self.download_dir, files_only=True)
    return (pips_downloaded, what_downloaded)
def _replace_deployment_paths(self, root_dir, replacer): total_replacements = 0 files_replaced = 0 for path in sh.listdir(root_dir, recursive=True, files_only=True): new_contents, replacements = replacer(sh.load_file(path)) if replacements: sh.write_file(path, new_contents) total_replacements += replacements files_replaced += 1 return (files_replaced, total_replacements)
def _record_srpm_files(self, files): if not files: return buf = six.StringIO() for f in files: buf.write(f) buf.write("\n") if sh.isfile(self.generated_srpms_filename): sh.append_file(self.generated_srpms_filename, "\n" + buf.getvalue()) else: sh.write_file(self.generated_srpms_filename, buf.getvalue())
def _setup_cleaner(self): LOG.info("Configuring cleaner template: %s", colorizer.quote(CLEANER_DATA_CONF)) (_fn, contents) = utils.load_template(self.name, CLEANER_DATA_CONF) # FIXME(harlowja), stop placing in checkout dir... cleaner_fn = sh.joinpths( sh.joinpths(self.get_option('app_dir'), BIN_DIR), CLEANER_DATA_CONF) sh.write_file(cleaner_fn, contents) sh.chmod(cleaner_fn, 0755) self.tracewriter.file_touched(cleaner_fn)
def download_dependencies(self, clear_cache=False): """Download dependencies from `$deps_dir/download-requires`. :param clear_cache: clear `$deps_dir/cache` dir (pip can work incorrectly when it has a cache) """ sh.deldir(self.download_dir) sh.mkdir(self.download_dir, recurse=True) download_requires_filename = sh.joinpths(self.deps_dir, "download-requires") raw_pips_to_download = self.filter_download_requires() pips_to_download = [ pkg_resources.Requirement.parse(str(p.strip())) for p in raw_pips_to_download if p.strip() ] sh.write_file(download_requires_filename, "\n".join(str(req) for req in pips_to_download)) if not pips_to_download: return [] pip_dir = sh.joinpths(self.deps_dir, "pip") pip_download_dir = sh.joinpths(pip_dir, "download") pip_build_dir = sh.joinpths(pip_dir, "build") pip_cache_dir = sh.joinpths(pip_dir, "cache") if clear_cache: sh.deldir(pip_cache_dir) pip_failures = [] how_many = len(pips_to_download) for attempt in xrange(self.MAX_PIP_DOWNLOAD_ATTEMPTS): # NOTE(aababilov): pip has issues with already downloaded files sh.deldir(pip_download_dir) sh.mkdir(pip_download_dir, recurse=True) sh.deldir(pip_build_dir) utils.log_iterable(sorted(raw_pips_to_download), logger=LOG, header=("Downloading %s python dependencies " "(attempt %s)" % (how_many, attempt))) failed = False try: self._try_download_dependencies(attempt, pips_to_download, pip_download_dir, pip_cache_dir, pip_build_dir) pip_failures = [] except exc.ProcessExecutionError as e: LOG.exception("Failed downloading python dependencies") pip_failures.append(e) failed = True if not failed: break if pip_failures: raise pip_failures[-1] for filename in sh.listdir(pip_download_dir, files_only=True): sh.move(filename, self.download_dir) return sh.listdir(self.download_dir, files_only=True)
def _configure_db_confs(self): LOG.info("Fixing up %s mysql configs.", colorizer.quote(self.distro.name)) fc = sh.load_file('/etc/my.cnf') lines = fc.splitlines() new_lines = list() for line in lines: if line.startswith('skip-grant-tables'): line = '#' + line new_lines.append(line) fc = utils.joinlinesep(*new_lines) with sh.Rooted(True): sh.write_file('/etc/my.cnf', fc)
def pre_install(self): comp.PythonInstallComponent.pre_install(self) if self.cfg.getboolean('glance', 'eliminate_pip_gits'): fn = sh.joinpths(self.get_option('app_dir'), 'tools', 'pip-requires') if sh.isfile(fn): new_lines = [] for line in sh.load_file(fn).splitlines(): if line.find("git://") != -1: new_lines.append("# %s" % (line)) else: new_lines.append(line) sh.write_file(fn, "\n".join(new_lines))
def _gather_pips_to_install(self, requires_files, extra_pips=None): """Analyze requires_files and extra_pips. Updates `self.forced_pips` and `self.pips_to_install`. Writes requirements to `self.gathered_requires_filename`. """ ignore_pips = set(self.python_names) ignore_pips.update(self.ignore_pips) forced_pips = set() forced_distro_pips = self.distro.get_dependency_config("forced_pips", quiet=True) if forced_distro_pips: forced_pips.update(forced_distro_pips) compatibles, incompatibles = self.multipip.resolve( extra_pips, requires_files, ignore_pips, forced_pips) self.pips_to_install = compatibles sh.write_file(self.gathered_requires_filename, "\n".join(self.pips_to_install)) pip_requirements, raw_requirements = pip_helper.read_requirement_files( [self.gathered_requires_filename]) pips_to_install = sorted(raw_requirements, cmp=sort_req) utils.log_iterable(pips_to_install, logger=LOG, header="Full known python dependency list") for (name, lines) in incompatibles.items(): LOG.warn("Incompatible requirements found for %s", colorizer.quote(name, quote_color='red')) for line in lines: LOG.warn(line) if not self.pips_to_install: LOG.error("No valid dependencies found. Something went wrong.") raise exc.DependencyException("No valid dependencies found") # Translate those that we altered requirements for into a set of forced # requirements file (and associated list). self.forced_pips = [] forced_pip_keys = [] for req in [ pip_helper.extract_requirement(line) for line in self.pips_to_install ]: if req.key in incompatibles and req.key not in forced_pip_keys: self.forced_pips.append(req) forced_pip_keys.append(req.key) self.forced_pips = sorted(self.forced_pips, cmp=sort_req) forced_pips = [str(req) for req in self.forced_pips] utils.log_iterable(forced_pips, logger=LOG, header="Automatically forced python dependencies") sh.write_file(self.forced_requires_filename, "\n".join(forced_pips))
def _configure_files(self): config_fns = self.config_files if config_fns: utils.log_iterable(config_fns, logger=LOG, header="Configuring %s files" % (len(config_fns))) for fn in config_fns: tgt_fn = self.target_config(fn) sh.mkdirslist(sh.dirname(tgt_fn), tracewriter=self.tracewriter) (source_fn, contents) = self.source_config(fn) LOG.debug("Configuring file %s ---> %s.", (source_fn), (tgt_fn)) contents = self._config_param_replace(fn, contents, self.config_params(fn)) contents = self._config_adjust(contents, fn) sh.write_file(tgt_fn, contents, tracewriter=self.tracewriter) return len(config_fns)
def _filter_download_requires(self): yum_map = self._get_known_yum_packages() pip_origins = {} for line in self.pips_to_install: req = pip_helper.extract_requirement(line) pip_origins[req.key] = line pips_to_download = [] req_to_install = [ pip_helper.extract_requirement(line) for line in self.pips_to_install ] requested_names = [req.key for req in req_to_install] rpm_names = self.py2rpm_helper.names_to_rpm_names(requested_names) satisfied_list = [] for req in req_to_install: rpm_name = rpm_names[req.key] rpm_info = self._find_yum_match(yum_map, req, rpm_name) if not rpm_info: # We need the source requirement in case it's a url. pips_to_download.append(pip_origins[req.key]) else: satisfied_list.append((req, rpm_name, rpm_info)) yum_buff = six.StringIO() if satisfied_list: # Organize by repo repos = collections.defaultdict(list) for (req, rpm_name, rpm_info) in satisfied_list: repo = rpm_info['repo'] rpm_found = '%s-%s' % (rpm_name, rpm_info['version']) repos[repo].append( "%s as %s" % (colorizer.quote(req), colorizer.quote(rpm_found))) dep_info = { 'requirement': str(req), 'rpm': rpm_info, } yum_buff.write(json.dumps(dep_info)) yum_buff.write("\n") for r in sorted(repos.keys()): header = ("%s Python packages are already available " "as RPMs from repository %s") header = header % (len(repos[r]), colorizer.quote(r)) utils.log_iterable(sorted(repos[r]), logger=LOG, header=header, color=None) sh.write_file(self.yum_satisfies_filename, yum_buff.getvalue()) return pips_to_download
def build_binary(self): def _install_build_requirements(): build_requires = self.requirements["build-requires"] if build_requires: utils.log_iterable(sorted(build_requires), header=("Installing %s build requirements" % len(build_requires)), logger=LOG) cmdline = ["yum", "install", "-y"] + list(build_requires) sh.execute(cmdline) def _is_src_rpm(filename): return filename.endswith('.src.rpm') _install_build_requirements() for repo_name in self.REPOS: repo_dir = sh.joinpths(self.anvil_repo_dir, repo_name) sh.mkdirslist(repo_dir, tracewriter=self.tracewriter) src_repo_dir = sh.joinpths(self.anvil_repo_dir, self.SRC_REPOS[repo_name]) if sh.isdir(src_repo_dir): src_repo_files = sh.listdir(src_repo_dir, files_only=True) src_repo_files = sorted([f for f in src_repo_files if _is_src_rpm(f)]) else: src_repo_files = [] if not src_repo_files: continue src_repo_base_files = [sh.basename(f) for f in src_repo_files] LOG.info('Building %s RPM packages from their SRPMs for repo %s using %s jobs', len(src_repo_files), self.SRC_REPOS[repo_name], self.jobs) makefile_name = sh.joinpths(self.deps_dir, "binary-%s.mk" % repo_name) marks_dir = sh.joinpths(self.deps_dir, "marks-binary") sh.mkdirslist(marks_dir, tracewriter=self.tracewriter) (_fn, content) = utils.load_template("packaging/makefiles", "binary.mk") rpmbuild_flags = ("--rebuild --define '_topdir %s'" % self.rpmbuild_dir) if self.opts.get("usr_only", False): rpmbuild_flags += "--define 'usr_only 1'" params = { "SRC_REPO_DIR": src_repo_dir, "RPMBUILD_FLAGS": rpmbuild_flags, "LOGS_DIR": self.log_dir, } sh.write_file(makefile_name, utils.expand_template(content, params), tracewriter=self.tracewriter) with sh.remove_before_after(self.rpmbuild_dir): self._create_rpmbuild_subdirs() self._execute_make(makefile_name, marks_dir) self._move_files(sh.joinpths(self.rpmbuild_dir, "RPMS"), repo_dir) self._create_repo(repo_name)
def download_dependencies(self): """Download dependencies from `$deps_dir/download-requires`.""" # NOTE(aababilov): do not drop download_dir - it can be reused sh.mkdirslist(self.download_dir, tracewriter=self.tracewriter) pips_to_download = self._filter_download_requires() sh.write_file(self.download_requires_filename, "\n".join([str(req) for req in pips_to_download])) if not pips_to_download: return ([], []) # NOTE(aababilov): user could have changed persona, so, # check that all requirements are downloaded if (sh.isfile(self.downloaded_flag_file) and self._requirements_satisfied(pips_to_download, self.download_dir)): LOG.info("All python dependencies have been already downloaded") else: pip_failures = [] for attempt in xrange(self.MAX_PIP_DOWNLOAD_ATTEMPTS): # NOTE(aababilov): pip has issues with already downloaded files for filename in sh.listdir(self.download_dir, files_only=True): sh.unlink(filename) header = "Downloading %s python dependencies (attempt %s)" header = header % (len(pips_to_download), attempt + 1) utils.log_iterable(sorted(pips_to_download), logger=LOG, header=header) failed = False try: self._try_download_dependencies(attempt + 1, pips_to_download, self.download_dir) pip_failures = [] except exc.ProcessExecutionError as e: LOG.exception("Failed downloading python dependencies") pip_failures.append(e) failed = True if not failed: break if pip_failures: raise pip_failures[-1] # NOTE(harlowja): Mark that we completed downloading successfully sh.touch_file(self.downloaded_flag_file, die_if_there=False, quiet=True, tracewriter=self.tracewriter) pips_downloaded = [ pip_helper.extract_requirement(p) for p in pips_to_download ] self._examine_download_dir(pips_downloaded, self.download_dir) return (pips_downloaded, sh.listdir(self.download_dir, files_only=True))
def download_dependencies(self, clear_cache=False): """Download dependencies from `$deps_dir/download-requires`. :param clear_cache: clear `$deps_dir/cache` dir (pip can work incorrectly when it has a cache) """ sh.deldir(self.download_dir) sh.mkdir(self.download_dir, recurse=True) download_requires_filename = sh.joinpths(self.deps_dir, "download-requires") raw_pips_to_download = self.filter_download_requires() pips_to_download = [pkg_resources.Requirement.parse(str(p.strip())) for p in raw_pips_to_download if p.strip()] sh.write_file(download_requires_filename, "\n".join(str(req) for req in pips_to_download)) if not pips_to_download: return [] pip_dir = sh.joinpths(self.deps_dir, "pip") pip_download_dir = sh.joinpths(pip_dir, "download") pip_build_dir = sh.joinpths(pip_dir, "build") pip_cache_dir = sh.joinpths(pip_dir, "cache") if clear_cache: sh.deldir(pip_cache_dir) pip_failures = [] how_many = len(pips_to_download) for attempt in xrange(self.MAX_PIP_DOWNLOAD_ATTEMPTS): # NOTE(aababilov): pip has issues with already downloaded files sh.deldir(pip_download_dir) sh.mkdir(pip_download_dir, recurse=True) sh.deldir(pip_build_dir) utils.log_iterable(sorted(raw_pips_to_download), logger=LOG, header=("Downloading %s python dependencies " "(attempt %s)" % (how_many, attempt))) failed = False try: self._try_download_dependencies(attempt, pips_to_download, pip_download_dir, pip_cache_dir, pip_build_dir) pip_failures = [] except exc.ProcessExecutionError as e: LOG.exception("Failed downloading python dependencies") pip_failures.append(e) failed = True if not failed: break if pip_failures: raise pip_failures[-1] for filename in sh.listdir(pip_download_dir, files_only=True): sh.move(filename, self.download_dir) return sh.listdir(self.download_dir, files_only=True)
def configure(self): configs_made = nova.NovaInstaller.configure(self) driver_canon = utils.canon_virt_driver(self.get_option('virt_driver')) if driver_canon == 'libvirt': # Create a libvirtd user group if not sh.group_exists('libvirtd'): cmd = ['groupadd', 'libvirtd'] sh.execute(cmd) if not sh.isfile(LIBVIRT_POLICY_FN): contents = self._get_policy(self._get_policy_users()) sh.mkdirslist(sh.dirname(LIBVIRT_POLICY_FN)) sh.write_file(LIBVIRT_POLICY_FN, contents) configs_made += 1 return configs_made
def configure(self): configs_made = nova.NovaInstaller.configure(self) driver_canon = nhelper.canon_virt_driver(self.cfg.get('nova', 'virt_driver')) if driver_canon == 'libvirt': (fn, contents) = self._get_policy(self._get_policy_users()) dirs_made = list() with sh.Rooted(True): # TODO check if this dir is restricted before assuming it isn't? dirs_made.extend(sh.mkdirslist(sh.dirname(fn))) sh.write_file(fn, contents) self.tracewriter.cfg_file_written(fn) self.tracewriter.dirs_made(*dirs_made) configs_made += 1 return configs_made
def _configure_files(self): config_fns = self.configurator.config_files if config_fns: utils.log_iterable(config_fns, logger=LOG, header="Configuring %s files" % (len(config_fns))) for fn in config_fns: tgt_fn = self.configurator.target_config(fn) sh.mkdirslist(sh.dirname(tgt_fn), tracewriter=self.tracewriter) (source_fn, contents) = self.configurator.source_config(fn) LOG.debug("Configuring file %s ---> %s.", (source_fn), (tgt_fn)) contents = self.configurator.config_param_replace(fn, contents, self.config_params(fn)) contents = self.configurator.config_adjust(contents, fn) sh.write_file(tgt_fn, contents, tracewriter=self.tracewriter) return len(config_fns)
def _generate_root_wrap(self): if not self.cfg.getboolean("nova", "do_root_wrap"): return False else: lines = list() lines.append("%s ALL=(root) NOPASSWD: %s" % (sh.getuser(), self.root_wrap_bin)) fc = utils.joinlinesep(*lines) root_wrap_fn = sh.joinpths(self.distro.get_command_config("sudoers_dir"), "nova-rootwrap") self.tracewriter.file_touched(root_wrap_fn) with sh.Rooted(True): sh.write_file(root_wrap_fn, fc) sh.chmod(root_wrap_fn, 0440) sh.chown(root_wrap_fn, sh.getuid(sh.ROOT_USER), sh.getgid(sh.ROOT_GROUP)) return True
def build_all_binaries(self, repo_name, src_repo_dir, rpmbuild_flags, tracewriter, jobs): makefile_path = sh.joinpths(self._deps_dir, "binary-%s.mk" % repo_name) marks_dir = sh.joinpths(self._deps_dir, "marks-binary") if not sh.isdir(marks_dir): sh.mkdirslist(marks_dir, tracewriter=tracewriter) params = { "SRC_REPO_DIR": src_repo_dir, "RPMBUILD_FLAGS": rpmbuild_flags, "LOGS_DIR": self._log_dir, "RPMTOP_DIR": self._rpmbuild_dir, } (_fn, content) = utils.load_template(sh.joinpths("packaging", "makefiles"), "binary.mk") sh.write_file(makefile_path, utils.expand_template(content, params), tracewriter=tracewriter) self._execute_make(makefile_path, marks_dir, jobs)
def store_current_settings(c_settings): # Remove certain keys that just shouldn't be saved to_save = dict(c_settings) for k in ['action', 'verbose']: if k in c_settings: to_save.pop(k, None) buf = six.StringIO() buf.write("# Anvil last used settings\n") buf.write( utils.add_header(SETTINGS_FILE, utils.prettify_yaml(to_save), adjusted=sh.isfile(SETTINGS_FILE))) try: sh.write_file(SETTINGS_FILE, buf.getvalue()) except OSError as e: LOG.warn("Failed writing to %s due to %s", SETTINGS_FILE, e)
def _write_exports(self, component_order, instances, path): entries = [] contents = StringIO() contents.write("# Exports for action %s\n\n" % (self.name)) for c in component_order: exports = instances[c].env_exports if exports: contents.write("# Exports for %s\n" % (c)) for (k, v) in exports.items(): export_entry = "export %s=%s" % (k, sh.shellquote(str(v).strip())) entries.append(export_entry) contents.write("%s\n" % (export_entry)) contents.write("\n") if entries: sh.write_file(path, contents.getvalue()) utils.log_iterable(entries, header="Wrote to %s %s exports" % (path, len(entries)), logger=LOG)
def gather_pips_to_install(self, requires_files, extra_pips=None): """Analyze requires_files and extra_pips. Updates `self.forced_packages` and `self.pips_to_install`. Writes requirements to `self.gathered_requires_filename`. """ extra_pips = extra_pips or [] cmdline = [ self.multipip_executable, "--skip-requirements-regex", "python.*client", "--pip", self.pip_executable ] cmdline = cmdline + extra_pips + ["-r"] + requires_files output = sh.execute(cmdline, check_exit_code=False) conflict_descr = output[1].strip() forced_keys = set() if conflict_descr: for line in conflict_descr.splitlines(): LOG.warning(line) if line.endswith(": incompatible requirements"): forced_keys.add(line.split(":", 1)[0].lower()) self.pips_to_install = [ pkg for pkg in utils.splitlines_not_empty(output[0]) if pkg.lower() not in OPENSTACK_PACKAGES ] sh.write_file(self.gathered_requires_filename, "\n".join(self.pips_to_install)) if not self.pips_to_install: LOG.error("No dependencies for OpenStack found." "Something went wrong. Please check:") LOG.error("'%s'" % "' '".join(cmdline)) raise RuntimeError("No dependencies for OpenStack found") utils.log_iterable(sorted(self.pips_to_install), logger=LOG, header="Full known python dependency list") self.forced_packages = [] for pip in self.pips_to_install: req = pkg_resources.Requirement.parse(pip) if req.key in forced_keys: self.forced_packages.append(req) sh.write_file(self.forced_requires_filename, "\n".join(str(req) for req in self.forced_packages))
def _write_spec_file(self, instance, rpm_name, template_name, params): requires_what = params.get('requires') if not requires_what: requires_what = [] requires_python = [] try: requires_python.extend(instance.egg_info['dependencies']) except AttributeError: pass if requires_python: requires_what.extend( self._convert_names_python2rpm(requires_python, False)) params['requires'] = requires_what params["epoch"] = self.OPENSTACK_EPOCH content = utils.load_template(self.SPEC_TEMPLATE_DIR, template_name)[1] spec_filename = sh.joinpths(self.rpmbuild_dir, "SPECS", "%s.spec" % rpm_name) sh.write_file(spec_filename, utils.expand_template(content, params), tracewriter=self.tracewriter) return spec_filename
def download_dependencies(self): """Download dependencies from `$deps_dir/download-requires`.""" # NOTE(aababilov): do not drop download_dir - it can be reused sh.mkdirslist(self.download_dir, tracewriter=self.tracewriter) pips_to_download = self._filter_download_requires() sh.write_file(self.download_requires_filename, "\n".join([str(req) for req in pips_to_download])) if not pips_to_download: return ([], []) # NOTE(aababilov): user could have changed persona, so, # check that all requirements are downloaded if (sh.isfile(self.downloaded_flag_file) and self._requirements_satisfied(pips_to_download, self.download_dir)): LOG.info("All python dependencies have been already downloaded") else: def try_download(attempt): LOG.info( "Downloading %s dependencies with pip (attempt %s)...", len(pips_to_download), attempt) output_filename = sh.joinpths( self.log_dir, "pip-download-attempt-%s.log" % (attempt)) pip_helper.download_dependencies(self.download_dir, pips_to_download, output_filename) utils.retry(self.MAX_PIP_DOWNLOAD_ATTEMPTS, self.PIP_DOWNLOAD_DELAY, try_download) # NOTE(harlowja): Mark that we completed downloading successfully sh.touch_file(self.downloaded_flag_file, die_if_there=False, quiet=True, tracewriter=self.tracewriter) pips_downloaded = [ pip_helper.extract_requirement(p) for p in pips_to_download ] what_downloaded = self._examine_download_dir(pips_downloaded, self.download_dir) return (pips_downloaded, what_downloaded)
def download_dependencies(self): """Download dependencies from `$deps_dir/download-requires`.""" # NOTE(aababilov): do not drop download_dir - it can be reused sh.mkdirslist(self.download_dir, tracewriter=self.tracewriter) pips_to_download = self._filter_download_requires() sh.write_file(self.download_requires_filename, "\n".join([str(req) for req in pips_to_download])) if not pips_to_download: return ([], []) # NOTE(aababilov): user could have changed persona, so, # check that all requirements are downloaded.... if self._requirements_satisfied(pips_to_download, self.download_dir): LOG.info("All python dependencies have been already downloaded") else: utils.retry(self.retries, self.retry_delay, self._try_download, pips_to_download) pips_downloaded = [ pip_helper.extract_requirement(p) for p in pips_to_download ] what_downloaded = self._examine_download_dir(pips_downloaded, self.download_dir) return (pips_downloaded, what_downloaded)
def _copy_startup_scripts(self, instance, spec_filename): common_init_content = utils.load_template("packaging", "common.init")[1] cmd = [self.specprint_executable] cmd.extend(['-f', spec_filename]) daemon_args = instance.get_option('daemon_args', default_value={}) spec_details = json.loads(sh.execute(cmd)[0]) for src in spec_details.get('sources', []): script = sh.basename(src) if not (script.endswith(".init")): continue target_filename = sh.joinpths(self.rpm_sources_dir, script) if sh.isfile(target_filename): continue bin_name = utils.strip_prefix_suffix(script, "openstack-", ".init") params = { "bin": bin_name, "package": bin_name.split("-", 1)[0], "daemon_args": daemon_args.get(bin_name, ''), } sh.write_file(target_filename, utils.expand_template(common_init_content, params))
def _create_deps_repo(self): for filename in sh.listdir(sh.joinpths(self.rpmbuild_dir, "RPMS"), recursive=True, files_only=True): sh.move(filename, self.deps_repo_dir, force=True) for filename in sh.listdir(sh.joinpths(self.rpmbuild_dir, "SRPMS"), recursive=True, files_only=True): sh.move(filename, self.deps_src_repo_dir, force=True) for repo_dir in self.deps_repo_dir, self.deps_src_repo_dir: cmdline = ["createrepo", repo_dir] LOG.info("Creating repo at %s" % repo_dir) sh.execute(cmdline) LOG.info("Writing %s to %s", self.REPO_FN, self.anvil_repo_filename) (_fn, content) = utils.load_template('packaging', self.REPO_FN) params = { "baseurl_bin": "file://%s" % self.deps_repo_dir, "baseurl_src": "file://%s" % self.deps_src_repo_dir } sh.write_file(self.anvil_repo_filename, utils.expand_template(content, params), tracewriter=self.tracewriter)
def _write_all_deps_package(self):
    """Build a single meta RPM that requires every collected dependency.

    Assembles a spec whose Requires lines cover all instance packages,
    merges their pre/post (un)install scriptlets, writes the spec and
    invokes rpmbuild on it.
    """
    spec_filename = sh.joinpths(
        self.rpmbuild_dir,
        "SPECS",
        "%s.spec" % self.OPENSTACK_DEPS_PACKAGE_NAME)
    # Clean out previous dirs.
    for dirname in (self.rpmbuild_dir, self.deps_repo_dir,
                    self.deps_src_repo_dir):
        sh.deldir(dirname)
        sh.mkdirslist(dirname, tracewriter=self.tracewriter)

    def get_version_release():
        # Version is date-derived; release is the epoch-seconds timestamp.
        right_now = datetime.now()
        components = [
            str(right_now.year),
            str(right_now.month),
            str(right_now.day),
        ]
        return (".".join(components), right_now.strftime("%s"))

    (version, release) = get_version_release()
    spec_content = """Name: %s
Version: %s
Release: %s
License: Apache 2.0
Summary: OpenStack dependencies
BuildArch: noarch

""" % (self.OPENSTACK_DEPS_PACKAGE_NAME, version, release)
    # Collect packages from every instance that declares them.
    packages = {}
    for inst in self.instances:
        try:
            for pack in inst.packages:
                packages[pack["name"]] = pack
        except AttributeError:
            pass
    scripts = {}
    script_map = {
        "pre-install": "%pre",
        "post-install": "%post",
        "pre-uninstall": "%preun",
        "post-uninstall": "%postun",
    }
    for pack_name in sorted(packages.iterkeys()):
        pack = packages[pack_name]
        # Append a Requires line for this package (with optional version).
        cont = [spec_content, "Requires: ", pack["name"]]
        # NOTE(review): this rebinds `version`, shadowing the spec version
        # computed above (already interpolated, so behavior is unaffected).
        version = pack.get("version")
        if version:
            cont.append(" ")
            cont.append(version)
        cont.append("\n")
        spec_content = "".join(cont)
        # Merge this package's scriptlets into the per-phase bodies.
        for script_name in script_map.iterkeys():
            try:
                script_list = pack[script_name]
            except (KeyError, ValueError):
                continue
            script_body = scripts.get(script_name, "")
            script_body = "%s\n# %s\n" % (script_body, pack_name)
            for script in script_list:
                try:
                    line = " ".join(
                        sh.shellquote(word) for word in script["cmd"])
                except (KeyError, ValueError):
                    continue
                if script.get("ignore_failure"):
                    ignore = " 2>/dev/null || true"
                else:
                    ignore = ""
                script_body = "".join((script_body, line, ignore, "\n"))
            scripts[script_name] = script_body
    spec_content += "\n%description\n\n"
    # Emit the collected scriptlet sections in a stable order.
    for script_name in sorted(script_map.iterkeys()):
        try:
            script_body = scripts[script_name]
        except KeyError:
            pass
        else:
            spec_content = "%s\n%s\n%s\n" % (spec_content,
                                             script_map[script_name],
                                             script_body)
    spec_content += "\n%files\n"
    sh.write_file(spec_filename, spec_content,
                  tracewriter=self.tracewriter)
    cmdline = [
        "rpmbuild", "-ba",
        "--define", "_topdir %s" % self.rpmbuild_dir,
        spec_filename,
    ]
    LOG.info("Building %s RPM" % self.OPENSTACK_DEPS_PACKAGE_NAME)
    sh.execute(cmdline)
def _build_dependencies(self):
    """Download python dependencies and build the needed ones into SRPMs.

    Downloads everything pip resolves, then ejects archives that are
    disallowed or already satisfiable via yum, records what was kept and
    what yum provides, and finally builds the remaining archives as SRPMs.
    """
    (pips_downloaded, package_files) = self.download_dependencies()
    # Analyze what was downloaded and eject things that were downloaded
    # by pip as a dependency of a download but which we do not want to
    # build or can satisfy by other means
    no_pips = [pkg_resources.Requirement.parse(name).key
               for name in self.python_names]
    no_pips.extend(self.ignore_pips)
    yum_map = self._get_known_yum_packages()
    pips_keys = set([p.key for p in pips_downloaded])
    # Pair each downloaded archive with its parsed requirement.
    package_reqs = []
    for filename in package_files:
        package_details = pip_helper.get_archive_details(filename)
        package_reqs.append((filename, package_details['req']))

    def _filter_package_files():
        yum_provided = []
        req_names = [req.key for (filename, req) in package_reqs]
        package_rpm_names = self.py2rpm_helper.names_to_rpm_names(req_names)
        filtered_files = []
        for filename, req in package_reqs:
            rpm_name = package_rpm_names[req.key]
            if req.key in no_pips:
                LOG.info(("Dependency %s was downloaded additionally "
                          "but it is disallowed."), colorizer.quote(req))
                continue
            if req.key in pips_keys:
                # Explicitly requested; always keep it.
                filtered_files.append(filename)
                continue
            # See if pip tried to download it but we already can satisfy
            # it via yum and avoid building it in the first place...
            rpm_info = self._find_yum_match(yum_map, req, rpm_name)
            if not rpm_info:
                filtered_files.append(filename)
            else:
                yum_provided.append((req, rpm_info))
                LOG.info(("Dependency %s was downloaded additionally "
                          "but it can be satisfied by %s from repository "
                          "%s instead."), colorizer.quote(req),
                         colorizer.quote(rpm_name),
                         colorizer.quote(rpm_info['repo']))
        return (filtered_files, yum_provided)

    LOG.info("Filtering %s downloaded files.", len(package_files))
    filtered_package_files, yum_provided = _filter_package_files()
    if yum_provided:
        # Record yum-satisfied requirements as JSON lines for later use.
        yum_buff = six.StringIO()
        for (req, rpm_info) in yum_provided:
            dep_info = {
                'requirement': str(req),
                'rpm': rpm_info,
            }
            yum_buff.write(json.dumps(dep_info))
            yum_buff.write("\n")
        sh.append_file(self.yum_satisfies_filename, yum_buff.getvalue())
    if not filtered_package_files:
        LOG.info("No SRPM package dependencies to build.")
        return
    # Remove archives that were filtered out.
    for filename in package_files:
        if filename not in filtered_package_files:
            sh.unlink(filename)
    build_requires = six.StringIO()
    for (filename, req) in package_reqs:
        if filename in filtered_package_files:
            build_requires.write("%s # %s\n" % (req, sh.basename(filename)))
    sh.write_file(self.build_requires_filename, build_requires.getvalue())
    # Now build them into SRPM rpm files.
    package_files = sorted(filtered_package_files)
    self.py2rpm_helper.build_all_srpms(package_files=package_files,
                                       tracewriter=self.tracewriter,
                                       jobs=self.jobs)
def build_binary(self): def is_src_rpm(path): if not path: return False if not sh.isfile(path): return False if not path.lower().endswith('.src.rpm'): return False return True def list_src_rpms(path): path_files = [] if sh.isdir(path): path_files = sh.listdir(path, filter_func=is_src_rpm) return sorted(path_files) build_requirements = self.requirements.get("build-requires") if build_requirements: utils.log_iterable(build_requirements, header="Installing build requirements", logger=LOG) self.helper.transaction(install_pkgs=build_requirements, tracewriter=self.tracewriter) for repo_name in self.REPOS: src_repo_dir = sh.joinpths(self.anvil_repo_dir, self.SRC_REPOS[repo_name]) src_repo_files = list_src_rpms(src_repo_dir) if not src_repo_files: continue utils.log_iterable( src_repo_files, header=('Building %s RPM packages from their' ' SRPMs for repo %s using %s jobs') % (len(src_repo_files), self.SRC_REPOS[repo_name], self._jobs), logger=LOG) makefile_path = sh.joinpths(self.deps_dir, "binary-%s.mk" % repo_name) marks_dir = sh.joinpths(self.deps_dir, "marks-binary") if not sh.isdir(marks_dir): sh.mkdirslist(marks_dir, tracewriter=self.tracewriter) rpmbuild_flags = "--rebuild" if self.opts.get("usr_only", False): rpmbuild_flags += " --define 'usr_only 1'" params = { "SRC_REPO_DIR": src_repo_dir, "RPMBUILD_FLAGS": rpmbuild_flags, "LOGS_DIR": self.log_dir, 'RPMTOP_DIR': self.rpmbuild_dir, } (_fn, content) = utils.load_template( sh.joinpths("packaging", "makefiles"), "binary.mk") sh.write_file(makefile_path, utils.expand_template(content, params), tracewriter=self.tracewriter) with sh.remove_before_after(self.rpmbuild_dir): self._create_rpmbuild_subdirs() self._execute_make(makefile_path, marks_dir) repo_dir = sh.joinpths(self.anvil_repo_dir, repo_name) for d in sh.listdir(self.rpmbuild_dir, dirs_only=True): self._move_rpm_files(sh.joinpths(d, "RPMS"), repo_dir) self._move_rpm_files(sh.joinpths(self.rpmbuild_dir, "RPMS"), repo_dir) self._create_repo(repo_name)
def _gather_pips_to_install(self, requires_files, extra_pips=None):
    """Analyze requires_files and extra_pips.

    Updates `self.forced_packages` and `self.pips_to_install`.
    Writes requirements to `self.gathered_requires_filename`.

    :param requires_files: requirement-file paths handed to multipip.
    :param extra_pips: optional extra requirement specifier strings.
    :raises exc.DependencyException: when multipip reports no
        dependencies at all.
    """
    extra_pips = extra_pips or []
    # Let multipip merge every requirement source into one flat list,
    # skipping the openstack packages themselves (built from source).
    cmdline = [
        self.multipip_executable,
        "--skip-requirements-regex",
        "python.*client",
        "--pip",
        self.pip_executable,
    ]
    cmdline = cmdline + extra_pips + ["-r"] + requires_files
    # FIX: single-item extend replaced with append.
    cmdline.append("--ignore-package")
    cmdline.extend(OPENSTACK_PACKAGES)
    cmdline.extend(SKIP_PACKAGE_NAMES)
    cmdline.extend(self.python_names)
    stdout, stderr = sh.execute(cmdline, check_exit_code=False)
    self.pips_to_install = list(utils.splitlines_not_empty(stdout))
    sh.write_file(self.gathered_requires_filename,
                  "\n".join(self.pips_to_install))
    utils.log_iterable(sorted(self.pips_to_install), logger=LOG,
                       header="Full known python dependency list")
    # multipip reports each incompatible group on stderr as a
    # "<name>: incompatible requirements" header followed by detail lines.
    incompatibles = collections.defaultdict(list)
    if stderr:
        current_name = ''
        for line in stderr.strip().splitlines():
            if line.endswith(": incompatible requirements"):
                current_name = line.split(":", 1)[0].lower().strip()
                if current_name not in incompatibles:
                    # Record the name even when no detail lines follow,
                    # so it still counts as incompatible below.
                    incompatibles[current_name] = []
            else:
                incompatibles[current_name].append(line)
    for (name, lines) in incompatibles.items():
        if not name:
            # Detail lines seen before any header; nothing to attribute.
            continue
        LOG.warn("Incompatible requirements found for %s",
                 colorizer.quote(name, quote_color='red'))
        for line in lines:
            LOG.warn(line)
    if not self.pips_to_install:
        # FIX: the two adjacent literals previously concatenated without
        # a space, logging "...found.Something went wrong...".
        LOG.error("No dependencies for OpenStack found. "
                  "Something went wrong. Please check:")
        LOG.error("'%s'" % "' '".join(cmdline))
        raise exc.DependencyException(
            "No dependencies for OpenStack found")
    # Translate those that we altered requirements for into a set of forced
    # requirements file (and associated list).
    self.forced_packages = []
    for req in [pip_helper.extract_requirement(line)
                for line in self.pips_to_install]:
        if req.key in incompatibles:
            self.forced_packages.append(req)
    sh.write_file(self.forced_requires_filename,
                  "\n".join([str(req) for req in self.forced_packages]))
def _build_dependencies(self):
    """Download python dependencies, filter out unneeded ones, build SRPMs.

    Downloads everything pip resolves, ejects packages that are
    disallowed or already satisfiable from yum repositories, then renders
    a ``source.mk`` makefile and runs it to turn the remaining archives
    into SRPM files.
    """
    (pips_downloaded, package_files) = self.download_dependencies()

    # Analyze what was downloaded and eject things that were downloaded
    # by pip as a dependency of a download but which we do not want to
    # build or can satisfy by other means
    no_pips = [pkg_resources.Requirement.parse(name).key
               for name in self.python_names]
    yum_map = self._get_known_yum_packages()
    pips_keys = set([p.key for p in pips_downloaded])

    def _filter_package_files(package_files):
        # Return the subset of package_files that must actually be built.
        package_reqs = []
        package_keys = []
        for filename in package_files:
            package_details = pip_helper.get_archive_details(filename)
            package_reqs.append(package_details['req'])
            package_keys.append(package_details['req'].key)
        package_rpm_names = self._convert_names_python2rpm(package_keys)
        filtered_files = []
        for (filename, req, rpm_name) in zip(package_files, package_reqs,
                                             package_rpm_names):
            if req.key in no_pips:
                # Explicitly disallowed (it is one of our python names).
                LOG.info(("Dependency %s was downloaded additionally "
                          "but it is disallowed."), colorizer.quote(req))
                continue
            if req.key in pips_keys:
                # Directly requested; always keep for building.
                filtered_files.append(filename)
                continue
            # See if pip tried to download it but we already can satisfy
            # it via yum and avoid building it in the first place...
            (_version, repo) = self._find_yum_match(yum_map, req, rpm_name)
            if not repo:
                filtered_files.append(filename)
            else:
                LOG.info(("Dependency %s was downloaded additionally "
                          "but it can be satisfied by %s from repository "
                          "%s instead."), colorizer.quote(req),
                         colorizer.quote(rpm_name), colorizer.quote(repo))
        return filtered_files

    LOG.info("Filtering %s downloaded files.", len(package_files))
    filtered_package_files = _filter_package_files(package_files)
    if not filtered_package_files:
        LOG.info("No SRPM package dependencies to build.")
        return
    # Remove downloads that were ejected by the filter above.
    for filename in package_files:
        if filename not in filtered_package_files:
            sh.unlink(filename)
    package_files = sorted(filtered_package_files)
    # Now build them into SRPM rpm files.
    (_fn, content) = utils.load_template(sh.joinpths("packaging",
                                                     "makefiles"),
                                         "source.mk")
    scripts_dir = sh.abspth(
        sh.joinpths(settings.TEMPLATE_DIR, "packaging", "scripts"))
    # Everything after the executable itself becomes py2rpm flags.
    py2rpm_options = self._py2rpm_start_cmdline()[1:] + [
        "--scripts-dir", scripts_dir,
        "--source-only",
        "--rpm-base", self.rpmbuild_dir,
    ]
    # Values substituted into the source.mk makefile template.
    params = {
        "DOWNLOADS_DIR": self.download_dir,
        "LOGS_DIR": self.log_dir,
        "PY2RPM": self.py2rpm_executable,
        "PY2RPM_FLAGS": " ".join(py2rpm_options),
    }
    marks_dir = sh.joinpths(self.deps_dir, "marks-deps")
    if not sh.isdir(marks_dir):
        sh.mkdirslist(marks_dir, tracewriter=self.tracewriter)
    makefile_path = sh.joinpths(self.deps_dir, "deps.mk")
    sh.write_file(makefile_path, utils.expand_template(content, params),
                  tracewriter=self.tracewriter)
    utils.log_iterable(package_files,
                       header="Building %s SRPM packages using %s jobs" %
                              (len(package_files), self._jobs),
                       logger=LOG)
    self._execute_make(makefile_path, marks_dir)