Example #1
 def stop(self, app_name):
     trace_dir = self.runtime.get_option('trace_dir')
     if not sh.isdir(trace_dir):
         msg = "No trace directory found from which to stop: %s" % (app_name)
         raise excp.StopException(msg)
     with sh.Rooted(True):
         fn_name = FORK_TEMPL % (app_name)
         (pid_file, stderr_fn, stdout_fn) = self._form_file_names(fn_name)
         pid = self._extract_pid(pid_file)
         if not pid:
             msg = "Could not extract a valid pid from %s" % (pid_file)
             raise excp.StopException(msg)
         (killed, attempts) = sh.kill(pid)
         # Trash the files if it worked
         if killed:
             LOG.debug("Killed pid %s after %s attempts." % (pid, attempts))
             LOG.debug("Removing pid file %s" % (pid_file))
             sh.unlink(pid_file)
             LOG.debug("Removing stderr file %r" % (stderr_fn))
             sh.unlink(stderr_fn)
             LOG.debug("Removing stdout file %r" % (stdout_fn))
             sh.unlink(stdout_fn)
             trace_fn = tr.trace_filename(trace_dir, fn_name)
             if sh.isfile(trace_fn):
                 LOG.debug("Removing %r trace file %r" % (app_name, trace_fn))
                 sh.unlink(trace_fn)
         else:
             msg = "Could not stop %r after %s attempts" % (app_name, attempts)
             raise excp.StopException(msg)
Example #2
 def _get_env(self):
     env_addons = {}
     tox_fn = sh.joinpths(self.get_option("app_dir"), "tox.ini")
     if sh.isfile(tox_fn):
         # Suck out some settings from the tox file
         try:
             tox_cfg = cfg.BuiltinConfigParser(fns=[tox_fn])
             env_values = tox_cfg.get("testenv", "setenv") or ""
             for env_line in env_values.splitlines():
                 env_line = env_line.strip()
                 env_line = env_line.split("#")[0].strip()
                 if not env_line:
                     continue
                 env_entry = env_line.split("=", 1)
                 if len(env_entry) == 2:
                     (name, value) = env_entry
                     name = name.strip()
                     value = value.strip()
                     if name.lower() != "virtual_env":
                         env_addons[name] = value
             if env_addons:
                 LOG.debug("From %s we read in %s environment settings:", tox_fn, len(env_addons))
                 utils.log_object(env_addons, logger=LOG, level=logging.DEBUG)
         except IOError:
             pass
     return env_addons
Example #3
 def _clean_pip_requires(self):
     # Fixup these files if they exist (sometimes they have 'junk' in them)
     req_fns = []
     for fn in self.requires_files:
         if not sh.isfile(fn):
             continue
         req_fns.append(fn)
     if req_fns:
         utils.log_iterable(req_fns, logger=LOG,
                            header="Adjusting %s pip 'requires' files" % (len(req_fns)))
         for fn in req_fns:
             new_lines = []
             for line in sh.load_file(fn).splitlines():
                 s_line = line.strip()
                 if len(s_line) == 0:
                     continue
                 elif s_line.startswith("#"):
                     new_lines.append(s_line)
                 elif not self._filter_pip_requires_line(fn, s_line):
                     new_lines.append(("# %s" % (s_line)))
                 else:
                     new_lines.append(s_line)
             contents = "# Cleaned on %s\n\n%s\n" % (utils.iso8601(), "\n".join(new_lines))
             sh.write_file_and_backup(fn, contents)
     return len(req_fns)
Example #4
 def _build_openstack_package(self, instance):
     params = self._package_parameters(instance)
     patches = instance.list_patches("package")
     params['patches'] = [sh.basename(fn) for fn in patches]
     (rpm_name, template_name) = self._get_template_and_rpm_name(instance)
     try:
         egg_name = instance.egg_info['name']
         params["version"] = instance.egg_info["version"]
         if self._is_client(instance.name, egg_name):
             client_name = utils.strip_prefix_suffix(egg_name, "python-", "client")
             if not client_name:
                 msg = "Bad client package name %s" % (egg_name)
                 raise excp.PackageException(msg)
             params["clientname"] = client_name
             params["apiname"] = self.API_NAMES.get(client_name,
                                                    client_name.title())
     except AttributeError:
         spec_filename = None
         if template_name:
             spec_filename = sh.joinpths(settings.TEMPLATE_DIR,
                                         self.SPEC_TEMPLATE_DIR,
                                         template_name)
         if not spec_filename or not sh.isfile(spec_filename):
             rpm_name = None
     if rpm_name:
         if not template_name:
             template_name = "%s.spec" % rpm_name
         spec_filename = self._write_spec_file(instance, rpm_name,
                                               template_name, params)
         self._build_from_spec(instance, spec_filename, patches)
     else:
         self._build_from_app_dir(instance, params)
Example #5
 def _get_env(self):
     env_addons = DEFAULT_ENV.copy()
     tox_fn = sh.joinpths(self.get_option('app_dir'), 'tox.ini')
     if sh.isfile(tox_fn):
         # Suck out some settings from the tox file
         try:
             tox_cfg = ini_parser.BuiltinConfigParser(fns=[tox_fn])
             env_values = tox_cfg.get('testenv', 'setenv') or ''
             for env_line in env_values.splitlines():
                 env_line = env_line.strip()
                 env_line = env_line.split("#")[0].strip()
                 if not env_line:
                     continue
                 env_entry = env_line.split('=', 1)
                 if len(env_entry) == 2:
                     (name, value) = env_entry
                     name = name.strip()
                     value = value.strip()
                     if name.lower() != 'virtual_env':
                         env_addons[name] = value
             if env_addons:
                 LOG.debug("From %s we read in %s environment settings:", tox_fn, len(env_addons))
                 utils.log_object(env_addons, logger=LOG, level=logging.DEBUG)
         except IOError:
             pass
     return env_addons
Example #6
 def stop(self, app_name):
     trace_dir = self.runtime.get_option('trace_dir')
     if not sh.isdir(trace_dir):
         msg = "No trace directory found from which to stop: %s" % (app_name)
         raise excp.StopException(msg)
     with sh.Rooted(True):
         fn_name = FORK_TEMPL % (app_name)
         (pid_file, stderr_fn, stdout_fn) = self._form_file_names(fn_name)
         pid = self._extract_pid(pid_file)
         if not pid:
             msg = "Could not extract a valid pid from %s" % (pid_file)
             raise excp.StopException(msg)
         (killed, attempts) = sh.kill(pid)
         # Trash the files if it worked
         if killed:
             LOG.debug("Killed pid %s after %s attempts." % (pid, attempts))
             LOG.debug("Removing pid file %s" % (pid_file))
             sh.unlink(pid_file)
             LOG.debug("Removing stderr file %r" % (stderr_fn))
             sh.unlink(stderr_fn)
             LOG.debug("Removing stdout file %r" % (stdout_fn))
             sh.unlink(stdout_fn)
             trace_fn = tr.trace_fn(trace_dir, fn_name)
             if sh.isfile(trace_fn):
                 LOG.debug("Removing %r trace file %r" % (app_name, trace_fn))
                 sh.unlink(trace_fn)
         else:
             msg = "Could not stop %r after %s attempts" % (app_name, attempts)
             raise excp.StopException(msg)
Example #7
 def _get_env(self):
     env_addons = DEFAULT_ENV.copy()
     tox_fn = sh.joinpths(self.get_option('app_dir'), 'tox.ini')
     if sh.isfile(tox_fn):
         # Suck out some settings from the tox file
         try:
             tox_cfg = ini_parser.BuiltinConfigParser(fns=[tox_fn])
             env_values = tox_cfg.get('testenv', 'setenv') or ''
             for env_line in env_values.splitlines():
                 env_line = env_line.strip()
                 env_line = env_line.split("#")[0].strip()
                 if not env_line:
                     continue
                 env_entry = env_line.split('=', 1)
                 if len(env_entry) == 2:
                     (name, value) = env_entry
                     name = name.strip()
                     value = value.strip()
                     if name.lower() != 'virtual_env':
                         env_addons[name] = value
             if env_addons:
                 LOG.debug("From %s we read in %s environment settings:",
                           tox_fn, len(env_addons))
                 utils.log_object(env_addons,
                                  logger=LOG,
                                  level=logging.DEBUG)
         except IOError:
             pass
     return env_addons
Example #8
 def _do_network_init(self):
     ran_fn = self.net_init_fn
     if not sh.isfile(ran_fn) and self.get_bool_option("do-network-init"):
         # Figure out the commands to run
         cmds = []
         mp = {"CFG_FILE": self.config_path, "BIN_DIR": self.bin_dir}
         if self.get_bool_option("enable_fixed"):
             # Create a fixed network
             mp["FIXED_NETWORK_SIZE"] = self.get_option("fixed_network_size", default_value="256")
             mp["FIXED_RANGE"] = self.get_option("fixed_range", default_value="10.0.0.0/24")
             cmds.extend(FIXED_NET_CMDS)
         if self.get_bool_option("enable_floating"):
             # Create a floating network + test floating pool
             cmds.extend(FLOATING_NET_CMDS)
             mp["FLOATING_RANGE"] = self.get_option("floating_range", default_value="172.24.4.224/28")
             mp["TEST_FLOATING_RANGE"] = self.get_option("test_floating_range", default_value="192.168.253.0/29")
             mp["TEST_FLOATING_POOL"] = self.get_option("test_floating_pool", default_value="test")
         # Anything to run??
         if cmds:
             LOG.info("Creating your nova network to be used with instances.")
             utils.execute_template(*cmds, params=mp)
         # Writing this makes sure that we don't init again
         cmd_mp = {"cmds": cmds, "replacements": mp}
         sh.write_file(ran_fn, utils.prettify_yaml(cmd_mp))
         LOG.info("If you wish to re-run network initialization, delete %s", colorizer.quote(ran_fn))
Example #9
 def _get_package_dirs(instances):
     package_dirs = []
     for inst in instances:
         app_dir = inst.get_option("app_dir")
         if sh.isfile(sh.joinpths(app_dir, "setup.py")):
             package_dirs.append(app_dir)
     return package_dirs
Example #10
 def _copy_startup_scripts(self, spec_filename):
     common_init_content = utils.load_template("packaging",
                                               "common.init")[1]
     for src in rpm.spec(spec_filename).sources:
         script = sh.basename(src[0])
         if not (script.endswith(".init")):
             continue
         target_filename = sh.joinpths(self.rpm_sources_dir, script)
         if sh.isfile(target_filename):
             continue
         bin_name = utils.strip_prefix_suffix(script, "openstack-", ".init")
         if bin_name == "quantum-server":
             daemon_args = ("'--config-file=/etc/quantum/plugin.ini"
                            " --config-file=/etc/quantum/quantum.conf'")
         elif bin_name == "quantum-l3-agent":
             daemon_args = ("'--config-file=/etc/quantum/l3_agent.ini"
                            " --config-file=/etc/quantum/quantum.conf'")
         elif bin_name == "quantum-dhcp-agent":
             daemon_args = ("'--config-file=/etc/quantum/dhcp_agent.ini"
                            " --config-file=/etc/quantum/quantum.conf'")
         else:
             daemon_args = ""
         params = {
             "bin": bin_name,
             "package": bin_name.split("-", 1)[0],
             "daemon_args": daemon_args,
         }
         sh.write_file(target_filename,
                       utils.expand_template(common_init_content, params))
Example #11
 def _build_openstack_package(self, instance):
     params = self._package_parameters(instance)
     patches = instance.list_patches("package")
     params['patches'] = [sh.basename(fn) for fn in patches]
     (rpm_name, template_name) = self._get_template_and_rpm_name(instance)
     try:
         egg_name = instance.egg_info['name']
         params["version"] = instance.egg_info["version"]
         if self._is_client(instance.name, egg_name):
             client_name = utils.strip_prefix_suffix(
                 egg_name, "python-", "client")
             if not client_name:
                 msg = "Bad client package name %s" % (egg_name)
                 raise excp.PackageException(msg)
             params["clientname"] = client_name
             params["apiname"] = self.API_NAMES.get(client_name,
                                                    client_name.title())
     except AttributeError:
         spec_filename = None
         if template_name:
             spec_filename = sh.joinpths(settings.TEMPLATE_DIR,
                                         self.SPEC_TEMPLATE_DIR,
                                         template_name)
         if not spec_filename or not sh.isfile(spec_filename):
             rpm_name = None
     if rpm_name:
         if not template_name:
             template_name = "%s.spec" % rpm_name
         spec_filename = self._write_spec_file(instance, rpm_name,
                                               template_name, params)
         self._build_from_spec(instance, spec_filename, patches)
     else:
         self._build_from_app_dir(instance, params)
Example #12
 def download_dependencies(self):
     """Download dependencies from `$deps_dir/download-requires`."""
     # NOTE(aababilov): do not drop download_dir - it can be reused
     sh.mkdirslist(self.download_dir, tracewriter=self.tracewriter)
     pips_to_download = self._filter_download_requires()
     sh.write_file(self.download_requires_filename,
                   "\n".join([str(req) for req in pips_to_download]))
     if not pips_to_download:
         return ([], [])
     # NOTE(aababilov): user could have changed persona, so,
     # check that all requirements are downloaded
     if (sh.isfile(self.downloaded_flag_file) and
             self._requirements_satisfied(pips_to_download, self.download_dir)):
         LOG.info("All python dependencies have been already downloaded")
     else:
         def try_download(attempt):
             LOG.info("Downloading %s dependencies with pip (attempt %s)...",
                      len(pips_to_download), attempt)
             output_filename = sh.joinpths(self.log_dir,
                                           "pip-download-attempt-%s.log" % (attempt))
             pip_helper.download_dependencies(self.download_dir,
                                              pips_to_download,
                                              output_filename)
         utils.retry(self.MAX_PIP_DOWNLOAD_ATTEMPTS,
                     self.PIP_DOWNLOAD_DELAY, try_download)
         # NOTE(harlowja): Mark that we completed downloading successfully
         sh.touch_file(self.downloaded_flag_file, die_if_there=False,
                       quiet=True, tracewriter=self.tracewriter)
     pips_downloaded = [pip_helper.extract_requirement(p) for p in pips_to_download]
     what_downloaded = self._examine_download_dir(pips_downloaded, self.download_dir)
     return (pips_downloaded, what_downloaded)
Example #13
 def _do_network_init(self):
     if not sh.isfile(
             self.net_init_fn) and self.get_bool_option('do-network-init'):
         # Figure out the commands to run
         cmds = []
         mp = {}
         if self.get_bool_option('enable_fixed'):
             # Create a fixed network
             mp['FIXED_NETWORK_SIZE'] = self.get_option(
                 'fixed_network_size', default_value='256')
             mp['FIXED_RANGE'] = self.get_option(
                 'fixed_range', default_value='10.0.0.0/24')
             cmds.extend(FIXED_NET_CMDS)
         if self.get_bool_option('enable_floating'):
             # Create a floating network + test floating pool
             cmds.extend(FLOATING_NET_CMDS)
             mp['FLOATING_RANGE'] = self.get_option(
                 'floating_range', default_value='172.24.4.224/28')
             mp['TEST_FLOATING_RANGE'] = self.get_option(
                 'test_floating_range', default_value='192.168.253.0/29')
             mp['TEST_FLOATING_POOL'] = self.get_option(
                 'test_floating_pool', default_value='test')
         # Anything to run??
         if cmds:
             LOG.info(
                 "Creating your nova network to be used with instances.")
             utils.execute_template(*cmds, params=mp)
         # Writing this makes sure that we don't init again
         cmd_mp = {
             'cmds': cmds,
             'replacements': mp,
         }
         sh.write_file(self.net_init_fn, utils.prettify_yaml(cmd_mp))
         LOG.info("If you wish to re-run network initialization, delete %s",
                  colorizer.quote(self.net_init_fn))
Example #14
 def _do_network_init(self):
     ran_fn = self.net_init_fn
     if not sh.isfile(ran_fn) and self.get_bool_option('do-network-init'):
         # Figure out the commands to run
         cmds = []
         mp = {
             'CFG_FILE': self.config_path,
             'BIN_DIR': self.bin_dir
         }
         if self.get_bool_option('enable_fixed'):
             # Create a fixed network
             mp['FIXED_NETWORK_SIZE'] = self.get_option('fixed_network_size', default_value='256')
             mp['FIXED_RANGE'] = self.get_option('fixed_range', default_value='10.0.0.0/24')
             cmds.extend(FIXED_NET_CMDS)
         if self.get_bool_option('enable_floating'):
             # Create a floating network + test floating pool
             cmds.extend(FLOATING_NET_CMDS)
             mp['FLOATING_RANGE'] = self.get_option('floating_range', default_value='172.24.4.224/28')
             mp['TEST_FLOATING_RANGE'] = self.get_option('test_floating_range', default_value='192.168.253.0/29')
             mp['TEST_FLOATING_POOL'] = self.get_option('test_floating_pool', default_value='test')
         # Anything to run??
         if cmds:
             LOG.info("Creating your nova network to be used with instances.")
             utils.execute_template(*cmds, params=mp)
         # Writing this makes sure that we don't init again
         cmd_mp = {
             'cmds': cmds,
             'replacements': mp,
         }
         sh.write_file(ran_fn, utils.prettify_yaml(cmd_mp))
         LOG.info("If you wish to re-run network initialization, delete %s", colorizer.quote(ran_fn))
Example #15
 def _clean_pip_requires(self):
     # Fixup these files if they exist (sometimes they have 'junk' in them)
     req_fns = []
     for fn in self.requires_files:
         if not sh.isfile(fn):
             continue
         req_fns.append(fn)
     if req_fns:
         utils.log_iterable(req_fns, logger=LOG,
             header="Adjusting %s pip 'requires' files" % (len(req_fns)))
         for fn in req_fns:
             new_lines = []
             for line in sh.load_file(fn).splitlines():
                 s_line = line.strip()
                 if len(s_line) == 0:
                     continue
                 elif s_line.startswith("#"):
                     new_lines.append(s_line)
                 elif not self._filter_pip_requires_line(s_line):
                     new_lines.append(("# %s" % (s_line)))
                 else:
                     new_lines.append(s_line)
             contents = "# Cleaned on %s\n\n%s\n" % (utils.iso8601(), "\n".join(new_lines))
             sh.write_file_and_backup(fn, contents)
     return len(req_fns)
Example #16
 def get_file_arg(name):
     fn = args.pop("%s_fn" % name)
     if not fn:
         raise excp.OptionException("No %s file name specified!" % (name))
     if not sh.isfile(fn):
         raise excp.OptionException("Invalid %s file %s specified!" % (name, fn))
     return fn
Example #17
 def _get_package_dirs(instances):
     package_dirs = []
     for inst in instances:
         app_dir = inst.get_option("app_dir")
         if sh.isfile(sh.joinpths(app_dir, "setup.py")):
             package_dirs.append(app_dir)
     return package_dirs
Example #18
 def _get_env(self):
     env_addons = DEFAULT_ENV.copy()
     tox_fn = sh.joinpths(self.get_option("app_dir"), "tox.ini")
     if sh.isfile(tox_fn):
         # Suck out some settings from the tox file
         try:
             tox_cfg = cfg.BuiltinConfigParser(fns=[tox_fn])
             env_values = tox_cfg.get("testenv", "setenv") or ""
             for env_line in env_values.splitlines():
                 env_line = env_line.strip()
                 env_line = env_line.split("#")[0].strip()
                 if not env_line:
                     continue
                 env_entry = env_line.split("=", 1)
                 if len(env_entry) == 2:
                     (name, value) = env_entry
                     name = name.strip()
                     value = value.strip()
                     if name.lower() != "virtual_env":
                         env_addons[name] = value
             if env_addons:
                 LOG.debug("From %s we read in %s environment settings:", tox_fn, len(env_addons))
                 utils.log_object(env_addons, logger=LOG, level=logging.DEBUG)
         except IOError:
             pass
     if not colorizer.color_enabled():
         env_addons["NOSE_OPENSTACK_COLOR"] = "0"
     if self.get_bool_option("verbose", default_value=True):
         env_addons["NOSE_OPENSTACK_STDOUT"] = "1"
     return env_addons
Example #19
 def _build_from_spec(self, instance, spec_filename, patches=None):
     pkg_dir = instance.get_option('app_dir')
     if sh.isfile(sh.joinpths(pkg_dir, "setup.py")):
         self._write_python_tarball(instance, pkg_dir, ENSURE_NOT_MISSING)
     else:
         self._write_git_tarball(instance, pkg_dir, spec_filename)
     self._copy_sources(instance)
     if patches:
         self._copy_patches(patches)
     cmdline = [self.specprint_executable]
     cmdline.extend(['-f', spec_filename])
     spec_details = json.loads(sh.execute(cmdline)[0])
     rpm_requires = []
     for k in ('requires', 'requirenevrs'):
         try:
             rpm_requires.extend(spec_details['headers'][k])
         except (KeyError, TypeError):
             pass
     if rpm_requires:
         buff = six.StringIO()
         buff.write("# %s\n" % instance.name)
         if rpm_requires:
             for req in rpm_requires:
                 buff.write("%s\n" % req)
             buff.write("\n")
         sh.append_file(self.rpm_build_requires_filename, buff.getvalue())
     self._copy_startup_scripts(instance, spec_details)
     cmdline = [
         self.rpmbuild_executable,
         "-bs",
         "--define", "_topdir %s" % self.rpmbuild_dir,
         spec_filename,
     ]
     out_filename = sh.joinpths(self.log_dir, "rpmbuild-%s.log" % instance.name)
     sh.execute_save_output(cmdline, out_filename)
Example #20
 def _do_network_init(self):
     ran_fn = sh.joinpths(self.get_option("trace_dir"), NET_INITED_FN)
     if not sh.isfile(ran_fn) and self.net_enabled:
         LOG.info("Creating your nova network to be used with instances.")
         # Figure out the commands to run
         mp = {}
         cmds = []
         mp["CFG_FILE"] = sh.joinpths(self.get_option("cfg_dir"), API_CONF)
         mp["BIN_DIR"] = sh.joinpths(self.get_option("app_dir"), BIN_DIR)
         if self.cfg.getboolean("nova", "enable_fixed"):
             # Create a fixed network
             mp["FIXED_NETWORK_SIZE"] = self.cfg.getdefaulted("nova", "fixed_network_size", "256")
             mp["FIXED_RANGE"] = self.cfg.getdefaulted("nova", "fixed_range", "10.0.0.0/24")
             cmds.extend(FIXED_NET_CMDS)
         if not self.get_option("quantum"):
             if self.cfg.getboolean("nova", "enable_floating"):
                 # Create a floating network + test floating pool
                 cmds.extend(FLOATING_NET_CMDS)
                 mp["FLOATING_RANGE"] = self.cfg.getdefaulted("nova", "floating_range", "172.24.4.224/28")
                 mp["TEST_FLOATING_RANGE"] = self.cfg.getdefaulted("nova", "test_floating_range", "192.168.253.0/29")
                 mp["TEST_FLOATING_POOL"] = self.cfg.getdefaulted("nova", "test_floating_pool", "test")
         else:
             LOG.info("Not creating floating IPs (not supported by quantum server)")
             LOG.info(
                 "Waiting %s seconds so that quantum can start up before running first time init." % (self.wait_time)
             )
             sh.sleep(self.wait_time)
         # Anything to run??
         if cmds:
             utils.execute_template(*cmds, params=mp)
         # Writing this makes sure that we don't init again
         cmd_mp = {"cmds": cmds, "replacements": mp}
         sh.write_file(ran_fn, utils.prettify_yaml(cmd_mp))
         LOG.info("If you wish to re-run initialization, delete %s", colorizer.quote(ran_fn))
Example #21
 def post_start(self):
     if not sh.isfile(self.init_fn) and self.get_bool_option("do-init"):
         self.wait_active()
         LOG.info("Running commands to initialize keystone.")
         (fn, contents) = utils.load_template(self.name, INIT_WHAT_FN)
         LOG.debug("Initializing with contents of %s", fn)
         params = {}
         params["keystone"] = khelper.get_shared_params(
             **utils.merge_dicts(self.options, khelper.get_shared_passwords(self))
         )
         params["glance"] = ghelper.get_shared_params(ip=self.get_option("ip"), **self.get_option("glance"))
         params["nova"] = nhelper.get_shared_params(ip=self.get_option("ip"), **self.get_option("nova"))
         wait_urls = [
             params["keystone"]["endpoints"]["admin"]["uri"],
             params["keystone"]["endpoints"]["public"]["uri"],
         ]
         for url in wait_urls:
             utils.wait_for_url(url)
         init_what = utils.load_yaml_text(contents)
         init_what = utils.expand_template_deep(self._filter_init(init_what), params)
         khelper.Initializer(
             params["keystone"]["service_token"], params["keystone"]["endpoints"]["admin"]["uri"]
         ).initialize(**init_what)
         # Writing this makes sure that we don't init again
         sh.write_file(self.init_fn, utils.prettify_yaml(init_what))
         LOG.info("If you wish to re-run initialization, delete %s", colorizer.quote(self.init_fn))
Example #22
 def post_start(self):
     if not sh.isfile(self.init_fn) and self.get_bool_option('do-init'):
         self.wait_active()
         LOG.info("Running commands to initialize keystone.")
         (fn, contents) = utils.load_template(self.name, INIT_WHAT_FN)
         LOG.debug("Initializing with contents of %s", fn)
         params = {}
         params['keystone'] = khelper.get_shared_params(**utils.merge_dicts(self.options, khelper.get_shared_passwords(self)))
         params['glance'] = ghelper.get_shared_params(ip=self.get_option('ip'), **self.get_option('glance'))
         params['nova'] = nhelper.get_shared_params(ip=self.get_option('ip'), **self.get_option('nova'))
         params['quantum'] = qhelper.get_shared_params(ip=self.get_option('ip'), **self.get_option('quantum'))
         params['cinder'] = chelper.get_shared_params(ip=self.get_option('ip'), **self.get_option('cinder'))
         wait_urls = [
             params['keystone']['endpoints']['admin']['uri'],
             params['keystone']['endpoints']['public']['uri'],
         ]
         for url in wait_urls:
             utils.wait_for_url(url)
         init_what = utils.load_yaml_text(contents)
         init_what = utils.expand_template_deep(self._filter_init(init_what), params)
         khelper.Initializer(params['keystone']['service_token'],
                             params['keystone']['endpoints']['admin']['uri']).initialize(**init_what)
         # Writing this makes sure that we don't init again
         sh.write_file(self.init_fn, utils.prettify_yaml(init_what))
         LOG.info("If you wish to re-run initialization, delete %s", colorizer.quote(self.init_fn))
Example #23
 def post_start(self):
     if not sh.isfile(self.init_fn) and self.get_bool_option('do-init'):
         self.wait_active()
         LOG.info("Running commands to initialize keystone.")
         (fn, contents) = utils.load_template(self.name, INIT_WHAT_FN)
         LOG.debug("Initializing with contents of %s", fn)
         params = {}
         params['keystone'] = khelper.get_shared_params(**utils.merge_dicts(self.options, khelper.get_shared_passwords(self)))
         params['glance'] = ghelper.get_shared_params(ip=self.get_option('ip'), **self.get_option('glance'))
         params['nova'] = nhelper.get_shared_params(ip=self.get_option('ip'), **self.get_option('nova'))
         params['neutron'] = net_helper.get_shared_params(ip=self.get_option('ip'), **self.get_option('neutron'))
         params['cinder'] = chelper.get_shared_params(ip=self.get_option('ip'), **self.get_option('cinder'))
         wait_urls = [
             params['keystone']['endpoints']['admin']['uri'],
             params['keystone']['endpoints']['public']['uri'],
         ]
         for url in wait_urls:
             utils.wait_for_url(url)
         init_what = utils.load_yaml_text(contents)
         init_what = utils.expand_template_deep(init_what, params)
         try:
             init_how = khelper.Initializer(params['keystone']['service_token'],
                                            params['keystone']['endpoints']['admin']['uri'])
             init_how.initialize(**init_what)
         except RuntimeError:
             LOG.exception("Failed to initialize keystone, is the keystone client library available?")
         else:
             # Writing this makes sure that we don't init again
             sh.write_file(self.init_fn, utils.prettify_yaml(init_what))
             LOG.info("If you wish to re-run initialization, delete %s", colorizer.quote(self.init_fn))
Example #24
    def _find_template_and_rpm_name(self, instance, build_name):
        search_names = [(build_name, "%s.spec" % build_name)]

        try:
            egg_name = instance.egg_info['name']
        except AttributeError:
            pass
        else:
            if any(
                    s.endswith("client")
                    for s in (instance.name, egg_name, build_name)):
                search_names.append([egg_name, "python-commonclient.spec"])
            search_names.extend([
                ("openstack-%s" % (egg_name),
                 "openstack-%s.spec" % (egg_name)),
                (egg_name, "%s.spec" % (egg_name)),
            ])

        # Return the first that exists (if any from this list)
        for (rpm_name, template_name) in search_names:
            spec_filename = sh.joinpths(settings.TEMPLATE_DIR,
                                        self.SPEC_TEMPLATE_DIR, template_name)
            if sh.isfile(spec_filename):
                return (rpm_name, template_name)
        return (None, None)
Example #25
 def _extract_pid(self, filename):
     if sh.isfile(filename):
         try:
             return int(sh.load_file(filename).strip())
         except ValueError:
             return None
     else:
         return None
Example #26
 def _extract_pid(self, filename):
     if sh.isfile(filename):
         try:
             return int(sh.load_file(filename).strip())
         except ValueError:
             return None
     else:
         return None
Example #27
 def _clean_it(self):
     cleaner_fn = sh.joinpths(self.get_option("app_dir"), BIN_DIR, CLEANER_DATA_CONF)
     if sh.isfile(cleaner_fn):
         LOG.info("Cleaning up your system by running nova cleaner script: %s", colorizer.quote(cleaner_fn))
         # These environment additions are important
         # in that they eventually affect how this script runs
         env = {"ENABLED_SERVICES": ",".join(self.subsystems.keys())}
         sh.execute(cleaner_fn, run_as_root=True, env_overrides=env)
Example #28
def read_requirement_files(files):
    result = []
    for filename in files:
        if sh.isfile(filename):
            LOG.debug('Parsing requirements from %s', filename)
            with open(filename) as f:
                result.extend(parse_requirements(f.read()))
    return result
Example #29
 def is_src_rpm(path):
     if not path:
         return False
     if not sh.isfile(path):
         return False
     if not path.lower().endswith('.src.rpm'):
         return False
     return True
Example #30
    def _cache(self, conf):
        """Cache config file into memory to avoid re-reading it from disk."""
        if conf not in self._cached:
            path = sh.joinpths(self._path, conf + self._conf_ext)
            if not sh.isfile(path):
                raise exceptions.YamlConfigNotFoundException(path)

            self._cached[conf] = utils.load_yaml(path) or {}
Example #31
    def _cache(self, conf):
        """Cache config file into memory to avoid re-reading it from disk."""
        if conf not in self._cached:
            path = sh.joinpths(self._path, conf + self._conf_ext)
            if not sh.isfile(path):
                raise exceptions.YamlConfigNotFoundException(path)

            self._cached[conf] = utils.load_yaml(path) or {}
Example #32
def read_requirement_files(files):
    result = []
    for filename in files:
        if sh.isfile(filename):
            LOG.debug('Parsing requirements from %s', filename)
            with open(filename) as f:
                result.extend(parse_requirements(f.read()))
    return result
Example #33
 def is_src_rpm(path):
     if not path:
         return False
     if not sh.isfile(path):
         return False
     if not path.lower().endswith('.src.rpm'):
         return False
     return True
Example #34
 def download_dependencies(self):
     """Download dependencies from `$deps_dir/download-requires`.
     """
     # NOTE(aababilov): do not drop download_dir - it can be reused
     sh.mkdirslist(self.download_dir, tracewriter=self.tracewriter)
     download_requires_filename = sh.joinpths(self.deps_dir,
                                              "download-requires")
     raw_pips_to_download = self.filter_download_requires()
     sh.write_file(download_requires_filename,
                   "\n".join(str(req) for req in raw_pips_to_download))
     if not raw_pips_to_download:
         return ([], [])
     downloaded_flag_file = sh.joinpths(self.deps_dir, "pip-downloaded")
     # NOTE(aababilov): user could have changed persona, so,
     # check that all requirements are downloaded
     if sh.isfile(downloaded_flag_file) and self._requirements_satisfied(
             raw_pips_to_download, self.download_dir):
         LOG.info("All python dependencies have been already downloaded")
     else:
         pip_dir = sh.joinpths(self.deps_dir, "pip")
         pip_download_dir = sh.joinpths(pip_dir, "download")
         pip_build_dir = sh.joinpths(pip_dir, "build")
         # NOTE(aababilov): do not clean the cache, it is always useful
         pip_cache_dir = sh.joinpths(self.deps_dir, "pip-cache")
         pip_failures = []
         for attempt in xrange(self.MAX_PIP_DOWNLOAD_ATTEMPTS):
             # NOTE(aababilov): pip has issues with already downloaded files
             sh.deldir(pip_dir)
             sh.mkdir(pip_download_dir, recurse=True)
             header = "Downloading %s python dependencies (attempt %s)"
             header = header % (len(raw_pips_to_download), attempt)
             utils.log_iterable(sorted(raw_pips_to_download),
                                logger=LOG,
                                header=header)
             failed = False
             try:
                 self._try_download_dependencies(attempt, raw_pips_to_download,
                                                 pip_download_dir,
                                                 pip_cache_dir, pip_build_dir)
                 pip_failures = []
             except exc.ProcessExecutionError as e:
                 LOG.exception("Failed downloading python dependencies")
                 pip_failures.append(e)
                 failed = True
             if not failed:
                 break
         for filename in sh.listdir(pip_download_dir, files_only=True):
             sh.move(filename, self.download_dir, force=True)
         sh.deldir(pip_dir)
         if pip_failures:
             raise pip_failures[-1]
         with open(downloaded_flag_file, "w"):
             pass
     pips_downloaded = [pip_helper.extract_requirement(p)
                        for p in raw_pips_to_download]
     self._examine_download_dir(pips_downloaded, self.download_dir)
     what_downloaded = sh.listdir(self.download_dir, files_only=True)
     return (pips_downloaded, what_downloaded)
Example #35
 def _process_includes(self, root):
     if root in self.included:
         return
     pth = sh.joinpths(self.base, "%s.yaml" % (root))
     if not sh.isfile(pth):
         self.included[root] = {}
         return
     self.included[root] = utils.load_yaml(pth)
     self.included[root] = self._do_include(self.included[root])
Example #36
 def _process_includes(self, root):
     if root in self.included:
         return
     pth = sh.joinpths(self.base, "%s.yaml" % (root))
     if not sh.isfile(pth):
         self.included[root] = {}
         return
     self.included[root] = utils.load_yaml(pth)
     self.included[root] = self._do_include(self.included[root])
Example #37
 def _write_rc_file(self):
     fn = sh.abspth(settings.gen_rc_filename('core'))
     writer = env_rc.RcWriter(self.cfg, self.root_dir)
     if not sh.isfile(fn):
         LOG.info("Generating a file at %s that will contain your environment settings.", colorizer.quote(fn))
         writer.write(fn)
     else:
         LOG.info("Updating a file at %s that contains your environment settings.", colorizer.quote(fn))
         am_upd = writer.update(fn)
         LOG.info("Updated %s settings.", colorizer.quote(am_upd))
Example #38
 def _establish_passwords(self):
     pw_read = []
     for fn in self.password_files:
         if sh.isfile(fn):
             self.passwords.cache.update(utils.load_yaml(fn))
             pw_read.append(fn)
     if pw_read:
         utils.log_iterable(pw_read,
                            header="Updated passwords to be used from %s files" % len(pw_read),
                            logger=LOG)
Example #39
 def _extract_pip_requires(self, fn):
     if not sh.isfile(fn):
         return []
     LOG.debug("Resolving dependencies from %s.", colorizer.quote(fn))
     pips_needed = pip_helper.parse_requirements(sh.load_file(fn))
     matchings = []
     for req in pips_needed:
         (pkg_info, from_pip) = self._match_pip_requires(req)
         matchings.append({"requirement": req, "package": pkg_info, "from_pip": from_pip, "needed_by": fn})
     return matchings
Example #40
 def _record_srpm_files(self, files):
     if not files:
         return
     buf = six.StringIO()
     for f in files:
         buf.write(f)
         buf.write("\n")
     if sh.isfile(self.generated_srpms_filename):
         sh.append_file(self.generated_srpms_filename, "\n" + buf.getvalue())
     else:
         sh.write_file(self.generated_srpms_filename, buf.getvalue())
Example #41
def read_requirement_files(files):
    pip_requirements = []
    for filename in files:
        if sh.isfile(filename):
            LOG.debug('Parsing requirements from %s', filename)
            with open(filename, 'rb') as fh:
                for line in fh:
                    LOG.debug(">> %s", line.strip())
            pip_requirements.extend(pip_req.parse_requirements(filename))
    return (pip_requirements,
            [req.req for req in pip_requirements])
Example #42
def load_previous_settings():
    settings_prev = None
    if sh.isfile(SETTINGS_FN):
        try:
            # Don't use sh here so that we always
            # read this (even if dry-run)
            with open(SETTINGS_FN, 'r') as fh:
                settings_prev = utils.load_yaml_text(fh.read())
        except Exception:
            pass
    return settings_prev
Example #43
 def _fix_virt(self):
     virt_driver = nhelper.canon_virt_driver(self.get_option("virt_driver"))
     if virt_driver == "libvirt":
         virt_type = lv.canon_libvirt_type(self.get_option("libvirt_type"))
         if virt_type == "qemu":
             # On RHEL it appears a sym-link needs to be created
             # to enable qemu to actually work, apparently fixed
             # in RHEL 6.4.
             #
             # See: http://fedoraproject.org/wiki/Getting_started_with_OpenStack_EPEL
             if not sh.isfile("/usr/bin/qemu-system-x86_64"):
                 sh.symlink("/usr/libexec/qemu-kvm", "/usr/bin/qemu-system-x86_64", tracewriter=self.tracewriter)
Example #44
 def patch(self, section):
     what_patches = self.get_option('patches', section)
     (_from_uri, target_dir) = self._get_download_location()
     if not what_patches:
         what_patches = []
     canon_what_patches = []
     for path in what_patches:
         if sh.isdir(path):
             canon_what_patches.extend(sorted(sh.listdir(path, files_only=True)))
         elif sh.isfile(path):
             canon_what_patches.append(path)
     if canon_what_patches:
         patcher.apply_patches(canon_what_patches, target_dir)
Example #45
 def _clean_it(self):
     cleaner_fn = sh.joinpths(self.get_option('app_dir'), BIN_DIR,
                              CLEANER_DATA_CONF)
     if sh.isfile(cleaner_fn):
         LOG.info(
             "Cleaning up your system by running nova cleaner script: %s",
             colorizer.quote(cleaner_fn))
         # These environment additions are important
         # in that they eventually affect how this script runs
         env = {
             'ENABLED_SERVICES': ",".join(self.subsystems.keys()),
         }
         sh.execute(cleaner_fn, run_as_root=True, env_overrides=env)
Example #46
 def download_dependencies(self):
     """Download dependencies from `$deps_dir/download-requires`."""
     # NOTE(aababilov): do not drop download_dir - it can be reused
     sh.mkdirslist(self.download_dir, tracewriter=self.tracewriter)
     pips_to_download = self._filter_download_requires()
     sh.write_file(self.download_requires_filename,
                   "\n".join([str(req) for req in pips_to_download]))
     if not pips_to_download:
         return ([], [])
     # NOTE(aababilov): user could have changed persona, so,
     # check that all requirements are downloaded
     if (sh.isfile(self.downloaded_flag_file)
             and self._requirements_satisfied(pips_to_download,
                                              self.download_dir)):
         LOG.info("All python dependencies have been already downloaded")
     else:
         pip_failures = []
         for attempt in xrange(self.MAX_PIP_DOWNLOAD_ATTEMPTS):
             # NOTE(aababilov): pip has issues with already downloaded files
             for filename in sh.listdir(self.download_dir, files_only=True):
                 sh.unlink(filename)
             header = "Downloading %s python dependencies (attempt %s)"
             header = header % (len(pips_to_download), attempt + 1)
             utils.log_iterable(sorted(pips_to_download),
                                logger=LOG,
                                header=header)
             failed = False
             try:
                 self._try_download_dependencies(attempt + 1,
                                                 pips_to_download,
                                                 self.download_dir)
                 pip_failures = []
             except exc.ProcessExecutionError as e:
                 LOG.exception("Failed downloading python dependencies")
                 pip_failures.append(e)
                 failed = True
             if not failed:
                 break
         if pip_failures:
             raise pip_failures[-1]
         # NOTE(harlowja): Mark that we completed downloading successfully
         sh.touch_file(self.downloaded_flag_file,
                       die_if_there=False,
                       quiet=True,
                       tracewriter=self.tracewriter)
     pips_downloaded = [
         pip_helper.extract_requirement(p) for p in pips_to_download
     ]
     self._examine_download_dir(pips_downloaded, self.download_dir)
     return (pips_downloaded, sh.listdir(self.download_dir,
                                         files_only=True))
Example #47
 def _parse(self):
     fn = self.trace_fn
     if not sh.isfile(fn):
         msg = "No trace found at filename %s" % (fn)
         raise excp.NoTraceException(msg)
     contents = sh.load_file(fn)
     lines = contents.splitlines()
     accum = list()
     for line in lines:
         ep = self._split_line(line)
         if ep is None:
             continue
         accum.append(tuple(ep))
     return accum
Example #48
 def configure(self):
     configs_made = nova.NovaInstaller.configure(self)
     driver_canon = utils.canon_virt_driver(self.get_option('virt_driver'))
     if driver_canon == 'libvirt':
         # Create a libvirtd user group
         if not sh.group_exists('libvirtd'):
             cmd = ['groupadd', 'libvirtd']
             sh.execute(cmd)
         if not sh.isfile(LIBVIRT_POLICY_FN):
             contents = self._get_policy(self._get_policy_users())
             sh.mkdirslist(sh.dirname(LIBVIRT_POLICY_FN))
             sh.write_file(LIBVIRT_POLICY_FN, contents)
             configs_made += 1
     return configs_made
Example #49
 def _fix_virt(self):
     virt_driver = utils.canon_virt_driver(self.get_option('virt_driver'))
     if virt_driver == 'libvirt':
         virt_type = lv.canon_libvirt_type(self.get_option('libvirt_type'))
         if virt_type == 'qemu':
             # On RHEL it appears a sym-link needs to be created
             # to enable qemu to actually work, apparently fixed
             # in RHEL 6.4.
             #
             # See: http://fedoraproject.org/wiki/Getting_started_with_OpenStack_EPEL
             if not sh.isfile('/usr/bin/qemu-system-x86_64'):
                 sh.symlink('/usr/libexec/qemu-kvm',
                            '/usr/bin/qemu-system-x86_64',
                            tracewriter=self.tracewriter)
Example #50
 def list_patches(self, section):
     what_patches = self.get_option('patches', section)
     if not what_patches:
         what_patches = [sh.joinpths(settings.CONFIG_DIR, 'patches',
                                     self.name, section)]
     canon_what_patches = []
     for path in what_patches:
         if sh.isdir(path):
             patches = sorted(fn for fn in sh.listdir(path, files_only=True)
                              if fn.endswith('patch'))
             canon_what_patches.extend(patches)
         elif sh.isfile(path):
             canon_what_patches.append(path)
     return canon_what_patches
Example #51
 def _unpack_dir(self, dir_path):
     """Pick through a directory to figure out which files are which
     image pieces, and create a dict that describes them.
     """
     potential_files = set()
     for fn in self._filter_files(sh.listdir(dir_path)):
         full_fn = sh.joinpths(dir_path, fn)
         if sh.isfile(full_fn):
             potential_files.add(sh.canon_path(full_fn))
     (root_fn, ramdisk_fn, kernel_fn) = self._find_pieces(potential_files, dir_path)
     if not root_fn:
         msg = "Directory %r has no root image member" % (dir_path)
         raise IOError(msg)
     self._log_pieces_found('directory', root_fn, ramdisk_fn, kernel_fn)
     return self._describe(root_fn, ramdisk_fn, kernel_fn)
Example #52
 def _extract_pip_requires(self, fn):
     if not sh.isfile(fn):
         return []
     LOG.debug("Resolving dependencies from %s.", colorizer.quote(fn))
     pips_needed = pip_helper.parse_requirements(sh.load_file(fn))
     matchings = []
     for req in pips_needed:
         (pkg_info, from_pip) = self._match_pip_requires(req)
         matchings.append({
             'requirement': req,
             'package': pkg_info,
             'from_pip': from_pip,
             'needed_by': fn,
         })
     return matchings
Example #53
def read_requirement_files(files):
    pip_requirements = []
    session = pip_download.PipSession()
    for filename in files:
        if sh.isfile(filename):
            cache_key = "f:%s:%s" % (sh.abspth(filename), sh.getsize(filename))
            with REQUIREMENT_FILE_CACHE_LOCK:
                try:
                    reqs = REQUIREMENT_FILE_CACHE[cache_key]
                except KeyError:
                    reqs = tuple(
                        pip_req.parse_requirements(filename, session=session))
                    REQUIREMENT_FILE_CACHE[cache_key] = reqs
                pip_requirements.extend(reqs)
    return (pip_requirements, [req.req for req in pip_requirements])
Example #54
def load_examples():
    examples = []
    for filename in glob.glob(EXAMPLE_GLOB):
        if sh.isfile(filename):
            # The test generator will use the first element as the test
            # identifier, so provide a filename + index based test identifier
            # to be able to connect test failures to the example which caused it.
            try:
                base = sh.basename(filename)
                base = re.sub(r"[.\s]", "_", base)
                for i, example in enumerate(utils.load_yaml(filename)):
                    examples.append(("%s_%s" % (base, i), example))
            except IOError:
                pass
    return examples
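The comment inside load_examples() above describes handing the (identifier, example) pairs to a test generator. As a rough illustration only (the generator-style runner and the helper names below are hypothetical, not taken from the source), such pairs could drive a nose-style generated test like this:

def test_examples():
    # Hypothetical nose-style generator test: yields one check per example,
    # reusing the identifier produced by load_examples() as the test name.
    for test_id, example in load_examples():
        yield _check_example, test_id, example

def _check_example(test_id, example):
    # Placeholder assertion; a real suite would validate the example contents.
    assert example is not None, "example %s is empty" % test_id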
Example #55
 def _get_test_command(self):
     # See: http://docs.openstack.org/developer/nova/devref/unit_tests.html
     # And: http://wiki.openstack.org/ProjectTestingInterface
     app_dir = self.get_option('app_dir')
     if sh.isfile(sh.joinpths(app_dir,
                              'run_tests.sh')) and self._use_run_tests():
         cmd = [sh.joinpths(app_dir, 'run_tests.sh'), '-N']
     else:
         # Assume tox is being used, which we can't use directly
         # since anvil doesn't really do venv stuff (it's meant to avoid those...)
         cmd = ['nosetests']
     # See: $ man nosetests
     cmd.append('--nologcapture')
     for e in self._get_test_exclusions():
         cmd.append('--exclude=%s' % (e))
     return cmd
Example #56
def store_current_settings(c_settings):
    # Remove certain keys that just shouldn't be saved
    to_save = dict(c_settings)
    for k in ['action', 'verbose']:
        if k in c_settings:
            to_save.pop(k, None)
    buf = six.StringIO()
    buf.write("# Anvil last used settings\n")
    buf.write(
        utils.add_header(SETTINGS_FILE,
                         utils.prettify_yaml(to_save),
                         adjusted=sh.isfile(SETTINGS_FILE)))
    try:
        sh.write_file(SETTINGS_FILE, buf.getvalue())
    except OSError as e:
        LOG.warn("Failed writing to %s due to %s", SETTINGS_FILE, e)
Example #57
def _pick_message(pattern, def_message="This page is intentionally left blank."):
    if not pattern:
        return def_message
    expanded_pattern = sh.joinpths(settings.MESSAGING_DIR, pattern)
    file_matches = glob.glob(expanded_pattern)
    file_matches = [f for f in file_matches if sh.isfile(f)]
    try:
        file_selected = random.choice(file_matches)
        with open(file_selected, 'r') as fh:
            contents = fh.read()
        contents = contents.strip("\n\r")
        if not contents:
            contents = def_message
        return contents
    except (IndexError, IOError):
        return def_message
Example #58
def get_archive_details(filename):
    if not sh.isfile(filename):
        raise IOError("Can not detail non-existent file %s" % (filename))

    # Check if we already got the details of this file previously
    cache_key = "f:%s:%s" % (sh.basename(filename), sh.getsize(filename))
    if cache_key in EGGS_DETAILED:
        return EGGS_DETAILED[cache_key]

    # Get pip to get us the egg-info.
    with utils.tempdir() as td:
        filename = sh.copy(filename, sh.joinpths(td, sh.basename(filename)))
        extract_to = sh.mkdir(sh.joinpths(td, 'build'))
        pip_util.unpack_file(filename, extract_to, content_type='', link='')
        details = get_directory_details(extract_to)

    EGGS_DETAILED[cache_key] = details
    return details
Example #59
 def _validate_cache(self, cache_path, details_path):
     for path in [cache_path, details_path]:
         if not sh.exists(path):
             return False
     check_files = []
     try:
         unpack_info = utils.load_yaml_text(sh.load_file(details_path))
         check_files.append(unpack_info['file_name'])
         if 'kernel' in unpack_info:
             check_files.append(unpack_info['kernel']['file_name'])
         if 'ramdisk' in unpack_info:
             check_files.append(unpack_info['ramdisk']['file_name'])
     except Exception:
         return False
     for path in check_files:
         if not sh.isfile(path):
             return False
     return True
Example #60
 def unpack(self, file_name, file_location, tmp_dir):
     if sh.isdir(file_location):
         return self._unpack_dir(file_location)
     elif sh.isfile(file_location):
         (_, fn_ext) = os.path.splitext(file_name)
         fn_ext = fn_ext.lower()
         if fn_ext in TAR_EXTS:
             return self._unpack_tar(file_name, file_location, tmp_dir)
         elif fn_ext in ['.img', '.qcow2']:
             info = dict()
             info['file_name'] = file_location
             if fn_ext == '.img':
                 info['disk_format'] = 'raw'
             else:
                 info['disk_format'] = 'qcow2'
             info['container_format'] = 'bare'
             return info
     msg = "Currently we do not know how to unpack %r" % (file_location)
     raise IOError(msg)
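All of the snippets above call into the anvil project's sh shell-helper module (sh.isfile, sh.isdir, sh.joinpths, sh.load_file, sh.write_file, and friends). As a minimal sketch only, assuming these helpers are thin wrappers over the Python standard library (this is not anvil's actual implementation, which also layers in dry-run handling and tracing), the calls used most often here behave roughly like:

import os

def isfile(path):
    # Assumed behaviour: true only for an existing regular file.
    return os.path.isfile(path)

def isdir(path):
    # Assumed behaviour: true only for an existing directory.
    return os.path.isdir(path)

def joinpths(*pieces):
    # Assumed behaviour: plain path joining.
    return os.path.join(*pieces)

def load_file(path):
    # Assumed behaviour: read a whole file into a string.
    with open(path, 'r') as fh:
        return fh.read()

def write_file(path, contents):
    # Assumed behaviour: overwrite a file with the given contents.
    with open(path, 'w') as fh:
        fh.write(contents)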