Example 1
File: venv.py Project: y2kbot/anvil
    def _install_into_venv(self, instance, requirements):
        venv_dir = self._venv_directory_for(instance)
        base_pip = [sh.joinpths(venv_dir, 'bin', 'pip')]
        env_overrides = {
            'PATH': os.pathsep.join([sh.joinpths(venv_dir, "bin"),
                                     env.get_key('PATH', default_value='')]),
            'VIRTUAL_ENV': venv_dir,
        }
        sh.mkdirslist(self.cache_dir, tracewriter=self.tracewriter)

        def try_install(attempt, requirements):
            cmd = list(base_pip) + ['install']
            cmd.extend([
                '--download-cache',
                self.cache_dir,
            ])
            if isinstance(requirements, six.string_types):
                cmd.extend([
                    '--requirement',
                    requirements
                ])
            else:
                for req in requirements:
                    cmd.append(str(req))
            sh.execute(cmd, env_overrides=env_overrides)

        # Sometimes pip fails downloading things, retry it when this happens...
        utils.retry(3, 5, try_install, requirements=requirements)
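All of the snippets on this page center on anvil's sh.mkdirslist helper. Its implementation is not shown here, but the call sites above and below suggest its contract: it creates the given directory (and any missing parents), returns the list of directories it actually created, and can record them through an optional tracewriter. The following is only a rough sketch inferred from that usage, not anvil's real code; the adjust_suids keyword seen in a couple of the examples is accepted but ignored here.

import os

def mkdirslist(path, tracewriter=None, adjust_suids=False):
    # Inferred behavior (assumption): walk up from 'path' collecting every
    # component that does not exist yet, then create them outermost-first.
    missing = []
    current = os.path.abspath(path)
    while current and not os.path.isdir(current):
        missing.append(current)
        parent = os.path.dirname(current)
        if parent == current:
            break
        current = parent
    dirs_made = []
    for dir_path in reversed(missing):
        os.mkdir(dir_path)
        dirs_made.append(dir_path)
    # The tracewriter seen throughout these examples presumably records the
    # new directories so they can be removed again later.
    if tracewriter is not None:
        tracewriter.dirs_made(*dirs_made)
    return dirs_made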
Example 2
File: base.py Project: jzako/anvil
 def _on_start(self, persona, groups):
     LOG.info("Booting up your components.")
     LOG.debug("Starting environment settings:")
     utils.log_object(env.get(), logger=LOG, level=logging.DEBUG, item_max_len=64)
     sh.mkdirslist(self.phase_dir)
     self._verify_components(groups)
     self._warm_components(groups)
Example 3
 def _create_repo(self, repo_name):
     repo_dir = sh.joinpths(self.anvil_repo_dir, repo_name)
     src_repo_dir = sh.joinpths(self.anvil_repo_dir, self.SRC_REPOS[repo_name])
     for a_dir in (repo_dir, src_repo_dir):
         if not sh.isdir(a_dir):
             sh.mkdirslist(a_dir, tracewriter=self.tracewriter)
         cmdline = ["createrepo", a_dir]
         LOG.info("Creating repo at %s", a_dir)
         sh.execute(cmdline)
     repo_filename = sh.joinpths(self.anvil_repo_dir, "%s.repo" % repo_name)
     LOG.info("Writing %s", repo_filename)
     (_fn, content) = utils.load_template("packaging", "common.repo")
     params = {
         "repo_name": repo_name,
         "baseurl_bin": "file://%s" % repo_dir,
         "baseurl_src": "file://%s" % src_repo_dir,
     }
     sh.write_file(repo_filename, utils.expand_template(content, params),
                   tracewriter=self.tracewriter)
     # Install *.repo file so that anvil deps will be available
     # when building OpenStack
     system_repo_filename = sh.joinpths(self.YUM_REPO_DIR, "%s.repo" % repo_name)
     sh.copy(repo_filename, system_repo_filename)
     LOG.info("Copying to %s", system_repo_filename)
     self.tracewriter.file_touched(system_repo_filename)
Example 4
 def ensure_image_storage(img_store_dir):
     if sh.isdir(img_store_dir):
         return
     LOG.debug("Ensuring file system store directory %r exists.",
               img_store_dir)
     sh.mkdirslist(img_store_dir,
                   tracewriter=self.installer.tracewriter)
Example 5
    def _config_adjust_api(self, config):
        config.add("core_plugin", self.plugin_configurator.PLUGIN_CLASS)
        config.add('auth_strategy', 'keystone')
        config.add("api_paste_config", self.target_config(PASTE_CONF))
        # TODO(aababilov): add debug to other services conf files
        config.add('debug', self.installer.get_bool_option("debug"))
        config.add("log_file", "quantum-server.log")
        config.add("log_dir", "/var/log/quantum")

        # Setup the interprocess locking directory
        # (don't put me on shared storage)
        lock_path = self.installer.get_option('lock_path')
        if not lock_path:
            lock_path = sh.joinpths(self.installer.get_option('component_dir'), 'locks')
        sh.mkdirslist(lock_path, tracewriter=self.installer.tracewriter)
        config.add('lock_path', lock_path)

        self.setup_rpc(config, 'quantum.openstack.common.rpc.impl_kombu')

        config.current_section = "AGENT"
        config.add("root_helper", "sudo quantum-rootwrap /etc/quantum/rootwrap.conf")

        config.current_section = "keystone_authtoken"
        for (k, v) in self._fetch_keystone_params().items():
            config.add(k, v)
Example 6
 def download_dependencies(self):
     """Download dependencies from `$deps_dir/download-requires`."""
     # NOTE(aababilov): do not drop download_dir - it can be reused
     sh.mkdirslist(self.download_dir, tracewriter=self.tracewriter)
     pips_to_download = self._filter_download_requires()
     sh.write_file(self.download_requires_filename,
                   "\n".join([str(req) for req in pips_to_download]))
     if not pips_to_download:
         return ([], [])
     # NOTE(aababilov): user could have changed persona, so,
     # check that all requirements are downloaded
     if (sh.isfile(self.downloaded_flag_file) and
             self._requirements_satisfied(pips_to_download, self.download_dir)):
         LOG.info("All python dependencies have been already downloaded")
     else:
         def try_download(attempt):
             LOG.info("Downloading %s dependencies with pip (attempt %s)...",
                      len(pips_to_download), attempt)
             output_filename = sh.joinpths(self.log_dir,
                                           "pip-download-attempt-%s.log" % (attempt))
             pip_helper.download_dependencies(self.download_dir,
                                              pips_to_download,
                                              output_filename)
         utils.retry(self.MAX_PIP_DOWNLOAD_ATTEMPTS,
                     self.PIP_DOWNLOAD_DELAY, try_download)
         # NOTE(harlowja): Mark that we completed downloading successfully
         sh.touch_file(self.downloaded_flag_file, die_if_there=False,
                       quiet=True, tracewriter=self.tracewriter)
     pips_downloaded = [pip_helper.extract_requirement(p) for p in pips_to_download]
     what_downloaded = self._examine_download_dir(pips_downloaded, self.download_dir)
     return (pips_downloaded, what_downloaded)
Example 7
 def build_all_srpms(self, package_files, tracewriter, jobs):
     (_fn,
      content) = utils.load_template(sh.joinpths("packaging", "makefiles"),
                                     "source.mk")
     scripts_dir = sh.abspth(
         sh.joinpths(settings.TEMPLATE_DIR, "packaging", "scripts"))
     cmdline = self._start_cmdline(escape_values=True)[1:] + [
         "--scripts-dir",
         scripts_dir,
         "--source-only",
         "--rpm-base",
         self._rpmbuild_dir,
         "--debug",
     ]
     executable = " ".join(self._start_cmdline()[0:1])
     params = {
         "DOWNLOADS_DIR": self._download_dir,
         "LOGS_DIR": self._log_dir,
         "PY2RPM": executable,
         "PY2RPM_FLAGS": " ".join(cmdline)
     }
     marks_dir = sh.joinpths(self._deps_dir, "marks-deps")
     if not sh.isdir(marks_dir):
         sh.mkdirslist(marks_dir, tracewriter=tracewriter)
     makefile_path = sh.joinpths(self._deps_dir, "deps.mk")
     sh.write_file(makefile_path,
                   utils.expand_template(content, params),
                   tracewriter=tracewriter)
     utils.log_iterable(package_files,
                        header="Building %s SRPM packages using %s jobs" %
                        (len(package_files), jobs),
                        logger=LOG)
     self._execute_make(makefile_path, marks_dir, jobs)
Example 8
 def _config_adjust_api_reg(self, contents, fn):
     gparams = ghelper.get_shared_params(**self.options)
     with io.BytesIO(contents) as stream:
         config = cfg.create_parser(cfg.RewritableConfigParser, self)
         config.readfp(stream)
         config.set('DEFAULT', 'debug', self.get_bool_option('verbose'))
         config.set('DEFAULT', 'verbose', self.get_bool_option('verbose'))
         if fn in [REG_CONF]:
             config.set('DEFAULT', 'bind_port', gparams['endpoints']['registry']['port'])
         else:
             config.set('DEFAULT', 'bind_port', gparams['endpoints']['public']['port'])
         config.set('DEFAULT', 'sql_connection', dbhelper.fetch_dbdsn(dbname=DB_NAME,
                                                                      utf8=True,
                                                                      dbtype=self.get_option('db', 'type'),
                                                                      **utils.merge_dicts(self.get_option('db'),
                                                                                          dbhelper.get_shared_passwords(self))))
         config.remove_option('DEFAULT', 'log_file')
         config.set('paste_deploy', 'flavor', self.get_option('paste_flavor'))
         for (k, v) in self._fetch_keystone_params().items():
             config.set('keystone_authtoken', k, v)
         if fn in [API_CONF]:
             config.set('DEFAULT', 'default_store', 'file')
             img_store_dir = sh.joinpths(self.get_option('component_dir'), 'images')
             config.set('DEFAULT', 'filesystem_store_datadir', img_store_dir)
             LOG.debug("Ensuring file system store directory %r exists and is empty." % (img_store_dir))
             if sh.isdir(img_store_dir):
                 sh.deldir(img_store_dir)
             sh.mkdirslist(img_store_dir, tracewriter=self.tracewriter, adjust_suids=True)
         return config.stringify(fn)
Example 9
File: base.py Project: y2kbot/anvil
    def download_dependencies(self):
        """Download dependencies from `$deps_dir/download-requires`."""
        # NOTE(aababilov): do not drop download_dir - it can be reused
        sh.mkdirslist(self.download_dir, tracewriter=self.tracewriter)
        pips_to_download = self._filter_download_requires()
        sh.write_file(self.download_requires_filename,
                      "\n".join([str(req) for req in pips_to_download]))
        if not pips_to_download:
            return ([], [])
        # NOTE(aababilov): user could have changed persona, so,
        # check that all requirements are downloaded....
        if self._requirements_satisfied(pips_to_download, self.download_dir):
            LOG.info("All python dependencies have been already downloaded")
        else:
            def on_download_finish(time_taken):
                LOG.info("Took %0.2f seconds to download...", time_taken)

            def try_download(attempt):
                LOG.info("Downloading %s dependencies with pip (attempt %s)...",
                         len(pips_to_download), attempt)
                output_filename = sh.joinpths(self.log_dir,
                                              "pip-download-attempt-%s.log" % (attempt))
                LOG.info("Please wait this may take a while...")
                LOG.info("Check %s for download activity details...",
                         colorizer.quote(output_filename))
                utils.time_it(on_download_finish,
                              pip_helper.download_dependencies,
                              self.download_dir,
                              pips_to_download,
                              output_filename)
            utils.retry(self.MAX_PIP_DOWNLOAD_ATTEMPTS,
                        self.PIP_DOWNLOAD_DELAY, try_download)
        pips_downloaded = [pip_helper.extract_requirement(p) for p in pips_to_download]
        what_downloaded = self._examine_download_dir(pips_downloaded, self.download_dir)
        return (pips_downloaded, what_downloaded)
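Examples 1, 6, and 9 all hand their flaky pip step to utils.retry(max_attempts, delay, func, ...), where func is called with the attempt number as its first argument. The helper itself is not reproduced on this page; a minimal sketch that matches this calling convention (an assumption, not anvil's actual implementation) could look like this:

import time

def retry(max_attempts, delay, func, *args, **kwargs):
    # Assumed contract: invoke func(attempt, *args, **kwargs) up to
    # max_attempts times, sleeping 'delay' seconds between failed attempts,
    # and re-raise the last exception if every attempt fails.
    last_exc = None
    for attempt in range(1, max_attempts + 1):
        try:
            return func(attempt, *args, **kwargs)
        except Exception as exc:
            last_exc = exc
            if attempt < max_attempts:
                time.sleep(delay)
    raise last_exc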
Example 10
 def _configure_instances_path(self, instances_path, nova_conf):
     nova_conf.add('instances_path', instances_path)
     if not sh.isdir(instances_path):
         LOG.debug("Attempting to create instance directory: %r", instances_path)
         sh.mkdirslist(instances_path, tracewriter=self.tracewriter)
         LOG.debug("Adjusting permissions of instance directory: %r", instances_path)
         sh.chmod(instances_path, 0777)
Example 11
 def _yyoom(self, arglist, on_completed=None):
     if not on_completed:
         on_completed = lambda data, errored: None
     if not sh.isdir(self._logs_dir):
         sh.mkdirslist(self._logs_dir)
     with tempfile.NamedTemporaryFile(suffix=".json") as fh:
         cmdline = [
             self.yyoom_executable,
             "--output-file", fh.name,
             "--verbose",
         ]
         cmdline.extend(arglist)
         log_filename = sh.joinpths(self._logs_dir,
                                    _generate_log_filename(arglist))
         LOG.debug("Running yyoom: log output will be placed in %s",
                   log_filename)
         try:
             sh.execute_save_output(cmdline, log_filename)
         except excp.ProcessExecutionError:
             with excp.reraise():
                 try:
                     fh.seek(0)
                     data = utils.parse_json(fh.read())
                 except Exception:
                     LOG.exception("Failed to parse YYOOM output")
                 else:
                     on_completed(data, True)
         else:
             fh.seek(0)
             data = utils.parse_json(fh.read())
             on_completed(data, False)
             return data
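The "with excp.reraise():" block in the handler above is a small pattern worth noting: it lets the error path parse and report the partial yyoom output while still propagating the original ProcessExecutionError. A context manager with that behavior might be sketched as follows (an assumption based on how it is used here, not necessarily anvil's actual excp.reraise):

import contextlib
import sys

import six

@contextlib.contextmanager
def reraise():
    # Assumed behavior: remember the exception currently being handled
    # (only meaningful inside an except block), run the with-block body,
    # then re-raise that exception so the caller still sees the failure.
    exc_type, exc_value, exc_tb = sys.exc_info()
    try:
        yield
    finally:
        six.reraise(exc_type, exc_value, exc_tb)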
Example 12
 def ensure_image_storage(img_store_dir):
     if sh.isdir(img_store_dir):
         return
     LOG.debug("Ensuring file system store directory %r exists.",
               img_store_dir)
     sh.mkdirslist(img_store_dir,
                   tracewriter=self.installer.tracewriter)
Example 13
 def build_all_srpms(self, package_files, tracewriter, jobs):
     (_fn, content) = utils.load_template(sh.joinpths("packaging", "makefiles"), "source.mk")
     scripts_dir = sh.abspth(sh.joinpths(settings.TEMPLATE_DIR, "packaging", "scripts"))
     cmdline = self._start_cmdline(escape_values=True)[1:] + [
         "--scripts-dir", scripts_dir,
         "--source-only",
         "--rpm-base", self._rpmbuild_dir
     ]
     executable = " ".join(self._start_cmdline()[0:1])
     params = {
         "DOWNLOADS_DIR": self._download_dir,
         "LOGS_DIR": self._log_dir,
         "PY2RPM": executable,
         "PY2RPM_FLAGS": " ".join(cmdline)
     }
     marks_dir = sh.joinpths(self._deps_dir, "marks-deps")
     if not sh.isdir(marks_dir):
         sh.mkdirslist(marks_dir, tracewriter=tracewriter)
     makefile_path = sh.joinpths(self._deps_dir, "deps.mk")
     sh.write_file(makefile_path, utils.expand_template(content, params),
                   tracewriter=tracewriter)
     utils.log_iterable(package_files,
                        header="Building %s SRPM packages using %s jobs" %
                               (len(package_files), jobs),
                        logger=LOG)
     self._execute_make(makefile_path, marks_dir, jobs)
Example 14
 def _yyoom(self, arglist, on_completed=None):
     if not on_completed:
         on_completed = lambda data, errored: None
     if not sh.isdir(self._logs_dir):
         sh.mkdirslist(self._logs_dir)
     with tempfile.NamedTemporaryFile(suffix=".json") as fh:
         cmdline = [
             self.yyoom_executable,
             "--output-file",
             fh.name,
             "--verbose",
         ]
         cmdline.extend(arglist)
         log_filename = sh.joinpths(self._logs_dir,
                                    _generate_log_filename(arglist))
         LOG.debug("Running yyoom: log output will be placed in %s",
                   log_filename)
         try:
             sh.execute_save_output(cmdline, log_filename)
         except excp.ProcessExecutionError:
             with excp.reraise():
                 try:
                     fh.seek(0)
                     data = utils.parse_json(fh.read())
                 except Exception:
                     LOG.exception("Failed to parse YYOOM output")
                 else:
                     on_completed(data, True)
         else:
             fh.seek(0)
             data = utils.parse_json(fh.read())
             on_completed(data, False)
             return data
Example 15
 def _create_repo(self, repo_name):
     repo_dir = sh.joinpths(self.anvil_repo_dir, repo_name)
     src_repo_dir = sh.joinpths(self.anvil_repo_dir,
                                self.SRC_REPOS[repo_name])
     for a_dir in (repo_dir, src_repo_dir):
         if not sh.isdir(a_dir):
             sh.mkdirslist(a_dir, tracewriter=self.tracewriter)
         cmdline = ["createrepo", a_dir]
         LOG.info("Creating repo at %s", a_dir)
         sh.execute(cmdline)
     repo_filename = sh.joinpths(self.anvil_repo_dir, "%s.repo" % repo_name)
     LOG.info("Writing %s", repo_filename)
     (_fn, content) = utils.load_template("packaging", "common.repo")
     params = {
         "repo_name": repo_name,
         "baseurl_bin": "file://%s" % repo_dir,
         "baseurl_src": "file://%s" % src_repo_dir,
     }
     sh.write_file(repo_filename,
                   utils.expand_template(content, params),
                   tracewriter=self.tracewriter)
     # NOTE(harlowja): Install *.repo file so that anvil deps will be available
     # when building openstack core project packages.
     system_repo_filename = sh.joinpths(self.YUM_REPO_DIR,
                                        "%s.repo" % repo_name)
     sh.copy(repo_filename,
             system_repo_filename,
             tracewriter=self.tracewriter)
     LOG.info("Copied to %s", system_repo_filename)
Example 16
def download(distro, uri, target_dir, **kwargs):
    puri = urlparse(uri)
    scheme = puri.scheme.lower()
    path = puri.path
    if scheme in ['git'] or path.find('.git') != -1:
        dirs_made = sh.mkdirslist(target_dir)
        downloader = GitDownloader(distro, uri, target_dir)
        downloader.download()
        return dirs_made
    if scheme in ['http', 'https']:
        dirs_made = []
        with utils.tempdir() as tdir:
            fn = sh.basename(path)
            downloader = UrlLibDownloader(uri, sh.joinpths(tdir, fn))
            downloader.download()
            if fn.endswith('.tar.gz'):
                dirs_made = sh.mkdirslist(target_dir)
                cmd = ['tar', '-xzvf', sh.joinpths(tdir, fn), '-C', target_dir]
                sh.execute(*cmd)
            elif fn.endswith('.zip'):
                # TODO(harlowja) this might not be 100% right...
                # we might have to move the finished directory...
                dirs_made = sh.mkdirslist(target_dir)
                cmd = ['unzip', sh.joinpths(tdir, fn), '-d', target_dir]
                sh.execute(*cmd)
            else:
                raise excp.DownloadException("Unable to extract %s downloaded from %s" % (fn, uri))
        return dirs_made
    else:
        raise excp.DownloadException("Unknown scheme %s, unable to download from %s" % (scheme, uri))
Example 17
 def pre_build():
     build_requirements = self.requirements.get("build-requires")
     if build_requirements:
         utils.log_iterable(build_requirements,
                            header="Installing build requirements",
                            logger=LOG)
         self.helper.transaction(install_pkgs=build_requirements,
                                 tracewriter=self.tracewriter)
     build_requirements = ''
     try:
         build_requirements = sh.load_file(self.rpm_build_requires_filename)
     except IOError as e:
         if e.errno != errno.ENOENT:
             raise
     build_requirements = set(pkg_resources.yield_lines(build_requirements))
     for repo_name in self.REPOS:
         repo_dir = sh.joinpths(self.anvil_repo_dir, self.SRC_REPOS[repo_name])
         matched_paths = []
         paths = list_src_rpms(repo_dir)
         envra_details = self.envra_helper.explode(*paths)
         for (path, envra_detail) in zip(paths, envra_details):
             package_name = envra_detail.get('name')
             if package_name in build_requirements:
                 matched_paths.append(path)
                 build_requirements.discard(package_name)
         if matched_paths:
             with sh.remove_before(self.prebuild_dir) as prebuild_dir:
                 if not sh.isdir(prebuild_dir):
                     sh.mkdirslist(prebuild_dir, tracewriter=self.tracewriter)
                 for path in matched_paths:
                     sh.move(path, sh.joinpths(prebuild_dir, sh.basename(path)))
                 build(prebuild_dir, repo_name,
                       'Prebuilding %s RPM packages from their SRPMs'
                       ' for repo %s using %s jobs')
     return build_requirements
Example 18
    def _config_adjust_api(self, config):
        config.add("core_plugin",
                   self.plugin_configurators['core_plugin'].PLUGIN_CLASS)
        config.add('auth_strategy', 'keystone')
        config.add("api_paste_config", self.target_config(PASTE_CONF))
        # TODO(aababilov): add debug to other services conf files
        config.add('debug', self.installer.get_bool_option("debug"))
        config.add("log_file", "")
        config.add("log_dir", "/var/log/neutron")

        # Setup the interprocess locking directory
        # (don't put me on shared storage)
        lock_path = self.installer.get_option('lock_path')
        if not lock_path:
            lock_path = sh.joinpths(self.installer.get_option('component_dir'),
                                    'locks')
        sh.mkdirslist(lock_path, tracewriter=self.installer.tracewriter)
        config.add('lock_path', lock_path)

        self.setup_rpc(config, rpc_backends=MQ_BACKENDS)

        config.current_section = "AGENT"
        config.add("root_helper",
                   "sudo neutron-rootwrap /etc/neutron/rootwrap.conf")

        config.current_section = "keystone_authtoken"
        for (k, v) in self._fetch_keystone_params().items():
            config.add(k, v)
Example 19
 def _config_adjust_api_reg(self, contents, fn):
     gparams = ghelper.get_shared_params(**self.options)
     with io.BytesIO(contents) as stream:
         config = cfg.create_parser(cfg.RewritableConfigParser, self)
         config.readfp(stream)
         config.set('DEFAULT', 'debug', self.get_bool_option('verbose'))
         config.set('DEFAULT', 'verbose', self.get_bool_option('verbose'))
         if fn in [REG_CONF]:
             config.set('DEFAULT', 'bind_port', gparams['endpoints']['registry']['port'])
         else:
             config.set('DEFAULT', 'bind_port', gparams['endpoints']['public']['port'])
         config.set('DEFAULT', 'sql_connection', dbhelper.fetch_dbdsn(dbname=DB_NAME,
                                                                      utf8=True,
                                                                      dbtype=self.get_option('db', 'type'),
                                                                      **utils.merge_dicts(self.get_option('db'),
                                                                                          dbhelper.get_shared_passwords(self))))
         config.remove_option('DEFAULT', 'log_file')
         config.set('paste_deploy', 'flavor', self.get_option('paste_flavor'))
         for (k, v) in self._fetch_keystone_params().items():
             config.set('keystone_authtoken', k, v)
         if fn in [API_CONF]:
             config.set('DEFAULT', 'default_store', 'file')
             img_store_dir = sh.joinpths(self.get_option('component_dir'), 'images')
             config.set('DEFAULT', 'filesystem_store_datadir', img_store_dir)
             LOG.debug("Ensuring file system store directory %r exists and is empty." % (img_store_dir))
             if sh.isdir(img_store_dir):
                 sh.deldir(img_store_dir)
             sh.mkdirslist(img_store_dir, tracewriter=self.tracewriter, adjust_suids=True)
         return config.stringify(fn)
Example 20
 def _setup_pki(self):
     LOG.info("Setting up keystone's pki support.")
     for value in kconf.PKI_FILES.values():
         sh.mkdirslist(sh.dirname(sh.joinpths(self.configurator.link_dir, value)),
                       tracewriter=self.tracewriter)
     pki_cmd = MANAGE_CMD + ['pki_setup']
     cmds = [{'cmd': pki_cmd}]
     utils.execute_template(*cmds, cwd=self.bin_dir, params=self.config_params(None))
Example 21
 def _setup_pki(self):
     LOG.info("Setting up keystone's pki support.")
     for value in kconf.PKI_FILES.values():
         sh.mkdirslist(sh.dirname(sh.joinpths(self.configurator.link_dir, value)),
                       tracewriter=self.tracewriter)
     pki_cmd = MANAGE_CMD + ['pki_setup']
     cmds = [{'cmd': pki_cmd}]
     utils.execute_template(*cmds, cwd=self.bin_dir, params=self.config_params(None))
Example 22
 def _on_start(self, persona, component_order, instances):
     LOG.info("Booting up your components.")
     LOG.debug("Starting environment settings:")
     utils.log_object(env.get(), logger=LOG, level=logging.DEBUG, item_max_len=64)
     sh.mkdirslist(self.phase_dir)
     self._establish_passwords(component_order, instances)
     self._verify_components(component_order, instances)
     self._warm_components(component_order, instances)
Example 23
 def download_dependencies(self):
     """Download dependencies from `$deps_dir/download-requires`.
     """
     # NOTE(aababilov): do not drop download_dir - it can be reused
     sh.mkdirslist(self.download_dir, tracewriter=self.tracewriter)
     download_requires_filename = sh.joinpths(self.deps_dir,
                                              "download-requires")
     raw_pips_to_download = self.filter_download_requires()
     sh.write_file(download_requires_filename,
                   "\n".join(str(req) for req in raw_pips_to_download))
     if not raw_pips_to_download:
         return ([], [])
     downloaded_flag_file = sh.joinpths(self.deps_dir, "pip-downloaded")
     # NOTE(aababilov): user could have changed persona, so,
     # check that all requirements are downloaded
     if sh.isfile(downloaded_flag_file) and self._requirements_satisfied(
             raw_pips_to_download, self.download_dir):
         LOG.info("All python dependencies have been already downloaded")
     else:
         pip_dir = sh.joinpths(self.deps_dir, "pip")
         pip_download_dir = sh.joinpths(pip_dir, "download")
         pip_build_dir = sh.joinpths(pip_dir, "build")
         # NOTE(aababilov): do not clean the cache, it is always useful
         pip_cache_dir = sh.joinpths(self.deps_dir, "pip-cache")
         pip_failures = []
         for attempt in xrange(self.MAX_PIP_DOWNLOAD_ATTEMPTS):
             # NOTE(aababilov): pip has issues with already downloaded files
             sh.deldir(pip_dir)
             sh.mkdir(pip_download_dir, recurse=True)
             header = "Downloading %s python dependencies (attempt %s)"
             header = header % (len(raw_pips_to_download), attempt)
             utils.log_iterable(sorted(raw_pips_to_download),
                                logger=LOG,
                                header=header)
             failed = False
             try:
                 self._try_download_dependencies(attempt, raw_pips_to_download,
                                                 pip_download_dir,
                                                 pip_cache_dir, pip_build_dir)
                 pip_failures = []
             except exc.ProcessExecutionError as e:
                 LOG.exception("Failed downloading python dependencies")
                 pip_failures.append(e)
                 failed = True
             if not failed:
                 break
         for filename in sh.listdir(pip_download_dir, files_only=True):
             sh.move(filename, self.download_dir, force=True)
         sh.deldir(pip_dir)
         if pip_failures:
             raise pip_failures[-1]
         with open(downloaded_flag_file, "w"):
             pass
     pips_downloaded = [pip_helper.extract_requirement(p)
                        for p in raw_pips_to_download]
     self._examine_download_dir(pips_downloaded, self.download_dir)
     what_downloaded = sh.listdir(self.download_dir, files_only=True)
     return (pips_downloaded, what_downloaded)
Example 24
    def build_binary(self):
        def is_src_rpm(path):
            if not path:
                return False
            if not sh.isfile(path):
                return False
            if not path.lower().endswith('.src.rpm'):
                return False
            return True

        def list_src_rpms(path):
            path_files = []
            if sh.isdir(path):
                path_files = sh.listdir(path, filter_func=is_src_rpm)
            return sorted(path_files)

        build_requirements = self.requirements.get("build-requires")
        if build_requirements:
            utils.log_iterable(build_requirements,
                               header="Installing build requirements",
                               logger=LOG)
            self.helper.transaction(install_pkgs=build_requirements,
                                    tracewriter=self.tracewriter)

        for repo_name in self.REPOS:
            src_repo_dir = sh.joinpths(self.anvil_repo_dir, self.SRC_REPOS[repo_name])
            src_repo_files = list_src_rpms(src_repo_dir)
            if not src_repo_files:
                continue
            utils.log_iterable(src_repo_files,
                               header=('Building %s RPM packages from their'
                                      ' SRPMs for repo %s using %s jobs') %
                                      (len(src_repo_files), self.SRC_REPOS[repo_name], self._jobs),
                               logger=LOG)
            makefile_path = sh.joinpths(self.deps_dir, "binary-%s.mk" % repo_name)
            marks_dir = sh.joinpths(self.deps_dir, "marks-binary")
            if not sh.isdir(marks_dir):
                sh.mkdirslist(marks_dir, tracewriter=self.tracewriter)
            rpmbuild_flags = "--rebuild"
            if self.opts.get("usr_only", False):
                rpmbuild_flags += " --define 'usr_only 1'"
            params = {
                "SRC_REPO_DIR": src_repo_dir,
                "RPMBUILD_FLAGS": rpmbuild_flags,
                "LOGS_DIR": self.log_dir,
                'RPMTOP_DIR': self.rpmbuild_dir,
            }
            (_fn, content) = utils.load_template(sh.joinpths("packaging", "makefiles"), "binary.mk")
            sh.write_file(makefile_path, utils.expand_template(content, params),
                          tracewriter=self.tracewriter)
            with sh.remove_before_after(self.rpmbuild_dir):
                self._create_rpmbuild_subdirs()
                self._execute_make(makefile_path, marks_dir)
                repo_dir = sh.joinpths(self.anvil_repo_dir, repo_name)
                for d in sh.listdir(self.rpmbuild_dir, dirs_only=True):
                    self._move_rpm_files(sh.joinpths(d, "RPMS"), repo_dir)
                self._move_rpm_files(sh.joinpths(self.rpmbuild_dir, "RPMS"), repo_dir)
            self._create_repo(repo_name)
Example 25
 def _configure_instances_path(self, instances_path, nova_conf):
     nova_conf.add('instances_path', instances_path)
     if not sh.isdir(instances_path):
         LOG.debug("Attempting to create instance directory: %r",
                   instances_path)
         sh.mkdirslist(instances_path, tracewriter=self.tracewriter)
         LOG.debug("Adjusting permissions of instance directory: %r",
                   instances_path)
         sh.chmod(instances_path, 0777)
Example 26
 def build_paths(self):
     if self._build_paths is None:
         build_paths = {}
         for name in RPM_DIR_NAMES:
             final_path = sh.joinpths(self.package_dir, name.upper())
             build_paths[name] = final_path
             if sh.isdir(final_path):
                 sh.deldir(final_path, True)
             sh.mkdirslist(final_path, tracewriter=self.tracewriter)
         self._build_paths = build_paths
     return copy.deepcopy(self._build_paths)  # Return copy (not the same instance)
Example 27
 def _move_rpm_files(self, source_dir, target_dir):
     # NOTE(imelnikov): we should create target_dir even if we have
     #  nothing to move, because later we rely on its existence
     if not sh.isdir(target_dir):
         sh.mkdirslist(target_dir, tracewriter=self.tracewriter)
     if not sh.isdir(source_dir):
         return
     for filename in sh.listdir(source_dir, recursive=True, files_only=True):
         if not filename.lower().endswith(".rpm"):
             continue
         sh.move(filename, target_dir, force=True)
Example 28
 def build_paths(self):
     if self._build_paths is None:
         build_paths = {}
         for name in RPM_DIR_NAMES:
             final_path = sh.joinpths(self.package_dir, name.upper())
             build_paths[name] = final_path
             if sh.isdir(final_path):
                 sh.deldir(final_path, True)
             sh.mkdirslist(final_path, tracewriter=self.tracewriter)
         self._build_paths = build_paths
     return copy.deepcopy(self._build_paths)  # Return copy (not the same instance)
Example 29
    def _config_adjust_api(self, config):
        self._config_adjust_api_reg(config)
        gparams = ghelper.get_shared_params(**self.installer.options)
        config.add('bind_port', gparams['endpoints']['public']['port'])

        config.add('default_store', 'file')
        img_store_dir = sh.joinpths(self.installer.get_option('component_dir'), 'images')
        config.add('filesystem_store_datadir', img_store_dir)
        LOG.debug("Ensuring file system store directory %r exists and is empty." % (img_store_dir))
        if sh.isdir(img_store_dir):
            sh.deldir(img_store_dir)
        sh.mkdirslist(img_store_dir, tracewriter=self.installer.tracewriter)
Example 30
 def _move_rpm_files(self, source_dir, target_dir):
     # NOTE(imelnikov): we should create target_dir even if we have
     #  nothing to move, because later we rely on its existence
     if not sh.isdir(target_dir):
         sh.mkdirslist(target_dir, tracewriter=self.tracewriter)
     if not sh.isdir(source_dir):
         return
     for filename in sh.listdir(source_dir, recursive=True,
                                files_only=True):
         if not filename.lower().endswith(".rpm"):
             continue
         sh.move(filename, target_dir, force=True)
Example 31
 def _setup_logs(self, clear):
     log_fns = [self.access_log, self.error_log]
     utils.log_iterable(log_fns, logger=LOG,
                        header="Adjusting %s log files" % (len(log_fns)))
     for fn in log_fns:
         with sh.Rooted(True):
             if clear:
                 sh.unlink(fn, True)
             sh.mkdirslist(sh.dirname(fn))
             sh.touch_file(fn, die_if_there=False)
             sh.chmod(fn, 0666)
         self.tracewriter.file_touched(fn)
     return len(log_fns)
Example 32
 def _configure_files(self):
     config_fns = self.config_files
     if config_fns:
         utils.log_iterable(config_fns, logger=LOG, header="Configuring %s files" % (len(config_fns)))
         for fn in config_fns:
             tgt_fn = self.target_config(fn)
             sh.mkdirslist(sh.dirname(tgt_fn), tracewriter=self.tracewriter)
             (source_fn, contents) = self.source_config(fn)
             LOG.debug("Configuring file %s ---> %s.", (source_fn), (tgt_fn))
             contents = self._config_param_replace(fn, contents, self.config_params(fn))
             contents = self._config_adjust(contents, fn)
             sh.write_file(tgt_fn, contents, tracewriter=self.tracewriter)
     return len(config_fns)
Example 33
File: venv.py Project: y2kbot/anvil
 def package_start(self):
     super(VenvDependencyHandler, self).package_start()
     for instance in self.instances:
         if not self._is_buildable(instance):
             continue
         # Create a virtualenv...
         venv_dir = self._venv_directory_for(instance)
         sh.mkdirslist(venv_dir, tracewriter=self.tracewriter)
         cmd = ['virtualenv', '--clear', venv_dir]
         LOG.info("Creating virtualenv at %s", colorizer.quote(venv_dir))
         sh.execute(cmd)
         if self._PREQ_PKGS:
             self._install_into_venv(instance, self._PREQ_PKGS)
Example 34
    def build_binary(self):

        def _install_build_requirements():
            build_requires = self.requirements["build-requires"]
            if build_requires:
                utils.log_iterable(sorted(build_requires),
                                   header=("Installing %s build requirements" % len(build_requires)),
                                   logger=LOG)
                cmdline = ["yum", "install", "-y"] + list(build_requires)
                sh.execute(cmdline)

        def _is_src_rpm(filename):
            return filename.endswith('.src.rpm')

        _install_build_requirements()

        for repo_name in self.REPOS:
            repo_dir = sh.joinpths(self.anvil_repo_dir, repo_name)
            sh.mkdirslist(repo_dir, tracewriter=self.tracewriter)
            src_repo_dir = sh.joinpths(self.anvil_repo_dir, self.SRC_REPOS[repo_name])
            if sh.isdir(src_repo_dir):
                src_repo_files = sh.listdir(src_repo_dir, files_only=True)
                src_repo_files = sorted([f for f in src_repo_files if _is_src_rpm(f)])
            else:
                src_repo_files = []
            if not src_repo_files:
                continue
            src_repo_base_files = [sh.basename(f) for f in src_repo_files]
            LOG.info('Building %s RPM packages from their SRPMs for repo %s using %s jobs',
                     len(src_repo_files), self.SRC_REPOS[repo_name], self.jobs)
            makefile_name = sh.joinpths(self.deps_dir, "binary-%s.mk" % repo_name)
            marks_dir = sh.joinpths(self.deps_dir, "marks-binary")
            sh.mkdirslist(marks_dir, tracewriter=self.tracewriter)
            (_fn, content) = utils.load_template("packaging/makefiles", "binary.mk")
            rpmbuild_flags = ("--rebuild --define '_topdir %s'" % self.rpmbuild_dir)
            if self.opts.get("usr_only", False):
                rpmbuild_flags += "--define 'usr_only 1'"
            params = {
                "SRC_REPO_DIR": src_repo_dir,
                "RPMBUILD_FLAGS": rpmbuild_flags,
                "LOGS_DIR": self.log_dir,
            }
            sh.write_file(makefile_name,
                          utils.expand_template(content, params),
                          tracewriter=self.tracewriter)
            with sh.remove_before_after(self.rpmbuild_dir):
                self._create_rpmbuild_subdirs()
                self._execute_make(makefile_name, marks_dir)
                self._move_files(sh.joinpths(self.rpmbuild_dir, "RPMS"),
                                 repo_dir)
            self._create_repo(repo_name)
Example 35
 def download_dependencies(self):
     """Download dependencies from `$deps_dir/download-requires`."""
     # NOTE(aababilov): do not drop download_dir - it can be reused
     sh.mkdirslist(self.download_dir, tracewriter=self.tracewriter)
     pips_to_download = self._filter_download_requires()
     sh.write_file(self.download_requires_filename,
                   "\n".join([str(req) for req in pips_to_download]))
     if not pips_to_download:
         return ([], [])
     # NOTE(aababilov): user could have changed persona, so,
     # check that all requirements are downloaded
     if (sh.isfile(self.downloaded_flag_file)
             and self._requirements_satisfied(pips_to_download,
                                              self.download_dir)):
         LOG.info("All python dependencies have been already downloaded")
     else:
         pip_failures = []
         for attempt in xrange(self.MAX_PIP_DOWNLOAD_ATTEMPTS):
             # NOTE(aababilov): pip has issues with already downloaded files
             for filename in sh.listdir(self.download_dir, files_only=True):
                 sh.unlink(filename)
             header = "Downloading %s python dependencies (attempt %s)"
             header = header % (len(pips_to_download), attempt + 1)
             utils.log_iterable(sorted(pips_to_download),
                                logger=LOG,
                                header=header)
             failed = False
             try:
                 self._try_download_dependencies(attempt + 1,
                                                 pips_to_download,
                                                 self.download_dir)
                 pip_failures = []
             except exc.ProcessExecutionError as e:
                 LOG.exception("Failed downloading python dependencies")
                 pip_failures.append(e)
                 failed = True
             if not failed:
                 break
         if pip_failures:
             raise pip_failures[-1]
         # NOTE(harlowja): Mark that we completed downloading successfully
         sh.touch_file(self.downloaded_flag_file,
                       die_if_there=False,
                       quiet=True,
                       tracewriter=self.tracewriter)
     pips_downloaded = [
         pip_helper.extract_requirement(p) for p in pips_to_download
     ]
     self._examine_download_dir(pips_downloaded, self.download_dir)
     return (pips_downloaded, sh.listdir(self.download_dir,
                                         files_only=True))
Example 36
 def build_all_binaries(self, repo_name, src_repo_dir, rpmbuild_flags, tracewriter, jobs):
     makefile_path = sh.joinpths(self._deps_dir, "binary-%s.mk" % repo_name)
     marks_dir = sh.joinpths(self._deps_dir, "marks-binary")
     if not sh.isdir(marks_dir):
         sh.mkdirslist(marks_dir, tracewriter=tracewriter)
     params = {
         "SRC_REPO_DIR": src_repo_dir,
         "RPMBUILD_FLAGS": rpmbuild_flags,
         "LOGS_DIR": self._log_dir,
         "RPMTOP_DIR": self._rpmbuild_dir,
     }
     (_fn, content) = utils.load_template(sh.joinpths("packaging", "makefiles"), "binary.mk")
     sh.write_file(makefile_path, utils.expand_template(content, params), tracewriter=tracewriter)
     self._execute_make(makefile_path, marks_dir, jobs)
Example 37
 def _configure_files(self):
     config_fns = self.configurator.config_files
     if config_fns:
         utils.log_iterable(config_fns, logger=LOG,
                            header="Configuring %s files" % (len(config_fns)))
         for fn in config_fns:
             tgt_fn = self.configurator.target_config(fn)
             sh.mkdirslist(sh.dirname(tgt_fn), tracewriter=self.tracewriter)
             (source_fn, contents) = self.configurator.source_config(fn)
             LOG.debug("Configuring file %s ---> %s.", (source_fn), (tgt_fn))
             contents = self.configurator.config_param_replace(fn, contents, self.config_params(fn))
             contents = self.configurator.config_adjust(contents, fn)
             sh.write_file(tgt_fn, contents, tracewriter=self.tracewriter)
     return len(config_fns)
Example 38
 def configure(self):
     configs_made = nova.NovaInstaller.configure(self)
     driver_canon = utils.canon_virt_driver(self.get_option('virt_driver'))
     if driver_canon == 'libvirt':
         # Create a libvirtd user group
         if not sh.group_exists('libvirtd'):
             cmd = ['groupadd', 'libvirtd']
             sh.execute(cmd)
         if not sh.isfile(LIBVIRT_POLICY_FN):
             contents = self._get_policy(self._get_policy_users())
             sh.mkdirslist(sh.dirname(LIBVIRT_POLICY_FN))
             sh.write_file(LIBVIRT_POLICY_FN, contents)
             configs_made += 1
     return configs_made
Example 39
 def configure(self):
     configs_made = nova.NovaInstaller.configure(self)
     driver_canon = utils.canon_virt_driver(self.get_option('virt_driver'))
     if driver_canon == 'libvirt':
         # Create a libvirtd user group
         if not sh.group_exists('libvirtd'):
             cmd = ['groupadd', 'libvirtd']
             sh.execute(cmd)
         if not sh.isfile(LIBVIRT_POLICY_FN):
             contents = self._get_policy(self._get_policy_users())
             sh.mkdirslist(sh.dirname(LIBVIRT_POLICY_FN))
             sh.write_file(LIBVIRT_POLICY_FN, contents)
             configs_made += 1
     return configs_made
Example 40
 def download(self):
     (from_uri, target_dir) = self._get_download_location()
     if not from_uri and not target_dir:
         return []
     else:
         uris = [from_uri]
         utils.log_iterable(uris, logger=LOG, header="Downloading from %s uris" % (len(uris)))
         sh.mkdirslist(target_dir, tracewriter=self.tracewriter)
         # This is used to delete what is downloaded (done before
         # fetching to ensure its cleaned up even on download failures)
         self.tracewriter.download_happened(target_dir, from_uri)
         fetcher = down.GitDownloader(self.distro, from_uri, target_dir)
         fetcher.download()
         return uris
Example 41
 def package_instance(self, instance):
     with sh.remove_before_after(self.rpmbuild_dir):
         for dirname in (sh.joinpths(self.rpmbuild_dir, "SPECS"),
                         sh.joinpths(self.rpmbuild_dir, "SOURCES")):
             sh.mkdirslist(dirname)
         if instance.name in ["general"]:
             self._build_dependencies()
             self._move_srpms("anvil-deps")
         else:
             # Meta packages don't get built.
             app_dir = instance.get_option("app_dir")
             if sh.isdir(app_dir):
                 self._build_openstack_package(instance)
                 self._move_srpms("anvil")
Example 42
 def _setup_logs(self, clear):
     log_fns = [self.access_log, self.error_log]
     utils.log_iterable(log_fns,
                        logger=LOG,
                        header="Adjusting %s log files" % (len(log_fns)))
     for fn in log_fns:
         with sh.Rooted(True):
             if clear:
                 sh.unlink(fn, True)
             sh.mkdirslist(sh.dirname(fn))
             sh.touch_file(fn, die_if_there=False)
             sh.chmod(fn, 0666)
         self.tracewriter.file_touched(fn)
     return len(log_fns)
Example 43
    def _config_adjust_api(self, config):
        self._config_adjust_api_reg(config)
        gparams = ghelper.get_shared_params(**self.installer.options)
        config.add('bind_port', gparams['endpoints']['public']['port'])

        config.add('default_store', 'file')
        img_store_dir = sh.joinpths(self.installer.get_option('component_dir'),
                                    'images')
        config.add('filesystem_store_datadir', img_store_dir)
        LOG.debug(
            "Ensuring file system store directory %r exists and is empty." %
            (img_store_dir))
        if sh.isdir(img_store_dir):
            sh.deldir(img_store_dir)
        sh.mkdirslist(img_store_dir, tracewriter=self.installer.tracewriter)
Example 44
 def download(self):
     (from_uri, target_dir) = self._get_download_location()
     if not from_uri and not target_dir:
         return []
     else:
         uris = [from_uri]
         utils.log_iterable(uris, logger=LOG,
                            header="Downloading from %s uris" % (len(uris)))
         sh.mkdirslist(target_dir, tracewriter=self.tracewriter)
         # This is used to delete what is downloaded (done before
         # fetching to ensure its cleaned up even on download failures)
         self.tracewriter.download_happened(target_dir, from_uri)
         fetcher = down.GitDownloader(self.distro, from_uri, target_dir)
         fetcher.download()
         return uris
Example 45
 def _create_log_dirs(self):
     data_dir = sh.joinpths(self.get_option("app_dir"), self.cfg.getdefaulted("swift", "data_location", "data"))
     cfg_dir = self.get_option("cfg_dir")
     log_dir = sh.joinpths(data_dir, LOG_DIR)
     self.tracewriter.dirs_made(*sh.mkdirslist(sh.joinpths(log_dir, "hourly")))
     sh.symlink(sh.joinpths(cfg_dir, SYSLOG_CONF), SWIFT_RSYNC_LOC)
     self.tracewriter.symlink_made(SWIFT_RSYNC_LOC)
Example 46
 def _config_adjust_api(self, contents, fn):
     params = ghelper.get_shared_params(**self.options)
     with io.BytesIO(contents) as stream:
         config = cfg.create_parser(cfg.RewritableConfigParser, self)
         config.readfp(stream)
         img_store_dir = sh.joinpths(self.get_option('component_dir'),
                                     'images')
          config.set('DEFAULT', 'debug', self.get_bool_option('verbose'))
         config.set('DEFAULT', 'verbose', self.get_bool_option('verbose'))
         config.set('DEFAULT', 'default_store', 'file')
         config.set('DEFAULT', 'filesystem_store_datadir', img_store_dir)
         config.set('DEFAULT', 'bind_port',
                    params['endpoints']['public']['port'])
         config.set(
             'DEFAULT', 'sql_connection',
             dbhelper.fetch_dbdsn(dbname=DB_NAME,
                                  utf8=True,
                                  dbtype=self.get_option('db', 'type'),
                                  **utils.merge_dicts(
                                      self.get_option('db'),
                                      dbhelper.get_shared_passwords(self))))
         config.remove_option('DEFAULT', 'log_file')
         config.set('paste_deploy', 'flavor',
                    self.get_option('paste_flavor'))
         LOG.debug(
             "Ensuring file system store directory %r exists and is empty."
             % (img_store_dir))
         sh.deldir(img_store_dir)
         self.tracewriter.dirs_made(*sh.mkdirslist(img_store_dir))
         return config.stringify(fn)
Example 47
 def _start(self):
     if self.started:
         return
     else:
         trace_dirs = sh.mkdirslist(sh.dirname(self.trace_fn))
         sh.touch_file(self.trace_fn, die_if_there=self.break_if_there)
         self.started = True
         self.dirs_made(*trace_dirs)
Example 48
 def _configure_instances_path(self, instances_path, nova_conf):
     nova_conf.add('instances_path', instances_path)
     LOG.debug("Attempting to create instance directory: %r",
               instances_path)
     self.installer.tracewriter.dirs_made(*sh.mkdirslist(instances_path))
     LOG.debug("Adjusting permissions of instance directory: %r",
               instances_path)
     sh.chmod(instances_path, 0777)
Example 49
File: venv.py Project: jzako/anvil
 def package_start(self):
     super(VenvDependencyHandler, self).package_start()
     self.install_counters.clear()
     base_cmd = env.get_key('VENV_CMD', default_value='virtualenv')
     for instance in self.instances:
         if not self._is_buildable(instance):
             continue
         # Create a virtualenv...
         venv_dir = self._venv_directory_for(instance)
         sh.mkdirslist(venv_dir, tracewriter=self.tracewriter)
         cmd = [base_cmd, '--clear', venv_dir]
         LOG.info("Creating virtualenv at %s", colorizer.quote(venv_dir))
         out_filename = sh.joinpths(self.log_dir, "venv-create-%s.log" % (instance.name))
         sh.execute_save_output(cmd, out_filename)
         self._install_into_venv(instance,
                                 self.PREREQUISITE_UPGRADE_PKGS,
                                 upgrade=True)
Example 50
 def _start(self):
     if self.started:
         return
     else:
         trace_dirs = sh.mkdirslist(sh.dirname(self.trace_fn))
         sh.touch_file(self.trace_fn, die_if_there=self.break_if_there)
         self.started = True
         self.dirs_made(*trace_dirs)
Example 51
 def package_start(self):
     super(VenvDependencyHandler, self).package_start()
     self.install_counters.clear()
     base_cmd = env.get_key('VENV_CMD', default_value='virtualenv')
     for instance in self.instances:
         if not self._is_buildable(instance):
             continue
         # Create a virtualenv...
         venv_dir = self._venv_directory_for(instance)
         sh.mkdirslist(venv_dir, tracewriter=self.tracewriter)
         cmd = [base_cmd, '--clear', venv_dir]
         LOG.info("Creating virtualenv at %s", colorizer.quote(venv_dir))
         out_filename = sh.joinpths(self.log_dir,
                                    "venv-create-%s.log" % (instance.name))
         sh.execute_save_output(cmd, out_filename)
         self._install_into_venv(instance,
                                 self.PREREQUISITE_UPGRADE_PKGS,
                                 upgrade=True)
Example 52
 def build_all_binaries(self, repo_name, src_repo_dir, rpmbuild_flags,
                        tracewriter, jobs):
     makefile_path = sh.joinpths(self._deps_dir, "binary-%s.mk" % repo_name)
     marks_dir = sh.joinpths(self._deps_dir, "marks-binary")
     if not sh.isdir(marks_dir):
         sh.mkdirslist(marks_dir, tracewriter=tracewriter)
     params = {
         "SRC_REPO_DIR": src_repo_dir,
         "RPMBUILD_FLAGS": rpmbuild_flags,
         "LOGS_DIR": self._log_dir,
         "RPMTOP_DIR": self._rpmbuild_dir,
     }
     (_fn,
      content) = utils.load_template(sh.joinpths("packaging", "makefiles"),
                                     "binary.mk")
     sh.write_file(makefile_path,
                   utils.expand_template(content, params),
                   tracewriter=tracewriter)
     self._execute_make(makefile_path, marks_dir, jobs)
Example 53
 def _install_python_setups(self):
     py_dirs = self.python_directories
     if py_dirs:
         real_dirs = {}
         for (name, wkdir) in py_dirs.items():
             real_dirs[name] = wkdir
             if not real_dirs[name]:
                 real_dirs[name] = self.get_option('app_dir')
         utils.log_iterable(real_dirs.values(), logger=LOG,
                            header="Setting up %s python directories" % (len(real_dirs)))
         setup_cmd = self.distro.get_command('python', 'setup')
         for (name, working_dir) in real_dirs.items():
             sh.mkdirslist(working_dir, tracewriter=self.tracewriter)
             setup_fn = sh.joinpths(self.get_option('trace_dir'), "%s.python.setup" % (name))
             sh.execute(*setup_cmd, cwd=working_dir, run_as_root=True,
                        stderr_fn='%s.stderr' % (setup_fn),
                        stdout_fn='%s.stdout' % (setup_fn),
                        tracewriter=self.tracewriter)
             self.tracewriter.py_installed(name, working_dir)
Example 54
 def build_paths(self):
     if self._build_paths is None:
         bpaths = {}
         for name in ['sources', 'specs', 'srpms', 'rpms', 'build']:
             final_path = sh.joinpths(self.package_dir, name.upper())
             bpaths[name] = final_path
             if sh.isdir(final_path):
                 sh.deldir(final_path, True)
             self.tracewriter.dirs_made(*sh.mkdirslist(final_path))
         self._build_paths = bpaths
     return dict(self._build_paths)
Example 55
    def _config_adjust_api(self, config):
        config.add("core_plugin", CORE_PLUGIN_CLASSES[self.core_plugin])
        config.add('auth_strategy', 'keystone')
        config.add("api_paste_config", self.target_config(PASTE_CONF))
        # TODO(aababilov): add debug to other services conf files
        config.add('debug', self.installer.get_bool_option("debug"))

        # Setup the interprocess locking directory
        # (don't put me on shared storage)
        lock_path = self.installer.get_option('lock_path')
        if not lock_path:
            lock_path = sh.joinpths(self.installer.get_option('component_dir'),
                                    'locks')
        sh.mkdirslist(lock_path, tracewriter=self.installer.tracewriter)
        config.add('lock_path', lock_path)

        self.setup_rpc(config, 'quantum.openstack.common.rpc.impl_kombu')

        config.current_section = "keystone_authtoken"
        for (k, v) in self._fetch_keystone_params().items():
            config.add(k, v)
Example 56
    def download_dependencies(self):
        """Download dependencies from `$deps_dir/download-requires`."""
        # NOTE(aababilov): do not drop download_dir - it can be reused
        sh.mkdirslist(self.download_dir, tracewriter=self.tracewriter)
        pips_to_download = self._filter_download_requires()
        sh.write_file(self.download_requires_filename,
                      "\n".join([str(req) for req in pips_to_download]))
        if not pips_to_download:
            return ([], [])
        # NOTE(aababilov): user could have changed persona, so,
        # check that all requirements are downloaded
        if (sh.isfile(self.downloaded_flag_file)
                and self._requirements_satisfied(pips_to_download,
                                                 self.download_dir)):
            LOG.info("All python dependencies have been already downloaded")
        else:

            def try_download(attempt):
                LOG.info(
                    "Downloading %s dependencies with pip (attempt %s)...",
                    len(pips_to_download), attempt)
                output_filename = sh.joinpths(
                    self.log_dir, "pip-download-attempt-%s.log" % (attempt))
                pip_helper.download_dependencies(self.download_dir,
                                                 pips_to_download,
                                                 output_filename)

            utils.retry(self.MAX_PIP_DOWNLOAD_ATTEMPTS,
                        self.PIP_DOWNLOAD_DELAY, try_download)
            # NOTE(harlowja): Mark that we completed downloading successfully
            sh.touch_file(self.downloaded_flag_file,
                          die_if_there=False,
                          quiet=True,
                          tracewriter=self.tracewriter)
        pips_downloaded = [
            pip_helper.extract_requirement(p) for p in pips_to_download
        ]
        what_downloaded = self._examine_download_dir(pips_downloaded,
                                                     self.download_dir)
        return (pips_downloaded, what_downloaded)
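
The utils.retry(max_attempts, delay, func, ...) call shape seen here (and in the pip install helper earlier in this listing) can be approximated in a few lines. This is only a sketch of the pattern, not anvil's actual implementation:

import time

def retry(max_attempts, delay, func, *args, **kwargs):
    # The 1-based attempt number is passed as the first argument, which is
    # what the try_download/try_install callbacks above expect.
    for attempt in range(1, max_attempts + 1):
        try:
            return func(attempt, *args, **kwargs)
        except Exception:
            if attempt == max_attempts:
                raise
            time.sleep(delay)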
Example no. 57
0
 def download_dependencies(self):
     """Download dependencies from `$deps_dir/download-requires`."""
     # NOTE(aababilov): do not drop download_dir - it can be reused
     sh.mkdirslist(self.download_dir, tracewriter=self.tracewriter)
     pips_to_download = self._filter_download_requires()
     sh.write_file(self.download_requires_filename,
                   "\n".join([str(req) for req in pips_to_download]))
     if not pips_to_download:
         return ([], [])
     # NOTE(aababilov): user could have changed persona, so,
     # check that all requirements are downloaded....
     if self._requirements_satisfied(pips_to_download, self.download_dir):
         LOG.info("All python dependencies have been already downloaded")
     else:
         utils.retry(self.retries, self.retry_delay, self._try_download,
                     pips_to_download)
     pips_downloaded = [
         pip_helper.extract_requirement(p) for p in pips_to_download
     ]
     what_downloaded = self._examine_download_dir(pips_downloaded,
                                                  self.download_dir)
     return (pips_downloaded, what_downloaded)
Example no. 58
0
    def download(self):
        """Download sources needed to build the component, if any."""
        target_dir = self.get_option('app_dir')
        download_cfg = utils.load_yaml(self._origins_fn).get(self.name, {})
        if not target_dir or not download_cfg:
            return []

        uri = download_cfg.pop('repo', None)
        if not uri:
            raise ValueError(
                ("Could not find repo uri for %r component from the %r "
                 "config file." % (self.name, self._origins_fn)))

        uris = [uri]
        utils.log_iterable(uris,
                           logger=LOG,
                           header="Downloading from %s uris" % (len(uris)))
        sh.mkdirslist(target_dir, tracewriter=self.tracewriter)
        # This is used to delete what is downloaded (done before
        # fetching to ensure it's cleaned up even on download failures)
        self.tracewriter.download_happened(target_dir, uri)
        down.GitDownloader(uri, target_dir, **download_cfg).download()
        return uris
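
The origins file consulted here is a YAML mapping of component name to download settings. A hypothetical entry and the way this method consumes it might look as follows (the file contents and any key besides repo are assumptions, and PyYAML is assumed to be installed):

import yaml

ORIGINS_EXAMPLE = """
nova:
  repo: git://github.com/openstack/nova.git
  branch: master
"""

download_cfg = yaml.safe_load(ORIGINS_EXAMPLE).get("nova", {})
uri = download_cfg.pop("repo", None)  # -> "git://github.com/openstack/nova.git"
# Whatever remains (here just 'branch') is handed to GitDownloader as kwargs.
print(uri, download_cfg)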
Example no. 59
0
 def download(self):
     (from_uri, target_dir) = self._get_download_location()
     if not from_uri and not target_dir:
         return []
     else:
         uris = [from_uri]
         utils.log_iterable(uris,
                            logger=LOG,
                            header="Downloading from %s uris" % (len(uris)))
         self.tracewriter.download_happened(target_dir, from_uri)
         dirs_made = sh.mkdirslist(target_dir)
         self.tracewriter.dirs_made(*dirs_made)
         down.download(self.distro, from_uri, target_dir)
         return uris
Example no. 60
0
    def _config_adjust_api(self, nova_conf):
        ''' This method has the smarts to build the configuration file based on
            various runtime values. A useful reference for figuring this out
            is at http://docs.openstack.org/diablo/openstack-compute/admin/content/ch_configuring-openstack-compute.html
            See also: https://github.com/openstack/nova/blob/master/etc/nova/nova.conf.sample
        '''

        # Used more than once so we calculate it ahead of time
        hostip = self.installer.get_option('ip')

        nova_conf.add('verbose', self.installer.get_bool_option('log_verbose'))

        # Allow destination machine to match source for resize.
        nova_conf.add('allow_resize_to_same_host', True)

        # Which scheduler do you want?
        nova_conf.add(
            'compute_scheduler_driver',
            self.installer.get_option(
                'scheduler',
                default_value='nova.scheduler.filter_scheduler.FilterScheduler'
            ))

        # Rate limit the API?
        nova_conf.add('api_rate_limit',
                      self.installer.get_bool_option('api_rate_limit'))

        # Ensure the policy.json is referenced correctly
        nova_conf.add('policy_file', '/etc/nova/policy.json')

        # Setup nova network/settings
        self._configure_network_settings(nova_conf)

        # The ip of where we are running
        nova_conf.add('my_ip', hostip)

        nova_conf.add('sql_connection', self.fetch_dbdsn())

        # Configure anything libvirt related?
        virt_driver = utils.canon_virt_driver(
            self.installer.get_option('virt_driver'))
        if virt_driver == 'libvirt':
            self._configure_libvirt(
                lv.canon_libvirt_type(
                    self.installer.get_option('libvirt_type')), nova_conf)

        # How instances will be presented
        instance_template = "%s%s" % (
            self.installer.get_option('instance_name_prefix'),
            self.installer.get_option('instance_name_postfix'))
        if not instance_template:
            instance_template = 'instance-%08x'
        nova_conf.add('instance_name_template', instance_template)

        # Enable the standard extensions
        nova_conf.add(
            'osapi_compute_extension',
            'nova.api.openstack.compute.contrib.standard_extensions')

        # Auth will be using keystone
        nova_conf.add('auth_strategy', 'keystone')

        # Is config drive being forced on?
        if self.installer.get_bool_option('force_cfg_drive'):
            nova_conf.add('force_config_drive', 'always')

        # Don't always force images to raw format, since the conversion takes time...
        nova_conf.add('force_raw_images',
                      self.installer.get_bool_option('force_raw_images'))

        # Add a checksum for the base images fetched onto each hypervisor?
        # Be warned: this check consumes CPU cycles.
        nova_conf.add('checksum_base_images',
                      self.installer.get_bool_option('checksum_base_images'))

        # Setup the interprocess locking directory (don't put me on shared storage)
        lock_path = self.installer.get_option('lock_path')
        if not lock_path:
            lock_path = sh.joinpths(self.installer.get_option('component_dir'),
                                    'locks')
        sh.mkdirslist(lock_path, tracewriter=self.tracewriter)
        nova_conf.add('lock_path', lock_path)

        # Vnc settings setup
        self._configure_vnc(nova_conf)

        # Where our paste config is
        nova_conf.add('api_paste_config', self.target_config(PASTE_CONF))

        # What our imaging service will be
        self._configure_image_service(nova_conf, hostip)

        # Configs for ec2 / s3 stuff
        nova_conf.add(
            'ec2_dmz_host',
            self.installer.get_option('ec2_dmz_host', default_value=hostip))
        nova_conf.add('s3_host', hostip)

        # How is your message queue set up?
        self.setup_rpc(nova_conf, 'nova.rpc.impl_kombu')

        # The USB tablet device is meant to improve mouse behavior in
        # the VNC console, but it has the side effect of increasing
        # the CPU usage of an idle VM tenfold.
        nova_conf.add('use_usb_tablet', False)

        # Where instances will be stored
        instances_path = self.installer.get_option('instances_path')
        if not instances_path:
            instances_path = sh.joinpths(
                self.installer.get_option('component_dir'), 'instances')
        self._configure_instances_path(instances_path, nova_conf)

        # Is this a multihost setup?
        self._configure_multihost(nova_conf)

        # Handle any virt driver specifics
        self._configure_virt_driver(nova_conf)

        # Handle configuring the conductor service
        self._configure_conductor(nova_conf)
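
For orientation, the sketch below approximates the kind of [DEFAULT] fragment these add() calls accumulate. Every value is an illustrative placeholder, and the real file is written by anvil's own config classes rather than configparser:

import configparser
import io

nova_conf = configparser.ConfigParser(interpolation=None)
nova_conf["DEFAULT"] = {
    "verbose": "True",
    "allow_resize_to_same_host": "True",
    "compute_scheduler_driver": "nova.scheduler.filter_scheduler.FilterScheduler",
    "api_rate_limit": "False",
    "policy_file": "/etc/nova/policy.json",
    "my_ip": "10.0.0.10",
    "auth_strategy": "keystone",
    "instance_name_template": "instance-%08x",
    "force_raw_images": "True",
    "lock_path": "/var/lib/anvil/nova/locks",
    "api_paste_config": "/etc/nova/api-paste.ini",
    "use_usb_tablet": "False",
}
buf = io.StringIO()
nova_conf.write(buf)
print(buf.getvalue())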