Example #1
    def __init__(self, dbobj=None, logger=LOGGER):
        self.config = Config()
        self.dbobj = dbobj
        self.logger = logger

        if self.template_type is None:
            raise InternalError("Plenary class %s did not set the template "
                                "type" % self.__class__.__name__)

        # Object templates live under the branch-specific build directory.
        # Everything else lives under the common plenary directory.
        if self.template_type == "object":
            if not dbobj or not hasattr(dbobj, "branch"):
                raise InternalError("Plenaries meant to be compiled need a DB "
                                    "object that has a branch; got: %r" %
                                    dbobj)
            self.dir = "%s/domains/%s/profiles" % (self.config.get(
                "broker", "builddir"), dbobj.branch.name)
        else:
            self.dir = self.config.get("broker", "plenarydir")

        self.loadpath = None
        self.plenary_template = None
        self.plenary_core = None

        self.new_content = None
        # The following attributes are for stash/restore_stash
        self.old_content = None
        self.old_mtime = None
        self.stashed = False
        self.removed = False
        self.changed = False
Example #2
    def onEnter(self, dbcluster):
        dbdecommissioned = HostLifecycle.get_unique(object_session(dbcluster),
                                                    "decommissioned",
                                                    compel=True)

        config = Config()
        archetype = dbcluster.personality.archetype
        section = "archetype_" + archetype.name
        opt = "allow_cascaded_deco"

        if dbcluster.hosts and (not config.has_option(section, opt)
                                or not config.getboolean(section, opt)):
            raise ArgumentError("Cannot change state to {0}, as {1}'s "
                                "archetype is {2}.".format(
                                    dbdecommissioned.name, dbcluster,
                                    archetype.name))

        if dbcluster.machines:
            raise ArgumentError("Cannot change state to {0}, as {1} has "
                                "{2} VM(s).".format(dbdecommissioned.name,
                                                    dbcluster,
                                                    len(dbcluster.machines)))

        for dbhost in dbcluster.hosts:
            dbhost.status.transition(dbhost, dbdecommissioned)
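
The hosts check above is gated by a per-archetype override in the broker
configuration. A hypothetical fragment that would allow cascaded
decommissioning (the archetype name is invented; the section prefix and
option name come from the code):

    [archetype_hacluster]
    allow_cascaded_deco = true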
Example #3
def sync_domain(dbdomain, logger=LOGGER, locked=False):
    """Update templates on disk to match contents of branch in template-king.

    If this domain is tracking another, first update the branch in
    template-king with the latest from the tracking branch.  Also save
    the current (previous) commit as a potential rollback point.

    """
    config = Config()
    kingdir = config.get("broker", "kingdir")
    domaindir = os.path.join(config.get("broker", "domainsdir"), dbdomain.name)
    git_env = {"PATH": "%s:%s" % (config.get("broker", "git_path"),
                                  os.environ.get("PATH", ""))}
    if dbdomain.tracked_branch:
        # Might need to revisit if using this helper from rollback...
        run_command(["git", "push", ".",
                     "%s:%s" % (dbdomain.tracked_branch.name, dbdomain.name)],
                    path=kingdir, env=git_env, logger=logger)
    run_command(["git", "fetch", "--prune"], path=domaindir, env=git_env, logger=logger)
    if dbdomain.tracked_branch:
        out = run_command(["git", "rev-list", "-n", "1", "HEAD"],
                          path=domaindir, env=git_env, logger=logger)
        rollback_commit = out.strip()
    try:
        if not locked:
            key = CompileKey(domain=dbdomain.name, logger=logger)
            lock_queue.acquire(key)
        run_command(["git", "reset", "--hard", "origin/%s" % dbdomain.name],
                    path=domaindir, env=git_env, logger=logger)
    finally:
        if not locked:
            lock_queue.release(key)
    if dbdomain.tracked_branch:
        dbdomain.rollback_commit = rollback_commit
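
For reference, the git operations sync_domain runs for a tracked domain, in
order (kingdir and domaindir are resolved from the [broker] section as shown
above):

    # in kingdir:    git push . <tracked_branch>:<domain>
    # in domaindir:  git fetch --prune
    # in domaindir:  git rev-list -n 1 HEAD            (saved as rollback_commit)
    # in domaindir:  git reset --hard origin/<domain>  (under the CompileKey lock)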
Example #4
def discover_network_types(dbapi_con, connection_record):  # pylint: disable=W0613
    config = Config()
    if not config.has_option("broker", "default_network_type"):  # pragma: no cover
        raise InternalError("The default_network_type option is missing from "
                            "the [broker] section in the configuration.")

    default_type = config.get("broker", "default_network_type")
    default_section = "network_" + default_type
    if not config.has_section(default_section):  # pragma: no cover
        raise InternalError("The default network type is %s, but there's no "
                            "section named [%s] in the configuration." %
                            (default_type, default_section))

    nettypes = {}

    # This function should be called only once, but you never know...
    if Network.network_type_map:
        return

    for section in config.sections():
        if not section.startswith("network_"):
            continue
        name = section[8:]
        nettypes[name] = NetworkProperties(config, name)
        LOGGER.info("Configured network type %s", name)

    Network.network_type_map = nettypes
    Network.default_network_props = nettypes[default_type]
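
discover_network_types expects a default_network_type option in [broker] plus
one [network_<name>] section per network type. A hypothetical configuration
fragment satisfying those checks (the type names are invented; the options
inside each network section depend on NetworkProperties and are not shown):

    [broker]
    default_network_type = standard

    [network_standard]
    # options consumed by NetworkProperties(config, "standard")

    [network_management]
    # a second, optional network type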
Example #5
def run_git(args, env=None, path=".",
            logger=LOGGER, loglevel=logging.INFO, filterre=None):
    config = Config()
    if env:
        git_env = env.copy()
    else:
        git_env = {}
    env_path = git_env.get("PATH", os.environ.get("PATH", ""))
    git_env["PATH"] = "%s:%s" % (config.get("broker", "git_path"), env_path)

    for name in ["git_author_name", "git_author_email",
                 "git_committer_name", "git_committer_email"]:
        if not config.has_option("broker", name):
            continue
        value = config.get("broker", name)
        git_env[name.upper()] = value

    if isinstance(args, list):
        git_args = args[:]
        if git_args[0] != "git":
            git_args.insert(0, "git")
    else:
        git_args = ["git", args]

    return run_command(git_args, env=git_env, path=path,
                       logger=logger, loglevel=loglevel, filterre=filterre)
Example #6
    def onEnter(self, dbcluster):
        dbdecommissioned = HostLifecycle.get_unique(object_session(dbcluster),
                                                    "decommissioned",
                                                    compel=True)

        config = Config()
        archetype = dbcluster.personality.archetype
        section = "archetype_" + archetype.name
        opt = "allow_cascaded_deco"

        if dbcluster.hosts and (not config.has_option(section, opt) or
                                not config.getboolean(section, opt)):
            raise ArgumentError("Cannot change state to {0}, as {1}'s "
                                "archetype is {2}."
                                .format(dbdecommissioned.name, dbcluster,
                                        archetype.name))

        if dbcluster.virtual_machines:
            raise ArgumentError("Cannot change state to {0}, as {1} has "
                                "{2} VM(s)."
                                .format(dbdecommissioned.name, dbcluster,
                                        len(dbcluster.virtual_machines)))

        for dbhost in dbcluster.hosts:
            dbhost.status.transition(dbhost, dbdecommissioned)
Example #7
    def config_proto(self, node, command):
        desc_node = node.find("message_class")
        if desc_node is None or "name" not in desc_node.attrib or \
           "module" not in desc_node.attrib:
            raise ProtocolError("Invalid protobuf definition for %s." % command)

        module = desc_node.attrib["module"]
        msgclass = desc_node.attrib["name"]

        if module in self.loaded_protocols and \
           self.loaded_protocols[module] == False:
            raise ProtocolError("Protocol %s: previous import attempt was "
                                "unsuccessful" % module)

        if module not in self.loaded_protocols:
            config = Config()
            protodir = config.get("protocols", "directory")

            # Modifying sys.path here is ugly. We could try playing with
            # find_module()/load_module(), but there are dependencies between
            # the protocols, that could fail if sys.path is not set up and the
            # protocols are loaded in the wrong order.
            if protodir not in sys.path:
                sys.path.append(protodir)

            try:
                self.loaded_protocols[module] = __import__(module)
            except ImportError, err:  # pragma: no cover
                self.loaded_protocols[module] = False
                raise ProtocolError("Protocol %s: %s" % (module, err))
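
The import handling above follows a cache-on-first-import pattern: a
successful import is remembered, and a failed one is remembered as False so
it is not retried. A self-contained sketch of that pattern, using "json" as a
stand-in for a generated _pb2 module:

loaded_protocols = {}

def load_protocol(module):
    if loaded_protocols.get(module) is False:
        raise RuntimeError("Protocol %s: previous import attempt was "
                           "unsuccessful" % module)
    if module not in loaded_protocols:
        try:
            loaded_protocols[module] = __import__(module)
        except ImportError:
            loaded_protocols[module] = False
            raise
    return loaded_protocols[module]

# The module is imported only once; later calls return the cached object.
assert load_protocol("json") is load_protocol("json")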
Example #8
 def __init__(self, logger=LOGGER):
     config = Config()
     self.logger = logger
     self.dsdb = config.get("broker", "dsdb")
     self.dsdb_use_testdb = config.getboolean("broker", "dsdb_use_testdb")
     self.location_sync = config.getboolean("broker", "dsdb_location_sync")
     self.actions = []
     self.rollback_list = []
Example #9
 def __init__(self, logger=LOGGER):
     config = Config()
     self.logger = logger
     self.dsdb = config.get("broker", "dsdb")
     self.dsdb_use_testdb = config.getboolean("broker", "dsdb_use_testdb")
     self.location_sync = config.getboolean("broker", "dsdb_location_sync")
     self.actions = []
     self.rollback_list = []
Example #10
 def __init__(self, twistd=None, configfile=None):
     self.twistd = twistd or self.default_twistd
     self.configfile = configfile or self.default_configfile
     self.config = Config(configfile=self.configfile)
     self.pidfile = os.path.join(self.config.get("broker", "rundir"),
                                 "aqd.pid")
     self.logfile = self.config.get("broker", "logfile")
     self.coverage = os.path.join(self.config.get("broker", "logdir"),
                                  "aqd.coverage")
Example #11
 def outputdirs(self):
     """Returns a list of directories that should exist before compiling"""
     config = Config()
     dirs = []
     dirs.append(config.get("broker", "profilesdir"))
     # The regression tests occasionally have issues with panc
     # auto-creating this directory - not sure why.
     if self.domain.clusters:
         dirs.append(os.path.join(config.get("broker", "quattordir"), "build", "xml", self.domain.name, "clusters"))
     return dirs
Example #12
def cache_storage_data(only=None):
    """
    Scan a storeng-style data file, checking each line as we go

    Storeng-style data files are blocks of data. Each block starts
    with a comment describing the fields for all subsequent lines. A
    block can start at any time. Fields are separated by '|'.
    Each data line is parsed into a dict of fields, keyed by the column
    names from the most recent header.  The parsed shares are collected
    into a dict of ShareInfo objects keyed by share name.  If the 'only'
    parameter is given, scanning stops as soon as that share is found.
    """

    config = Config()
    sharedata = {}
    found_header = False
    header_idx = {}
    with open(config.get("broker", "sharedata")) as datafile:
        for line in datafile:
            if line[0] == '#':
                # A header line
                found_header = True
                hdr = line[1:].rstrip().split('|')

                header_idx = {}
                for idx, name in enumerate(hdr):
                    header_idx[name] = idx

                # Silently discard lines that don't have all the required info
                for k in ["objtype", "pshare", "server", "dg"]:
                    if k not in header_idx:
                        found_header = False
            elif not found_header:
                # We haven't found the right header line
                continue
            else:
                fields = line.rstrip().split('|')
                if len(fields) != len(header_idx):  # Silently ignore invalid lines
                    continue
                if fields[header_idx["objtype"]] != "pshare":
                    continue

                sharedata[fields[header_idx["pshare"]]] = ShareInfo(
                    server=fields[header_idx["server"]],
                    mount="/vol/%s/%s" % (fields[header_idx["dg"]],
                                          fields[header_idx["pshare"]])
                )

                # Take a shortcut if we need just a single entry
                if only and only == fields[header_idx["pshare"]]:
                    break

        return sharedata
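
A self-contained sketch of the header/field convention described in the
docstring above, run against a made-up two-line sample (the share, server and
disk-group names are invented; only the column names come from the code):

sample = [
    "#objtype|pshare|server|dg",
    "pshare|test_share|filer1|dg01",
]
header_idx = {}
for line in sample:
    if line.startswith("#"):
        header_idx = dict((name, idx)
                          for idx, name in enumerate(line[1:].split("|")))
        continue
    fields = line.split("|")
    if fields[header_idx["objtype"]] == "pshare":
        mount = "/vol/%s/%s" % (fields[header_idx["dg"]],
                                fields[header_idx["pshare"]])
        print("%s -> %s" % (fields[header_idx["pshare"]], mount))
        # prints: test_share -> /vol/dg01/test_share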
Example #13
def write_file(filename, content, mode=None, compress=None, create_directory=False, logger=LOGGER):
    """Atomically write content into the specified filename.

    The content is written into a temp file in the same directory as
    filename, and then swapped into place with rename.  This assumes
    that both the file and the directory can be written to by the
    broker.  The same directory was used instead of a temporary
    directory because atomic swaps are generally only available when
    the source and the target are on the same filesystem.

    If mode is set, change permissions on the file (newly created or
    pre-existing) to the new mode.  If unset and the file exists, the
    current permissions will be kept.  If unset and the file is new,
    the default is 0644.

    This method may raise OSError if any of the OS-related methods
    (creating the temp file, writing to it, correcting permissions,
    swapping into place) fail.  The method will attempt to remove
    the temp file if it had been created.

    If the compress keyword is passed, the content is compressed in
    memory before writing.  The only compression currently supported
    is gzip.

    """
    if compress == "gzip":
        config = Config()
        buffer = StringIO()
        compress = config.getint("broker", "gzip_level")
        zipper = gzip.GzipFile(filename, "wb", compress, buffer)
        zipper.write(content)
        zipper.close()
        content = buffer.getvalue()
    if mode is None:
        try:
            old_mode = os.stat(filename).st_mode
        except OSError:
            old_mode = 0644
    dirname, basename = os.path.split(filename)

    if not os.path.exists(dirname) and create_directory:
        os.makedirs(dirname)

    fd, fpath = mkstemp(prefix=basename, dir=dirname)
    try:
        with os.fdopen(fd, "w") as f:
            f.write(content)
        if mode is None:
            os.chmod(fpath, old_mode)
        else:
            os.chmod(fpath, mode)
        os.rename(fpath, filename)
    finally:
        if os.path.exists(fpath):
            os.remove(fpath)
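
A minimal, self-contained sketch of the temp-file-plus-rename idiom the
docstring above describes, stripped of the mode and compression handling (the
function name here is ours, not part of the original code):

import os
from tempfile import mkstemp

def atomic_write_sketch(filename, content):
    dirname, basename = os.path.split(filename)
    fd, fpath = mkstemp(prefix=basename, dir=dirname)
    try:
        with os.fdopen(fd, "w") as f:
            f.write(content)
        # rename() is only atomic when source and target are on the same
        # filesystem, which is why the temp file lives next to the target.
        os.rename(fpath, filename)
    finally:
        if os.path.exists(fpath):
            os.remove(fpath)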
Example #14
    def setUp(self):
        self.config = Config()
        self.net = DummyNetworks()

        # Need to import protocol buffers after we have the config
        # object all squared away and we can set the sys.path
        # variable appropriately.
        # It would be simpler just to change sys.path in runtests.py,
        # but this allows for each test to be run individually (without
        # the runtests.py wrapper).
        protodir = self.config.get("protocols", "directory")
        if protodir not in sys.path:
            sys.path.append(protodir)
        for m in [
                'aqdsystems_pb2', 'aqdnetworks_pb2', 'aqdservices_pb2',
                'aqddnsdomains_pb2', 'aqdlocations_pb2', 'aqdaudit_pb2',
                'aqdparamdefinitions_pb2', 'aqdparameters_pb2'
        ]:
            globals()[m] = __import__(m)

        self.user = self.config.get("broker", "user")
        self.sandboxdir = os.path.join(
            self.config.get("broker", "templatesdir"), self.user)
        self.template_extension = self.config.get("panc", "template_extension")

        # This method is cumbersome.  Should probably develop something
        # like unittest.conf.defaults.
        if self.config.has_option("unittest", "scratchdir"):
            self.scratchdir = self.config.get("unittest", "scratchdir")
            if not os.path.exists(self.scratchdir):
                os.makedirs(self.scratchdir)
        if self.config.has_option("unittest", "aurora_with_node"):
            self.aurora_with_node = self.config.get("unittest",
                                                    "aurora_with_node")
        else:
            self.aurora_with_node = "oyidb1622"
        if self.config.has_option("unittest", "aurora_without_node"):
            self.aurora_without_node = self.config.get("unittest",
                                                       "aurora_without_node")
        else:
            self.aurora_without_node = "pissp1"
        self.gzip_profiles = self.config.getboolean("panc", "gzip_output")
        self.profile_suffix = ".xml.gz" if self.gzip_profiles else ".xml"

        dsdb_coverage_dir = os.path.join(
            self.config.get("unittest", "scratchdir"), "dsdb_coverage")
        for name in [
                DSDB_EXPECT_SUCCESS_FILE, DSDB_EXPECT_FAILURE_FILE,
                DSDB_ISSUED_CMDS_FILE, DSDB_EXPECT_FAILURE_ERROR
        ]:
            path = os.path.join(dsdb_coverage_dir, name)
            try:
                os.remove(path)
            except OSError:
                pass
Example #15
    def _snapshot_db(self, test):
        # If there was an error, and we're using SQLite, create a snapshot
        # TODO: create a git-managed snapshot of the plenaries/profiles as well
        config = Config()
        dsn = config.get("database", "dsn")
        if dsn.startswith("sqlite:///"):

            dbfile = dsn[10:]
            target = dbfile + ".%s:%s" % (test.__class__.__name__,
                                          test._testMethodName)
            call(["/bin/cp", "-a", dbfile, target])
Example #16
 def __init__(self, dbhost, logger=LOGGER):
     if not isinstance(dbhost, Host):
         raise InternalError("PlenaryHost called with %s instead of Host" %
                             dbhost.__class__.name)
     PlenaryCollection.__init__(self, logger=logger)
     self.dbobj = dbhost
     self.config = Config()
     if self.config.getboolean("broker", "namespaced_host_profiles"):
         self.plenaries.append(PlenaryNamespacedHost(dbhost))
     if self.config.getboolean("broker", "flat_host_profiles"):
         self.plenaries.append(PlenaryToplevelHost(dbhost))
     self.plenaries.append(PlenaryHostData(dbhost))
Example #17
 def outputdirs(self):
     """Returns a list of directories that should exist before compiling"""
     config = Config()
     dirs = []
     dirs.append(config.get("broker", "profilesdir"))
     # The regression tests occasionally have issues with panc
     # auto-creating this directory - not sure why.
     if self.domain.clusters:
         dirs.append(os.path.join(config.get("broker", "quattordir"),
                                  "build", "xml", self.domain.name,
                                  "clusters"))
     return dirs
Example #18
def main():
    parser = argparse.ArgumentParser(description="Send out broker notifications")
    parser.add_argument("-c", "--config", dest="config",
                        help="location of the broker configuration file")
    parser.add_argument("--one_shot", action="store_true",
                        help="do just a single run and then exit")
    parser.add_argument("--debug", action="store_true",
                        help="turn on debug logs on stderr")

    opts = parser.parse_args()

    config = Config(configfile=opts.config)

    # These modules must be imported after the configuration has been
    # initialized
    from aquilon.aqdb.db_factory import DbFactory

    db = DbFactory()

    if opts.debug:
        level = logging.DEBUG
        logging.basicConfig(level=level, stream=sys.stderr,
                            format='%(asctime)s [%(levelname)s] %(message)s')
    else:
        level = logging.INFO
        logfile = os.path.join(config.get("broker", "logdir"), "aq_notifyd.log")

        handler = WatchedFileHandler(logfile)
        handler.setLevel(level)

        formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(message)s')
        handler.setFormatter(formatter)

        rootlog = logging.getLogger()
        rootlog.addHandler(handler)
        rootlog.setLevel(level)

    # Apply configured log settings
    for logname, level in config.items("logging"):
        if level not in logging._levelNames:
            continue
        logging.getLogger(logname).setLevel(logging._levelNames[level])

    logger = logging.getLogger("aq_notifyd")

    if opts.one_shot:
        update_index_and_notify(config, logger, db)
    else:
        signal.signal(signal.SIGTERM, exit_handler)
        signal.signal(signal.SIGINT, exit_handler)

        run_loop(config, logger, db)
Example #19
    def testdisabletemplatetests(self):
        config = Config()
        kingdir = config.get("broker", "kingdir")
        rundir = config.get("broker", "rundir")
        env = {}
        env["PATH"] = "%s:%s" % (config.get(
            "broker", "git_path"), os.environ.get("PATH", ""))

        tempdir = mkdtemp(prefix="fixup", dir=rundir)

        p = Popen(("git", "clone", "--shared", kingdir, "template-king",
                   "--branch", "prod"),
                  cwd=tempdir,
                  env=env,
                  stdout=PIPE,
                  stderr=PIPE)
        out, err = p.communicate()
        self.assertEqual(p.returncode, 0, "Failed to clone template-king")

        repodir = os.path.join(tempdir, "template-king")
        makefile = os.path.join(repodir, "Makefile")
        if os.path.exists(os.path.join(repodir, "t", "Makefile")):
            p = Popen(("git", "rm", "-f", os.path.join("t", "Makefile")),
                      cwd=repodir,
                      env=env,
                      stdout=PIPE,
                      stderr=PIPE)
            out, err = p.communicate()
            self.assertEqual(p.returncode, 0, "Failed to remove t/Makefile")

            p = Popen(("git", "commit", "-m", "Removed t/Makefile"),
                      cwd=repodir,
                      env=env,
                      stdout=PIPE,
                      stderr=PIPE)
            out, err = p.communicate()
            self.assertEqual(p.returncode, 0,
                             "Failed to commit removal of t/Makefile")

            for branch in ['prod', 'ny-prod']:
                p = Popen(("git", "push", "origin", "prod:%s" % branch),
                          cwd=repodir,
                          env=env,
                          stdout=PIPE,
                          stderr=PIPE)
                out, err = p.communicate()
                self.assertEqual(
                    p.returncode, 0, "Failed to push to %s, "
                    "STDOUT:\n@@@\n'%s'\n@@@\nSTDERR:\n@@@\n'%s'\n@@@\n" %
                    (branch, out, err))
        p = Popen(("rm", "-rf", tempdir))
        p.communicate()
Example #20
    def directories(self):
        """Return a list of directories required for compiling this domain"""
        config = Config()
        dirs = []

        if self.domain.branch_type == "domain":
            dirs.append(os.path.join(config.get("broker", "domainsdir"), self.domain.name))

        dirs.append(os.path.join(config.get("broker", "quattordir"), "cfg", "domains", self.domain.name))

        dirs.append(os.path.join(config.get("broker", "quattordir"), "build", "xml", self.domain.name))

        return dirs
Example #21
    def __init__(self, dbhost, logger=LOGGER):
        super(PlenaryHost, self).__init__(logger=logger)

        if not isinstance(dbhost, Host):
            raise InternalError("PlenaryHost called with %s instead of Host" %
                                dbhost.__class__.name)
        self.dbobj = dbhost
        config = Config()
        if config.getboolean("broker", "namespaced_host_profiles"):
            self.plenaries.append(PlenaryNamespacedHost.get_plenary(dbhost))
        if config.getboolean("broker", "flat_host_profiles"):
            self.plenaries.append(PlenaryToplevelHost.get_plenary(dbhost))
        self.plenaries.append(PlenaryHostData.get_plenary(dbhost))
Example #22
def sync_domain(dbdomain, logger=LOGGER, locked=False):
    """Update templates on disk to match contents of branch in template-king.

    If this domain is tracking another, first update the branch in
    template-king with the latest from the tracking branch.  Also save
    the current (previous) commit as a potential rollback point.

    """
    config = Config()
    session = object_session(dbdomain)
    kingdir = config.get("broker", "kingdir")
    domaindir = os.path.join(config.get("broker", "domainsdir"), dbdomain.name)
    git_env = {
        "PATH":
        "%s:%s" %
        (config.get("broker", "git_path"), os.environ.get("PATH", ""))
    }
    if dbdomain.tracked_branch:
        # Might need to revisit if using this helper from rollback...
        run_command([
            "git", "push", ".",
            "%s:%s" % (dbdomain.tracked_branch.name, dbdomain.name)
        ],
                    path=kingdir,
                    env=git_env,
                    logger=logger)
    run_command(["git", "fetch", "--prune"],
                path=domaindir,
                env=git_env,
                logger=logger)
    if dbdomain.tracked_branch:
        out = run_command(["git", "rev-list", "-n", "1", "HEAD"],
                          path=domaindir,
                          env=git_env,
                          logger=logger)
        rollback_commit = out.strip()
    try:
        if not locked:
            key = CompileKey(domain=dbdomain.name, logger=logger)
            lock_queue.acquire(key)
        run_command(["git", "reset", "--hard",
                     "origin/%s" % dbdomain.name],
                    path=domaindir,
                    env=git_env,
                    logger=logger)
    finally:
        if not locked:
            lock_queue.release(key)
    if dbdomain.tracked_branch:
        dbdomain.rollback_commit = rollback_commit
        session.add(dbdomain)
Example #23
    def __init__(self, dbobj=None, logger=LOGGER):
        self.config = Config()
        self.dbobj = dbobj
        self.logger = logger

        if self.template_type is None:
            raise InternalError("Plenary class %s did not set the template "
                                "type" % self.__class__.__name__)

        # Object templates live under the branch-specific build directory.
        # Everything else lives under the common plenary directory.
        if self.template_type == "object":
            if not dbobj or not hasattr(dbobj, "branch"):
                raise InternalError("Plenaries meant to be compiled need a DB "
                                    "object that has a branch; got: %r" % dbobj)
            self.dir = "%s/domains/%s/profiles" % (
                self.config.get("broker", "builddir"), dbobj.branch.name)
        else:
            self.dir = self.config.get("broker", "plenarydir")

        self.loadpath = None
        self.plenary_template = None
        self.plenary_core = None

        self.new_content = None
        # The following attributes are for stash/restore_stash
        self.old_content = None
        self.old_mtime = None
        self.stashed = False
        self.removed = False
        self.changed = False
Example #24
    def compile(self, session, only=None, locked=False,
                panc_debug_include=None, panc_debug_exclude=None,
                cleandeps=False):
        """The build directories are checked and constructed
        if necessary, so no prior setup is required.  The compile may
        take some time (current rate is 10 hosts per second, with a
        couple of seconds of constant overhead), and the possibility
        of blocking on the compile lock.

        If the 'only' parameter is provided, then it should be a
        list or set containing the profiles that need to be compiled.

        May raise ArgumentError exception, else returns the standard
        output (as a string) of the compile
        """

        config = Config()

        if self.domain.branch_type == 'sandbox':
            if not self.author:
                raise InternalError("Missing required author to compile "
                                    "sandbox %s" % self.domain.name)
            sandboxdir = os.path.join(config.get("broker", "templatesdir"),
                                      self.author.name, self.domain.name)
            if not os.path.exists(sandboxdir):
                raise ArgumentError("Sandbox directory '%s' does not exist." %
                                    sandboxdir)
            if not self.sandbox_has_latest(config, sandboxdir):
                self.logger.warn("Sandbox %s/%s does not contain the "
                                 "latest changes from the prod domain.  If "
                                 "there are failures try "
                                 "`git fetch && git merge origin/prod`" %
                                 (self.author.name, self.domain.name))

        self.logger.info("preparing domain %s for compile" % self.domain.name)

        # Ensure that the compile directory is in a good state.
        outputdir = config.get("broker", "profilesdir")

        for d in self.directories() + self.outputdirs():
            if not os.path.exists(d):
                try:
                    self.logger.info("creating %s" % d)
                    os.makedirs(d)
                except OSError, e:
                    raise ArgumentError("Failed to mkdir %s: %s" % (d, e))
Example #25
    def __init__(self, network=None, network_type=None, **kw):
        # pylint: disable=W0621
        if not isinstance(network, IPv4Network):
            raise InternalError("Expected an IPv4Network, got: %s" %
                                type(network))

        if not network_type:
            config = Config()
            network_type = config.get("broker", "default_network_type")

        self._network = network
        self._props = self.network_type_map.get(self.network_type,
                                                self.default_network_props)

        super(Network, self).__init__(ip=network.network,
                                      cidr=network.prefixlen,
                                      network_type=network_type, **kw)
Example #26
    def __init__(self, network=None, network_type=None, **kw):
        # pylint: disable=W0621
        if not isinstance(network, IPv4Network):
            raise InternalError("Expected an IPv4Network, got: %s" %
                                type(network))

        if not network_type:
            config = Config()
            network_type = config.get("broker", "default_network_type")

        self._network = network
        self._props = self.network_type_map.get(self.network_type,
                                                self.default_network_props)

        super(Network, self).__init__(ip=network.network,
                                      cidr=network.prefixlen,
                                      network_type=network_type, **kw)
Example #27
class ObjectFormatter(object):
    """This class and its subclasses are meant to do the real work of
        formatting individual objects.  The standard instance methods
        do the heavy lifting, while the static methods allow for
        delegation when needed.

        The instance methods (format_*) provide default implementations,
        but it is expected that they will be overridden to provide more
        useful information.
     """

    loaded_protocols = {}
    """The loaded_protocols dict will store the modules that are being
    loaded for each requested protocol. Rather than trying to import one
    each time, the dict can be checked and value returned."""
    config = Config()
    protodir = config.get("protocols", "directory")

    handlers = {}
    """ The handlers dictionary should have an entry for every subclass.
        Typically this will be defined immediately after defining the
        subclass.

    """

    mako_dir = os.path.join(config.get("broker", "srcdir"), "lib", "python2.6",
                            "aquilon", "worker", "formats", "mako")
    # Be careful about using the module_directory and cache!
    # Not using module_directory so that we don't have to worry about stale
    # files hanging around on upgrade.  Race conditions in writing the files
    # might also be an issue when we switch to multi-process.
    # Not using cache because it only has the lifetime of the template, and
    # because we do not have the beaker module installed.
    lookup_raw = TemplateLookup(directories=[os.path.join(mako_dir, "raw")],
                                imports=['from string import rstrip',
                                         'from aquilon.worker.formats.formatters import shift'],
                                default_filters=['unicode', 'rstrip'])
    lookup_html = TemplateLookup(directories=[os.path.join(mako_dir, "html")])

    def __init__(self):
        if hasattr(self, "protocol"):
            if not self.protocol in self.loaded_protocols:
                try:
                    self.loaded_protocols[self.protocol] = __import__(
                        self.protocol)
                except ImportError, e:  # pragma: no cover
                    self.loaded_protocols[self.protocol] = False
                    error = "path %s protocol: %s error: %s" % (
                        self.protodir, self.protocol, e)
                    raise ProtocolError(error)
            else:  # pragma: no cover
                if self.loaded_protocols[self.protocol] == False:
                    error = "path %s protocol: %s error: previous import attempt was unsuccessful" % (
                        self.protodir, self.protocol)
                    raise ProtocolError(error)
Example #28
def main():
    from aquilon.config import Config

    config = Config()
    if config.has_option("database", "module"):
        ms.modulecmd.load(config.get("database", "module"))

    db = DbFactory()
    Base.metadata.bind = db.engine

    session = db.Session()

    add_interfaces(session)
    add_addresses(session)

    session.rollback()
    raise Exception("Replace the rollback() in the code with commit() when "
                    "ready to go, and disable this exception")
Example #29
def main():
    from aquilon.config import Config

    config = Config()
    if config.has_option("database", "module"):
        ms.modulecmd.load(config.get("database", "module"))

    db = DbFactory()
    Base.metadata.bind = db.engine

    session = db.Session()

    add_interfaces(session)
    add_addresses(session)

    session.rollback()
    raise Exception("Replace the rollback() in the code with commit() when "
                    "ready to go, and disable this exception")
Example #30
    def compile(
        self, session, only=None, locked=False, panc_debug_include=None, panc_debug_exclude=None, cleandeps=False
    ):
        """The build directories are checked and constructed
        if necessary, so no prior setup is required.  The compile may
        take some time (current rate is 10 hosts per second, with a
        couple of seconds of constant overhead), and the possibility
        of blocking on the compile lock.

        If the 'only' parameter is provided, then it should be a
        list or set containing the profiles that need to be compiled.

        May raise ArgumentError exception, else returns the standard
        output (as a string) of the compile
        """

        config = Config()

        if self.domain.branch_type == "sandbox":
            if not self.author:
                raise InternalError("Missing required author to compile " "sandbox %s" % self.domain.name)
            sandboxdir = os.path.join(config.get("broker", "templatesdir"), self.author.name, self.domain.name)
            if not os.path.exists(sandboxdir):
                raise ArgumentError("Sandbox directory '%s' does not exist." % sandboxdir)
            if not self.sandbox_has_latest(config, sandboxdir):
                self.logger.warn(
                    "Sandbox %s/%s does not contain the "
                    "latest changes from the prod domain.  If "
                    "there are failures try "
                    "`git fetch && git merge origin/prod`" % (self.author.name, self.domain.name)
                )

        self.logger.info("preparing domain %s for compile" % self.domain.name)

        # Ensure that the compile directory is in a good state.
        outputdir = config.get("broker", "profilesdir")

        for d in self.directories() + self.outputdirs():
            if not os.path.exists(d):
                try:
                    self.logger.info("creating %s" % d)
                    os.makedirs(d)
                except OSError, e:
                    raise ArgumentError("Failed to mkdir %s: %s" % (d, e))
Example #31
    def __init__(self):
        """ Provides some convenient variables for commands.

        Also sets requires_* parameters for some sets of commands.
        All of the command objects are singletons (or Borg).

        """
        self.dbf = DbFactory()
        self.config = Config()
        self.az = AuthorizationBroker()
        self.formatter = ResponseFormatter()
        self.catalog = StatusCatalog()
        # Force the instance to have local copies of the class defaults...
        # This allows resources.py to modify instances without worrying
        # about inheritance issues (classes sharing required or optional
        # parameters).
        self.required_parameters = self.required_parameters[:]
        self.optional_parameters = self.optional_parameters[:]

        # Parameter checks are filled in automatically based on input.xml. This
        # lets us do some rudimentary checks before the actual command is
        # invoked.
        self.parameter_checks = {}

        self.action = self.__module__
        package_prefix = "aquilon.worker.commands."
        if self.action.startswith(package_prefix):
            self.action = self.action[len(package_prefix):]
        # self.command is set correctly in resources.py after parsing input.xml
        self.command = self.action
        # The readonly and format flags are done here for convenience
        # and simplicity.  They could be overridden by the __init__
        # method of any show/search/cat commands that do not want these
        # defaults.  Some 'one-off' commands (like ping and status)
        # just set the variables themselves.
        if self.action.startswith("show") or self.action.startswith("search"):
            self.requires_readonly = True
            self.requires_format = True
        if self.action.startswith("cat"):
            self.requires_format = True
            self.requires_readonly = True
            self._is_lock_free = True
        if not self.requires_readonly \
           and self.config.get('broker', 'mode') == 'readonly':
            self.badmode = 'readonly'
        else:
            self.badmode = False
        self._update_render(self.render)
        if not self.defer_to_thread:
            if self.requires_azcheck or self.requires_transaction:
                self.defer_to_thread = True
                log.msg("Forcing defer_to_thread to True because of "
                        "required authorization or transaction for %s" %
                        self.command)
            # Not sure how to handle formatting with deferred...
            self.requires_format = False
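
The action name above is derived by stripping the command package prefix from
the module path; a small illustration (the module name is hypothetical):

action = "aquilon.worker.commands.show_host"
package_prefix = "aquilon.worker.commands."
if action.startswith(package_prefix):
    action = action[len(package_prefix):]
print(action)  # -> show_host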
Example #32
    def testclonetemplateking(self):
        config = Config()
        source = config.get("unittest", "template_base")
        dest = config.get("broker", "kingdir")
        p = Popen(("/bin/rm", "-rf", dest), stdout=1, stderr=2)
        rc = p.wait()
        self.assertEqual(
            rc, 0, "Failed to clear old template-king directory '%s'" % dest)
        env = {}
        env["PATH"] = "%s:%s" % (config.get(
            "broker", "git_path"), os.environ.get("PATH", ""))
        p = Popen(("git", "clone", "--bare", source, dest),
                  env=env,
                  stdout=PIPE,
                  stderr=PIPE)
        (out, err) = p.communicate()
        # Ignore out/err unless we get a non-zero return code, then log it.
        self.assertEqual(
            p.returncode, 0,
            "Non-zero return code for clone of template-king, "
            "STDOUT:\n@@@\n'%s'\n@@@\nSTDERR:\n@@@\n'%s'\n@@@\n" % (out, err))
        # This value can be used to test against a different branch/commit
        # than the current 'prod'.
        new_prod = None
        if config.has_option("unittest", "template_alternate_prod"):
            new_prod = config.get("unittest", "template_alternate_prod")

        if new_prod:
            for domain in ['prod', 'ny-prod']:
                p = Popen(("git", "push", ".", '+%s:%s' % (new_prod, domain)),
                          env=env,
                          cwd=dest,
                          stdout=PIPE,
                          stderr=PIPE)
                (out, err) = p.communicate()
                # Ignore out/err unless we get a non-zero return code, then log it.
                self.assertEqual(
                    p.returncode, 0,
                    "Non-zero return code while setting alternate "
                    "'%s' branch locally to '%s':"
                    "\nSTDOUT:\n@@@\n'%s'\n@@@\n"
                    "\nSTDERR:\n@@@\n'%s'\n@@@\n" %
                    (domain, new_prod, out, err))

        # Set the default branch
        p = Popen(("git", "symbolic-ref", "HEAD", "refs/heads/prod"),
                  env=env,
                  cwd=dest,
                  stdout=PIPE,
                  stderr=PIPE)
        (out, err) = p.communicate()
        self.assertEqual(
            p.returncode, 0, "Non-zero return code while setting HEAD "
            "to refs/heads/prod:"
            "\nSTDOUT:\n@@@\n'%s'\n@@@\n"
            "\nSTDERR:\n@@@\n'%s'\n@@@\n" % (out, err))
        return
Example #33
    def directories(self):
        """Return a list of directories required for compiling this domain"""
        config = Config()
        dirs = []

        if self.domain.branch_type == 'domain':
            dirs.append(os.path.join(config.get("broker", "domainsdir"),
                                     self.domain.name))

        dirs.append(os.path.join(config.get("broker", "quattordir"),
                                 "cfg",
                                 "domains",
                                 self.domain.name))

        dirs.append(os.path.join(config.get("broker", "quattordir"),
                                 "build",
                                 "xml",
                                 self.domain.name))

        return dirs
Example #34
 def testcloneswrep(self):
     config = Config()
     source = config.get("unittest", "swrep_repository")
     dest = os.path.join(config.get("broker", "swrepdir"), "repository")
     p = Popen(("/bin/rm", "-rf", dest), stdout=1, stderr=2)
     rc = p.wait()
     self.assertEqual(rc, 0,
                      "Failed to clear old swrep directory '%s'" % dest)
     env = {}
     env["PATH"] = "%s:%s" % (config.get(
         "broker", "git_path"), os.environ.get("PATH", ""))
     p = Popen(("git", "clone", source, dest),
               env=env,
               stdout=PIPE,
               stderr=PIPE)
     (out, err) = p.communicate()
     # Ignore out/err unless we get a non-zero return code, then log it.
     self.assertEqual(
         p.returncode, 0, "Non-zero return code for clone of swrep, "
         "STDOUT:\n@@@\n'%s'\n@@@\nSTDERR:\n@@@\n'%s'\n@@@\n" % (out, err))
     return
Example #35
 def testcloneswrep(self):
     config = Config()
     source = config.get("unittest", "swrep_repository")
     dest = os.path.join(config.get("broker", "swrepdir"), "repository")
     p = Popen(("/bin/rm", "-rf", dest), stdout=1, stderr=2)
     rc = p.wait()
     self.assertEqual(rc, 0,
                      "Failed to clear old swrep directory '%s'" %
                      dest)
     env = {}
     env["PATH"] = "%s:%s" % (config.get("broker", "git_path"),
                              os.environ.get("PATH", ""))
     p = Popen(("git", "clone", source, dest),
               env=env, stdout=PIPE, stderr=PIPE)
     (out, err) = p.communicate()
     # Ignore out/err unless we get a non-zero return code, then log it.
     self.assertEqual(p.returncode, 0,
                      "Non-zero return code for clone of swrep, "
                      "STDOUT:\n@@@\n'%s'\n@@@\nSTDERR:\n@@@\n'%s'\n@@@\n"
                      % (out, err))
     return
Example #36
    def directories(self):
        """Return a list of directories required for compiling this domain"""
        config = Config()
        dirs = []

        if self.domain.branch_type == 'domain':
            dirs.append(os.path.join(config.get("broker", "domainsdir"),
                                     self.domain.name))

        dirs.append(os.path.join(config.get("broker", "cfgdir"),
                                 "domains", self.domain.name))

        # This is a bit redundant. When creating the directories, the "clusters"
        # subdir would be enough; when removing them, the base dir would be
        # enough. Having both does not hurt and avoids the need for extra logic.
        dirs.append(os.path.join(config.get("broker", "quattordir"),
                                 "build", self.domain.name))
        dirs.append(os.path.join(config.get("broker", "quattordir"),
                                 "build", self.domain.name, "clusters"))

        return dirs
Example #37
def discover_network_types(dbapi_con, connection_record):  # pylint: disable=W0613
    config = Config()
    if not config.has_option("broker", "default_network_type"):  # pragma: no cover
        raise InternalError("The default_network_type option is missing from "
                            "the [broker] section in the configuration.")

    default_type = config.get("broker", "default_network_type")
    default_section = "network_" + default_type
    if not config.has_section(default_section):  # pragma: no cover
        raise InternalError("The default network type is %s, but there's no "
                            "section named [%s] in the configuration." %
                            (default_type, default_section))

    nettypes = {}

    # This function should be called only once, but you never know...
    if Network.network_type_map:
        return

    for section in config.sections():
        if not section.startswith("network_"):
            continue
        name = section[8:]
        nettypes[name] = NetworkProperties(config, name)
        LOGGER.info("Configured network type %s" % name)

    Network.network_type_map = nettypes
    Network.default_network_props = nettypes[default_type]
Example #38
    def setUp(self):
        self.config = Config()
        self.net = DummyNetworks()

        # Need to import protocol buffers after we have the config
        # object all squared away and we can set the sys.path
        # variable appropriately.
        # It would be simpler just to change sys.path in runtests.py,
        # but this allows for each test to be run individually (without
        # the runtests.py wrapper).
        protodir = self.config.get("protocols", "directory")
        if protodir not in sys.path:
            sys.path.append(protodir)
        for m in ['aqdsystems_pb2', 'aqdnetworks_pb2', 'aqdservices_pb2',
                  'aqddnsdomains_pb2', 'aqdlocations_pb2', 'aqdaudit_pb2',
                  'aqdparamdefinitions_pb2', 'aqdparameters_pb2']:
            globals()[m] = __import__(m)

        self.user = self.config.get("broker", "user")
        self.sandboxdir = os.path.join(self.config.get("broker",
                                                       "templatesdir"),
                                       self.user)
        self.template_extension = self.config.get("panc", "template_extension")

        # This method is cumbersome.  Should probably develop something
        # like unittest.conf.defaults.
        if self.config.has_option("unittest", "scratchdir"):
            self.scratchdir = self.config.get("unittest", "scratchdir")
            if not os.path.exists(self.scratchdir):
                os.makedirs(self.scratchdir)
        if self.config.has_option("unittest", "aurora_with_node"):
            self.aurora_with_node = self.config.get("unittest",
                    "aurora_with_node")
        else:
            self.aurora_with_node = "oyidb1622"
        if self.config.has_option("unittest", "aurora_without_node"):
            self.aurora_without_node = self.config.get("unittest",
                    "aurora_without_node")
        else:
            self.aurora_without_node = "pissp1"
        self.gzip_profiles = self.config.getboolean("panc", "gzip_output")
        self.profile_suffix = ".xml.gz" if self.gzip_profiles else ".xml"

        dsdb_coverage_dir = os.path.join(self.config.get("unittest", "scratchdir"),
                                         "dsdb_coverage")
        for name in [DSDB_EXPECT_SUCCESS_FILE, DSDB_EXPECT_FAILURE_FILE,
                     DSDB_ISSUED_CMDS_FILE, DSDB_EXPECT_FAILURE_ERROR]:
            path = os.path.join(dsdb_coverage_dir, name)
            try:
                os.remove(path)
            except OSError:
                pass
Example #39
    def testdisabletemplatetests(self):
        config = Config()
        kingdir = config.get("broker", "kingdir")
        rundir = config.get("broker", "rundir")
        env = {}
        env["PATH"] = "%s:%s" % (config.get("broker", "git_path"),
                                 os.environ.get("PATH", ""))

        tempdir = mkdtemp(prefix="fixup", dir=rundir)

        p = Popen(("git", "clone", "--shared", kingdir, "template-king",
                   "--branch", "prod"),
                  cwd=tempdir, env=env, stdout=PIPE, stderr=PIPE)
        out, err = p.communicate()
        self.assertEqual(p.returncode, 0, "Failed to clone template-king")

        repodir = os.path.join(tempdir, "template-king")
        makefile = os.path.join(repodir, "Makefile")
        if os.path.exists(os.path.join(repodir, "t", "Makefile")):
            p = Popen(("git", "rm", "-f", os.path.join("t", "Makefile")),
                      cwd=repodir, env=env, stdout=PIPE, stderr=PIPE)
            out, err = p.communicate()
            self.assertEqual(p.returncode, 0, "Failed to remove t/Makefile")

            p = Popen(("git", "commit", "-m", "Removed t/Makefile"),
                      cwd=repodir, env=env, stdout=PIPE, stderr=PIPE)
            out, err = p.communicate()
            self.assertEqual(p.returncode, 0, "Failed to commit removal of t/Makefile")

            for branch in ['prod', 'ny-prod']:
                p = Popen(("git", "push", "origin", "prod:%s" % branch),
                          cwd=repodir, env=env, stdout=PIPE, stderr=PIPE)
                out, err = p.communicate()
                self.assertEqual(p.returncode, 0,
                                 "Failed to push to %s, "
                                 "STDOUT:\n@@@\n'%s'\n@@@\nSTDERR:\n@@@\n'%s'\n@@@\n"
                                 % (branch, out, err))
        p = Popen(("rm", "-rf", tempdir))
        p.communicate()
Example #40
def write_file(filename, content, mode=None, logger=LOGGER, compress=None):
    """Atomically write content into the specified filename.

    The content is written into a temp file in the same directory as
    filename, and then swapped into place with rename.  This assumes
    that both the file and the directory can be written to by the
    broker.  The same directory was used instead of a temporary
    directory because atomic swaps are generally only available when
    the source and the target are on the same filesystem.

    If mode is set, change permissions on the file (newly created or
    pre-existing) to the new mode.  If unset and the file exists, the
    current permissions will be kept.  If unset and the file is new,
    the default is 0644.

    This method may raise OSError if any of the OS-related methods
    (creating the temp file, writing to it, correcting permissions,
    swapping into place) fail.  The method will attempt to remove
    the temp file if it had been created.

    If the compress keyword is passed, the content is compressed in
    memory before writing.  The only compression currently supported
    is gzip.

    """
    if compress == 'gzip':
        config = Config()
        buffer = StringIO()
        compress = config.getint('broker', 'gzip_level')
        zipper = gzip.GzipFile(filename, 'wb', compress, buffer)
        zipper.write(content)
        zipper.close()
        content = buffer.getvalue()
    if mode is None:
        try:
            old_mode = os.stat(filename).st_mode
        except OSError, e:
            old_mode = 0644
Example #41
def write_file(filename, content, mode=None, logger=LOGGER, compress=None):
    """Atomically write content into the specified filename.

    The content is written into a temp file in the same directory as
    filename, and then swapped into place with rename.  This assumes
    that both the file and the directory can be written to by the
    broker.  The same directory was used instead of a temporary
    directory because atomic swaps are generally only available when
    the source and the target are on the same filesystem.

    If mode is set, change permissions on the file (newly created or
    pre-existing) to the new mode.  If unset and the file exists, the
    current permissions will be kept.  If unset and the file is new,
    the default is 0644.

    This method may raise OSError if any of the OS-related methods
    (creating the temp file, writing to it, correcting permissions,
    swapping into place) fail.  The method will attempt to remove
    the temp file if it had been created.

    If the compress keyword is passed, the content is compressed in
    memory before writing.  The only compression currently supported
    is gzip.

    """
    if compress == 'gzip':
        config = Config()
        buffer = StringIO()
        compress = config.getint('broker', 'gzip_level')
        zipper = gzip.GzipFile(filename, 'wb', compress, buffer)
        zipper.write(content)
        zipper.close()
        content = buffer.getvalue()
    if mode is None:
        try:
            old_mode = os.stat(filename).st_mode
        except OSError, e:
            old_mode = 0644
Example #42
    def teststop(self):
        config = Config()
        pidfile = os.path.join(config.get("broker", "rundir"), "aqd.pid")
        self.assert_(os.path.exists(pidfile))
        f = file(pidfile)
        pid = f.readline()
        self.assertNotEqual(pid, "")
        f.close()
        pid = int(pid)
        os.kill(pid, signal.SIGTERM)

        # Wait for the broker to shut down. E.g. generating code coverage may
        # take some time.
        i = 0
        while i < 180:
            i += 1
            try:
                os.kill(pid, 0)
            except OSError:
                break
            sleep(1)

        # Verify that the broker is down
        self.failUnlessRaises(OSError, os.kill, pid, 0)
Example #43
    def teststop(self):
        config = Config()
        pidfile = os.path.join(config.get("broker", "rundir"), "aqd.pid")
        self.assert_(os.path.exists(pidfile))
        f = file(pidfile)
        pid = f.readline()
        self.assertNotEqual(pid, "")
        f.close()
        pid = int(pid)
        os.kill(pid, signal.SIGTERM)

        # Wait for the broker to shut down. E.g. generating code coverage may
        # take some time.
        i = 0
        while i < 180:
            i += 1
            try:
                os.kill(pid, 0)
            except OSError:
                break
            sleep(1)

        # Verify that the broker is down
        self.failUnlessRaises(OSError, os.kill, pid, 0)
Example #44
    def testrebuild(self):
        env = {}
        for (key, value) in os.environ.items():
            env[key] = value
        env["AQDCONF"] = Config().baseconfig

        cmd = ['./build_db.py', '--delete', '--populate', 'data/unittest.dump']

        _DIR = os.path.dirname(os.path.realpath(__file__))
        p = Popen(cmd, stdout=1, stderr=2, env=env, cwd=_DIR)
        (out, err) = p.communicate()

        self.assertEqual(
            p.returncode, 0, "Database rebuild failed with returncode %s:\n"
            "STDOUT:\n%s\nSTDERR:\n%s\n" % (p.returncode, out, err))
Example #45
0
class PlenaryHost(PlenaryCollection):
    """
    A facade for Toplevel and Namespaced Hosts (below).

    This class creates either/both toplevel and namespaced host plenaries,
    based on broker configuration:
    namespaced_host_profiles (boolean):
      if namespaced profiles should be generated
    flat_host_profiles (boolean):
      if host profiles should be put into a "flat" toplevel (non-namespaced)
    """
    def __init__(self, dbhost, logger=LOGGER):
        if not isinstance(dbhost, Host):
            raise InternalError("PlenaryHost called with %s instead of Host" %
                                dbhost.__class__.__name__)
        PlenaryCollection.__init__(self, logger=logger)
        self.dbobj = dbhost
        self.config = Config()
        if self.config.getboolean("broker", "namespaced_host_profiles"):
            self.plenaries.append(PlenaryNamespacedHost(dbhost))
        if self.config.getboolean("broker", "flat_host_profiles"):
            self.plenaries.append(PlenaryToplevelHost(dbhost))
        self.plenaries.append(PlenaryHostData(dbhost))

    def write(self, locked=False, content=None):
        # Don't bother writing plenary files for non-compilable archetypes.
        if not self.dbobj.archetype.is_compileable:
            return 0

        # Standard PlenaryCollection swallows IncompleteError.  If/when
        # the Host plenaries no longer raise that error this override
        # should be removed.
        total = 0
        for plenary in self.plenaries:
            total += plenary.write(locked=locked, content=content)
        return total
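
As the docstring says, which plenaries get generated is controlled entirely by the two broker booleans; callers just build the facade and write it. A hedged usage sketch (dbhost is assumed to be a Host loaded from an existing session):

plenary = PlenaryHost(dbhost)
written = plenary.write(locked=False)
# 'written' is the number of plenary templates produced, or 0 for a
# non-compileable archetype.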
Example #46
0
class PlenaryHost(PlenaryCollection):
    """
    A facade for Toplevel and Namespaced Hosts (below).

    This class creates either/both toplevel and namespaced host plenaries,
    based on broker configuration:
    namespaced_host_profiles (boolean):
      if namespaced profiles should be generated
    flat_host_profiles (boolean):
      if host profiles should be put into a "flat" toplevel (non-namespaced)
    """

    def __init__(self, dbhost, logger=LOGGER):
        if not isinstance(dbhost, Host):
            raise InternalError("PlenaryHost called with %s instead of Host" %
                                dbhost.__class__.__name__)
        PlenaryCollection.__init__(self, logger=logger)
        self.dbobj = dbhost
        self.config = Config()
        if self.config.getboolean("broker", "namespaced_host_profiles"):
            self.plenaries.append(PlenaryNamespacedHost(dbhost))
        if self.config.getboolean("broker", "flat_host_profiles"):
            self.plenaries.append(PlenaryToplevelHost(dbhost))
        self.plenaries.append(PlenaryHostData(dbhost))

    def write(self, locked=False, content=None):
        # Don't bother writing plenary files for non-compilable archetypes.
        if not self.dbobj.archetype.is_compileable:
            return 0

        # Standard PlenaryCollection swallows IncompleteError.  If/when
        # the Host plenaries no longer raise that error this override
        # should be removed.
        total = 0
        for plenary in self.plenaries:
            total += plenary.write(locked=locked, content=content)
        return total
Example #47
0
def run_git(args,
            env=None,
            path=".",
            logger=LOGGER,
            loglevel=logging.INFO,
            filterre=None):
    config = Config()
    if env:
        git_env = env.copy()
    else:
        git_env = {}
    env_path = git_env.get("PATH", os.environ.get("PATH", ""))
    git_env["PATH"] = "%s:%s" % (config.get("broker", "git_path"), env_path)

    for name in [
            "git_author_name", "git_author_email", "git_committer_name",
            "git_committer_email"
    ]:
        if not config.has_option("broker", name):
            continue
        value = config.get("broker", name)
        git_env[name.upper()] = value

    if isinstance(args, list):
        git_args = args[:]
        if git_args[0] != "git":
            git_args.insert(0, "git")
    else:
        git_args = ["git", args]

    return run_command(git_args,
                       env=git_env,
                       path=path,
                       logger=logger,
                       loglevel=loglevel,
                       filterre=filterre)
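
A hedged usage sketch for run_git(); the repository path is illustrative:

# A list is passed through as-is (with "git" prepended if missing)...
run_git(["fetch", "--prune"], path="/var/quattor/domains/prod")

# ...while a plain string becomes a single git subcommand.
run_git("status", path="/var/quattor/domains/prod")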
Example #48
0
def main():
    parser = argparse.ArgumentParser(description="Parse AQD configuration")
    parser.add_argument("-c", "--config", dest="config", action="store",
                        help="parse the given config file instead of the default")
    parser.add_argument("--get", metavar="SECTION.NAME", action="store",
                        help="get the value of the specified configuration key")
    parser.add_argument("--list", action="store_true",
                        help="list all defined configuration options and their values")

    opts = parser.parse_args()

    config = Config(configfile=opts.config)

    if opts.get:
        get_option(config, opts.get)
    elif opts.list:
        list_all(config)
    else:
        raise SystemExit("Please specify an action.")
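
get_option() and list_all() are not part of this example. Minimal sketches of what they might look like, assuming SECTION.NAME syntax for --get and that Config behaves like a standard ConfigParser:

def get_option(config, key):
    """Print a single value given in SECTION.NAME form."""
    try:
        section, name = key.split(".", 1)
    except ValueError:
        raise SystemExit("Expected SECTION.NAME, got: %s" % key)
    if not config.has_option(section, name):
        raise SystemExit("No such option: %s" % key)
    print config.get(section, name)


def list_all(config):
    """Dump every section and option together with its resolved value."""
    for section in config.sections():
        for name, value in config.items(section):
            print "%s.%s = %s" % (section, name, value)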
Example #49
0
    def testclonetemplateking(self):
        config = Config()
        source = config.get("unittest", "template_base")
        dest = config.get("broker", "kingdir")
        p = Popen(("/bin/rm", "-rf", dest), stdout=1, stderr=2)
        rc = p.wait()
        self.assertEqual(rc, 0,
                         "Failed to clear old template-king directory '%s'" %
                         dest)
        env = {}
        env["PATH"] = "%s:%s" % (config.get("broker", "git_path"),
                                 os.environ.get("PATH", ""))
        p = Popen(("git", "clone", "--bare", source, dest),
                  env=env, stdout=PIPE, stderr=PIPE)
        (out, err) = p.communicate()
        # Ignore out/err unless we get a non-zero return code, then log it.
        self.assertEqual(p.returncode, 0,
                         "Non-zero return code for clone of template-king, "
                         "STDOUT:\n@@@\n'%s'\n@@@\nSTDERR:\n@@@\n'%s'\n@@@\n"
                         % (out, err))
        # This value can be used to test against a different branch/commit
        # than the current 'prod'.
        new_prod = None
        if config.has_option("unittest", "template_alternate_prod"):
            new_prod = config.get("unittest", "template_alternate_prod")

        if new_prod:
            for domain in ['prod', 'ny-prod']:
                p = Popen(("git", "push", ".", '+%s:%s' % (new_prod, domain)),
                          env=env, cwd=dest, stdout=PIPE, stderr=PIPE)
                (out, err) = p.communicate()
                # Ignore out/err unless we get a non-zero return code, then log it.
                self.assertEqual(p.returncode, 0,
                                 "Non-zero return code while setting alternate "
                                 "'%s' branch locally to '%s':"
                                 "\nSTDOUT:\n@@@\n'%s'\n@@@\n"
                                 "\nSTDERR:\n@@@\n'%s'\n@@@\n"
                                 % (domain, new_prod, out, err))

        # Set the default branch
        p = Popen(("git", "symbolic-ref", "HEAD", "refs/heads/prod"),
                  env=env, cwd=dest, stdout=PIPE, stderr=PIPE)
        (out, err) = p.communicate()
        self.assertEqual(p.returncode, 0,
                         "Non-zero return code while setting HEAD "
                         "to refs/heads/prod:"
                         "\nSTDOUT:\n@@@\n'%s'\n@@@\n"
                         "\nSTDERR:\n@@@\n'%s'\n@@@\n"
                         % (out, err))
        return
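
For comparison, the final symbolic-ref step could also go through the run_git() helper shown in Example #47, which pulls git_path from the broker config automatically (a sketch; dest is the bare clone created above):

run_git(["symbolic-ref", "HEAD", "refs/heads/prod"], path=dest)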
Example #50
0
def main():
    parser = argparse.ArgumentParser(description="Compile templates")
    parser.add_argument("-c",
                        "--config",
                        dest="config",
                        action="store",
                        help="location of the config file",
                        default=os.path.join(SRCDIR, "etc",
                                             "aqd.conf.defaults"))
    parser.add_argument("--basedir",
                        action="store",
                        required=True,
                        help="base directory")
    parser.add_argument("--domain",
                        action="store",
                        required=True,
                        help="domain name to compile")
    parser.add_argument("--compress_output",
                        action="store_true",
                        help="compress the generated profiles")
    parser.add_argument("--panc_jar",
                        action="store",
                        help="location of panc.jar")
    parser.add_argument("--templates",
                        action="store",
                        required=True,
                        help="location of the domain templates")
    parser.add_argument("--swrep",
                        action="store",
                        help="location of the swrep templates")
    parser.add_argument("--batch_size",
                        action="store",
                        type=int,
                        help="compiler batch size")

    options = parser.parse_args()
    config = Config(configfile=options.config)

    return run_domain_compile(options, config)
Example #51
0
    def makeService(self, options):
        # Start up coverage ASAP.
        coverage_dir = options["coveragedir"]
        if coverage_dir:
            os.makedirs(coverage_dir, 0755)
            if options["coveragerc"]:
                coveragerc = options["coveragerc"]
            else:
                coveragerc = None
            self.coverage = coverage.coverage(config_file=coveragerc)
            self.coverage.erase()
            self.coverage.start()

        # Get the config object.
        config = Config(configfile=options["config"])

        # Helper for finishing off the coverage report.
        def stop_coverage():
            log.msg("Finishing coverage")
            self.coverage.stop()
            aquilon_srcdir = os.path.join(config.get("broker", "srcdir"),
                                          "lib", "python2.6", "aquilon")
            sourcefiles = []
            for dirpath, dirnames, filenames in os.walk(aquilon_srcdir):
                # FIXME: try to do this from the coverage config file
                if dirpath.endswith("aquilon"):
                    dirnames.remove("client")
                elif dirpath.endswith("aqdb"):
                    dirnames.remove("utils")

                for filename in filenames:
                    if not filename.endswith('.py'):
                        continue
                    sourcefiles.append(os.path.join(dirpath, filename))

            self.coverage.html_report(sourcefiles, directory=coverage_dir)
            self.coverage.xml_report(sourcefiles,
                                     outfile=os.path.join(coverage_dir, "aqd.xml"))

            with open(os.path.join(coverage_dir, "aqd.coverage"), "w") as outfile:
                self.coverage.report(sourcefiles, file=outfile)

        # Make sure the coverage report gets generated.
        if coverage_dir:
            reactor.addSystemEventTrigger('after', 'shutdown', stop_coverage)

        # Set up the environment...
        m = Modulecmd()
        log_module_load(m, config.get("broker", "CheckNet_module"))
        if config.has_option("database", "module"):
            log_module_load(m, config.get("database", "module"))
        sys.path.append(config.get("protocols", "directory"))

        # Set this up before the aqdb libs get imported...
        integrate_logging(config)

        progname = os.path.split(sys.argv[0])[1]
        if progname == 'aqd':
            if config.get('broker', 'mode') != 'readwrite':
                log.msg("Broker started with aqd symlink, "
                        "setting config mode to readwrite")
                config.set('broker', 'mode', 'readwrite')
        if progname == 'aqd_readonly':
            if config.get('broker', 'mode') != 'readonly':
                log.msg("Broker started with aqd_readonly symlink, "
                        "setting config mode to readonly")
                config.set('broker', 'mode', 'readonly')
        log.msg("Loading broker in mode %s" % config.get('broker', 'mode'))

        # Dynamic import means that we can parse config options before
        # importing aqdb.  This is a hack until aqdb can be imported without
        # firing up database connections.
        resources = __import__("aquilon.worker.resources", globals(), locals(),
                ["RestServer"], -1)
        RestServer = getattr(resources, "RestServer")

        restServer = RestServer(config)
        openSite = AnonSite(restServer)

        # twisted is nicely changing the umask for us when the process is
        # set to daemonize.  This sets it back.
        restServer.set_umask()
        reactor.addSystemEventTrigger('after', 'startup', restServer.set_umask)
        reactor.addSystemEventTrigger('after', 'startup',
                                      restServer.set_thread_pool_size)

        sockdir = config.get("broker", "sockdir")
        if not os.path.exists(sockdir):
            os.makedirs(sockdir, 0700)
        os.chmod(sockdir, 0700)

        if options["usesock"]:
            return strports.service("unix:%s/aqdsock" % sockdir, openSite)

        openport = config.get("broker", "openport")
        if config.has_option("broker", "bind_address"):
            bind_address = config.get("broker", "bind_address").strip()
            openaddr = "tcp:%s:interface=%s" % (openport, bind_address)
        else:  # pragma: no cover
            bind_address = None
            openaddr = "tcp:%s" % openport

        # Return before firing up knc.
        if options["noauth"]:
            return strports.service(openaddr, openSite)

        sockname = os.path.join(sockdir, "kncsock")
        # This flag controls whether or not this process will start up
        # and monitor knc.  Except for noauth mode knc has to be running,
        # but this process doesn't have to be the thing that starts it up.
        if config.getboolean("broker", "run_knc") or \
           config.getboolean("broker", "run_git_daemon"):
            mon = GracefulProcessMonitor()
            # FIXME: Should probably run krb5_keytab here as well.
            # and/or verify that the keytab file exists.
            if config.getboolean("broker", "run_knc"):
                keytab = config.get("broker", "keytab")
                knc_args = ["/usr/bin/env",
                            "KRB5_KTNAME=FILE:%s" % keytab,
                            config.get("kerberos", "knc"), "-lS", sockname]
                if bind_address:
                    knc_args.append("-a")
                    knc_args.append(bind_address)
                knc_args.append(config.get("broker", "kncport"))
                mon.addProcess("knc", knc_args)
            if config.getboolean("broker", "run_git_daemon"):
                # The git daemon *must* be invoked using the form 'git-daemon'
                # instead of invoking git with a 'daemon' argument.  The latter
                # will fork and exec git-daemon, resulting in a new pid that
                # the process monitor won't know about!
                gitpath = config.get("broker", "git_path")
                gitdaemon = config.get("broker", "git_daemon")
                ospath = os.environ.get("PATH", "")
                args = ["/usr/bin/env", "PATH=%s:%s" % (gitpath, ospath),
                        gitdaemon, "--export-all", "--base-path=%s" %
                        config.get("broker", "git_daemon_basedir")]
                if config.has_option("broker", "git_port"):
                    args.append("--port=%s" % config.get("broker", "git_port"))
                if bind_address:
                    args.append("--listen=%s" % bind_address)
                args.append(config.get("broker", "kingdir"))
                mon.addProcess("git-daemon", args)
            mon.startService()
            reactor.addSystemEventTrigger('before', 'shutdown', mon.stopService)

        # This socket is created by twisted and only accessed by knc as
        # connections come in.
        if os.path.exists(sockname):
            try:
                log.msg("Attempting to remove old socket '%s'" % sockname)
                os.remove(sockname)
                log.msg("Succeeded removing old socket.")
            except OSError, e:
                log.msg("Could not remove old socket '%s': %s" % (sockname, e))
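
The __import__ call near the end is the long-hand spelling of "from aquilon.worker.resources import RestServer", done at runtime so the aqdb modules (and their database connections) are only pulled in after the config has been parsed. On Python 2.7 and later the same deferred import can be written more readably; a sketch:

import importlib

resources = importlib.import_module("aquilon.worker.resources")
RestServer = getattr(resources, "RestServer")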
Example #52
0
from aquilon.config import Config

from aquilon.exceptions_ import AquilonError, IncompleteError
from aquilon.aqdb.model import Base, Resource, ResourceGroup, Cluster, Host
from aquilon.aqdb.db_factory import DbFactory
from aquilon.worker.templates.base import PlenaryCollection
from aquilon.worker.templates.resource import PlenaryResource
from aquilon.worker.templates.cluster import PlenaryCluster
from aquilon.worker.templates.host import PlenaryHost
from aquilon.worker.locks import CompileKey

db = DbFactory()
Base.metadata.bind = db.engine

session = db.Session()
config = Config()


def main():
    logging.basicConfig(level=logging.DEBUG)

    query = session.query(Resource)

    old_paths = []

    with CompileKey():
        for res in query.all():
            PlenaryResource(res).write(locked=True)

            holder = res.holder.holder_object
            if isinstance(holder, ResourceGroup):
Example #53
0
class AQBroker(object):
    default_dir = os.path.dirname(__file__)
    default_twistd = os.path.realpath(os.path.join(default_dir,
                                                   '..', '..',
                                                   'bin', 'twistd.py'))
    default_configfile = os.path.realpath(os.path.join(default_dir,
                                                       'aqd.conf.scale'))

    def __init__(self, twistd=None, configfile=None):
        self.twistd = twistd or self.default_twistd
        self.configfile = configfile or self.default_configfile
        self.config = Config(configfile=self.configfile)
        self.pidfile = os.path.join(self.config.get("broker", "rundir"),
                                    "aqd.pid")
        self.logfile = self.config.get("broker", "logfile")
        self.coverage = os.path.join(self.config.get("broker", "logdir"),
                                     "aqd.coverage")

    def start(self, **kwargs):
        """Start a broker with the given config."""
        # FIXME: Make coverage configurable.
        args = [self.twistd, "--pidfile", self.pidfile,
                "--logfile", self.logfile,
                "aqd",
                # "--coverage", self.coverage,
                "--config", self.configfile]
        p = Popen(args, stdout=1, stderr=2)
        return p.wait()

    def stop(self, **kwargs):
        """Attempt to stop a running broker."""
        if os.path.exists(self.pidfile):
            f = file(self.pidfile)
            pid = f.readline()
            f.close()
            os.kill(int(pid), signal.SIGTERM)

    def get_aqservice(self):
        return self.config.get("broker", "service")

    def initialize(self, **kwargs):
        """Clear out and set up the base directory and database.
        
        Most of this was ripped straight from runtests.py.
        
        """
        p = Popen(self.config.get("kerberos", "krb5_keytab"),
                  stdout=1, stderr=2)
        if p.wait():
            raise ProcessException(code=p.returncode)

        for label in ["quattordir", "swrepdir", ]:
            dir = self.config.get("broker", label)
            if os.path.exists(dir):
                continue
            try:
                os.makedirs(dir)
            except OSError, e:
                print >>sys.stderr, "Could not create %s: %s" % (dir, e)
        
        dirs = [self.config.get("database", "dbdir")]
        for label in ["domainsdir", "kingdir", "rundir", "profilesdir",
                      "plenarydir", "logdir"]:
            dirs.append(self.config.get("broker", label))
        
        for dir in dirs:
            if os.path.exists(dir):
                print "Removing %s" % dir
                p = Popen(("/bin/rm", "-rf", dir), stdout=1, stderr=2)
                if p.wait():
                    raise ProcessException(code=p.returncode)
            try:
                os.makedirs(dir)
            except OSError, e:
                print >>sys.stderr, "Could not create %s: %s" % (dir, e)
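
A hedged sketch of how the scale-test harness above might be driven; the config file path is illustrative:

import sys

broker = AQBroker(configfile="/path/to/aqd.conf.scale")
broker.initialize()                  # wipe and recreate directories and the DB
rc = broker.start()
if rc != 0:
    print >>sys.stderr, "twistd exited with %d, broker did not start" % rc
else:
    # ... run the scale tests against the running broker ...
    broker.stop()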
Example #54
0
class TestBrokerCommand(unittest.TestCase):
    def setUp(self):
        self.config = Config()
        self.net = DummyNetworks()

        # Need to import protocol buffers after we have the config
        # object all squared away and we can set the sys.path
        # variable appropriately.
        # It would be simpler just to change sys.path in runtests.py,
        # but this allows for each test to be run individually (without
        # the runtests.py wrapper).
        protodir = self.config.get("protocols", "directory")
        if protodir not in sys.path:
            sys.path.append(protodir)
        for m in [
                'aqdsystems_pb2', 'aqdnetworks_pb2', 'aqdservices_pb2',
                'aqddnsdomains_pb2', 'aqdlocations_pb2', 'aqdaudit_pb2',
                'aqdparamdefinitions_pb2', 'aqdparameters_pb2'
        ]:
            globals()[m] = __import__(m)

        self.user = self.config.get("broker", "user")
        self.sandboxdir = os.path.join(
            self.config.get("broker", "templatesdir"), self.user)
        self.template_extension = self.config.get("panc", "template_extension")

        # This method is cumbersome.  Should probably develop something
        # like unittest.conf.defaults.
        if self.config.has_option("unittest", "scratchdir"):
            self.scratchdir = self.config.get("unittest", "scratchdir")
            if not os.path.exists(self.scratchdir):
                os.makedirs(self.scratchdir)
        if self.config.has_option("unittest", "aurora_with_node"):
            self.aurora_with_node = self.config.get("unittest",
                                                    "aurora_with_node")
        else:
            self.aurora_with_node = "oyidb1622"
        if self.config.has_option("unittest", "aurora_without_node"):
            self.aurora_without_node = self.config.get("unittest",
                                                       "aurora_without_node")
        else:
            self.aurora_without_node = "pissp1"
        self.gzip_profiles = self.config.getboolean("panc", "gzip_output")
        self.profile_suffix = ".xml.gz" if self.gzip_profiles else ".xml"

        dsdb_coverage_dir = os.path.join(
            self.config.get("unittest", "scratchdir"), "dsdb_coverage")
        for name in [
                DSDB_EXPECT_SUCCESS_FILE, DSDB_EXPECT_FAILURE_FILE,
                DSDB_ISSUED_CMDS_FILE, DSDB_EXPECT_FAILURE_ERROR
        ]:
            path = os.path.join(dsdb_coverage_dir, name)
            try:
                os.remove(path)
            except OSError:
                pass

    def tearDown(self):
        pass

    def template_name(self, *template, **args):
        if args.get("sandbox", None):
            dir = os.path.join(self.sandboxdir, args.get("sandbox"))
        elif args.get("domain", None):
            dir = os.path.join(self.config.get("broker", "domainsdir"),
                               args.get("domain"))
        else:
            self.assert_(0, "template_name() called without domain or sandbox")
        return os.path.join(dir, *template) + self.template_extension

    def plenary_name(self, *template):
        dir = self.config.get("broker", "plenarydir")
        return os.path.join(dir, *template) + self.template_extension

    def find_template(self, *template, **args):
        """ Figure out the extension of an existing template """
        if args.get("sandbox", None):
            dir = os.path.join(self.sandboxdir, args.get("sandbox"))
        elif args.get("domain", None):
            dir = os.path.join(self.config.get("broker", "domainsdir"),
                               args.get("domain"))
        else:
            self.assert_(0, "find_template() called without domain or sandbox")

        base = os.path.join(dir, *template)

        for extension in [".tpl", ".pan"]:
            if os.path.exists(base + extension):
                return base + extension
        self.assert_(0, "template %s does not exist with any extension" % base)

    def build_profile_name(self, *template, **args):
        base = os.path.join(self.config.get("broker", "builddir"), "domains",
                            args.get("domain"), "profiles", *template)
        return base + self.template_extension

    msversion_dev_re = re.compile(r'WARNING:msversion:Loading \S* from dev\n')

    def runcommand(self, command, auth=True, **kwargs):
        aq = os.path.join(self.config.get("broker", "srcdir"), "bin", "aq.py")
        if auth:
            port = self.config.get("broker", "kncport")
        else:
            port = self.config.get("broker", "openport")
        if isinstance(command, list):
            args = [str(cmd) for cmd in command]
        else:
            args = [command]
        args.insert(0, sys.executable)
        args.insert(1, aq)
        if "--aqport" not in args:
            args.append("--aqport")
            args.append(port)
        if auth:
            args.append("--aqservice")
            args.append(self.config.get("broker", "service"))
        else:
            args.append("--noauth")
        if "env" in kwargs:
            # Make sure that kerberos tickets are still present if the
            # environment is being overridden...
            env = {}
            for (key, value) in kwargs["env"].items():
                env[key] = value
            for (key, value) in os.environ.items():
                if key.find("KRB") == 0 and key not in env:
                    env[key] = value
            if 'USER' not in env:
                env['USER'] = os.environ.get('USER', '')
            kwargs["env"] = env
        p = Popen(args, stdout=PIPE, stderr=PIPE, **kwargs)
        (out, err) = p.communicate()
        # Strip any msversion dev warnings out of STDERR
        err = self.msversion_dev_re.sub('', err)
        # Lock messages are pretty common...
        err = err.replace(
            'Client status messages disabled, '
            'retries exceeded.\n', '')
        err = LOCK_RE.sub('', err)
        return (p, out, err)

    def successtest(self, command, **kwargs):
        (p, out, err) = self.runcommand(command, **kwargs)
        self.assertEqual(
            p.returncode, 0, "Non-zero return code for %s, "
            "STDOUT:\n@@@\n'%s'\n@@@\n"
            "STDERR:\n@@@\n'%s'\n@@@\n" % (command, out, err))
        return (out, err)

    def statustest(self, command, **kwargs):
        (out, err) = self.successtest(command, **kwargs)
        self.assertEmptyOut(out, command)
        return err

    def failuretest(self, command, returncode, **kwargs):
        (p, out, err) = self.runcommand(command, **kwargs)
        self.assertEqual(
            p.returncode, returncode, "Non-%s return code %s for %s, "
            "STDOUT:\n@@@\n'%s'\n@@@\n"
            "STDERR:\n@@@\n'%s'\n@@@\n" %
            (returncode, p.returncode, command, out, err))
        return (out, err)

    def assertEmptyStream(self, name, contents, command):
        self.assertEqual(
            contents, "", "%s for %s was not empty:\n@@@\n'%s'\n@@@\n" %
            (name, command, contents))

    def assertEmptyErr(self, contents, command):
        self.assertEmptyStream("STDERR", contents, command)

    def assertEmptyOut(self, contents, command):
        self.assertEmptyStream("STDOUT", contents, command)

    def commandtest(self, command, **kwargs):
        (p, out, err) = self.runcommand(command, **kwargs)
        self.assertEmptyErr(err, command)
        self.assertEqual(
            p.returncode, 0,
            "Non-zero return code for %s, STDOUT:\n@@@\n'%s'\n@@@\n" %
            (command, out))
        return out

    def noouttest(self, command, **kwargs):
        out = self.commandtest(command, **kwargs)
        self.assertEqual(
            out, "",
            "STDOUT for %s was not empty:\n@@@\n'%s'\n@@@\n" % (command, out))

    def ignoreoutputtest(self, command, **kwargs):
        (p, out, err) = self.runcommand(command, **kwargs)
        # Ignore out/err unless we get a non-zero return code, then log it.
        self.assertEqual(
            p.returncode, 0,
            "Non-zero return code for %s, STDOUT:\n@@@\n'%s'\n@@@\nSTDERR:\n@@@\n'%s'\n@@@\n"
            % (command, out, err))
        return

    # Right now, commands are not implemented consistently.  When that is
    # addressed, this unit test should be updated.
    def notfoundtest(self, command, **kwargs):
        (p, out, err) = self.runcommand(command, **kwargs)
        if p.returncode == 0:
            self.assertEqual(
                err, "", "STDERR for %s was not empty:\n@@@\n'%s'\n@@@\n" %
                (command, err))
            self.assertEqual(
                out, "", "STDOUT for %s was not empty:\n@@@\n'%s'\n@@@\n" %
                (command, out))
        else:
            self.assertEqual(
                p.returncode, 4, "Return code for %s was %d instead of %d"
                "\nSTDOUT:\n@@@\n'%s'\n@@@"
                "\nSTDERR:\n@@@\n'%s'\n@@@" %
                (command, p.returncode, 4, out, err))
            self.assertEqual(
                out, "", "STDOUT for %s was not empty:\n@@@\n'%s'\n@@@\n" %
                (command, out))
            self.failUnless(
                err.find("Not Found") >= 0,
                "STDERR for %s did not include Not Found:"
                "\n@@@\n'%s'\n@@@\n" % (command, err))
        return err

    def badrequesttest(self, command, ignoreout=False, **kwargs):
        (p, out, err) = self.runcommand(command, **kwargs)
        self.assertEqual(
            p.returncode, 4, "Return code for %s was %d instead of %d"
            "\nSTDOUT:\n@@@\n'%s'\n@@@"
            "\nSTDERR:\n@@@\n'%s'\n@@@" % (command, p.returncode, 4, out, err))
        self.failUnless(
            err.find("Bad Request") >= 0,
            "STDERR for %s did not include Bad Request:"
            "\n@@@\n'%s'\n@@@\n" % (command, err))
        if not ignoreout and "--debug" not in command:
            self.assertEqual(
                out, "", "STDOUT for %s was not empty:\n@@@\n'%s'\n@@@\n" %
                (command, out))
        return err

    def unauthorizedtest(self, command, auth=False, msgcheck=True, **kwargs):
        (p, out, err) = self.runcommand(command, auth=auth, **kwargs)
        self.assertEqual(
            p.returncode, 4, "Return code for %s was %d instead of %d"
            "\nSTDOUT:\n@@@\n'%s'\n@@@"
            "\nSTDERR:\n@@@\n'%s'\n@@@" % (command, p.returncode, 4, out, err))
        self.assertEqual(
            out, "",
            "STDOUT for %s was not empty:\n@@@\n'%s'\n@@@\n" % (command, out))
        self.failUnless(
            err.find("Unauthorized:") >= 0,
            "STDERR for %s did not include Unauthorized:"
            "\n@@@\n'%s'\n@@@\n" % (command, err))
        if msgcheck:
            self.searchoutput(err, r"Unauthorized (anonymous )?access attempt",
                              command)
        return err

    def internalerrortest(self, command, **kwargs):
        (p, out, err) = self.runcommand(command, **kwargs)
        self.assertEqual(
            p.returncode, 5, "Return code for %s was %d instead of %d"
            "\nSTDOUT:\n@@@\n'%s'\n@@@"
            "\nSTDERR:\n@@@\n'%s'\n@@@" % (command, p.returncode, 5, out, err))
        self.assertEqual(
            out, "",
            "STDOUT for %s was not empty:\n@@@\n'%s'\n@@@\n" % (command, out))
        self.assertEqual(
            err.find("Internal Server Error"), 0,
            "STDERR for %s did not start with "
            "Internal Server Error:\n@@@\n'%s'\n@@@\n" % (command, err))
        return err

    def unimplementederrortest(self, command, **kwargs):
        (p, out, err) = self.runcommand(command, **kwargs)
        self.assertEqual(
            p.returncode, 5, "Return code for %s was %d instead of %d"
            "\nSTDOUT:\n@@@\n'%s'\n@@@"
            "\nSTDERR:\n@@@\n'%s'\n@@@" % (command, p.returncode, 5, out, err))
        self.assertEqual(
            out, "",
            "STDOUT for %s was not empty:\n@@@\n'%s'\n@@@\n" % (command, out))
        self.assertEqual(
            err.find("Not Implemented"), 0, "STDERR for %s did not start with "
            "Not Implemented:\n@@@\n'%s'\n@@@\n" % (command, err))
        return err

    # Test for conflicting or invalid aq client options.
    def badoptiontest(self, command, **kwargs):
        (p, out, err) = self.runcommand(command, **kwargs)
        self.assertEqual(
            p.returncode, 2, "Return code for %s was %d instead of %d"
            "\nSTDOUT:\n@@@\n'%s'\n@@@"
            "\nSTDERR:\n@@@\n'%s'\n@@@" % (command, p.returncode, 2, out, err))
        self.assertEqual(
            out, "",
            "STDOUT for %s was not empty:\n@@@\n'%s'\n@@@\n" % (command, out))
        return err

    def partialerrortest(self, command, **kwargs):
        # Currently these two cases behave the same way - same exit code
        # and behavior.
        return self.badoptiontest(command, **kwargs)

    def matchoutput(self, out, s, command):
        self.assert_(
            out.find(s) >= 0,
            "output for %s did not include '%s':\n@@@\n'%s'\n@@@\n" %
            (command, s, out))

    def matchclean(self, out, s, command):
        self.assert_(
            out.find(s) < 0, "output for %s includes '%s':\n@@@\n'%s'\n@@@\n" %
            (command, s, out))

    def searchoutput(self, out, r, command):
        if isinstance(r, str):
            m = re.search(r, out, re.MULTILINE)
        else:
            m = re.search(r, out)
        self.failUnless(
            m, "output for %s did not match '%s':\n@@@\n'%s'\n@@@\n" %
            (command, r, out))
        return m

    def searchclean(self, out, r, command):
        if isinstance(r, str):
            m = re.search(r, out, re.MULTILINE)
        else:
            m = re.search(r, out)
        self.failIf(
            m, "output for %s matches '%s':\n@@@\n'%s'\n@@@\n" %
            (command, r, out))

    def parse_proto_msg(self, listclass, attr, msg, expect=None):
        protolist = listclass()
        protolist.ParseFromString(msg)
        received = len(getattr(protolist, attr))
        if expect is None:
            self.failUnless(
                received > 0,
                "No %s listed in %s protobuf message\n" % (attr, listclass))
        else:
            self.failUnlessEqual(
                received, expect,
                "%d %s expected, got %d\n" % (expect, attr, received))
        return protolist

    def parse_netlist_msg(self, msg, expect=None):
        return self.parse_proto_msg(aqdnetworks_pb2.NetworkList, 'networks',
                                    msg, expect)

    def parse_hostlist_msg(self, msg, expect=None):
        return self.parse_proto_msg(aqdsystems_pb2.HostList, 'hosts', msg,
                                    expect)

    def parse_clusters_msg(self, msg, expect=None):
        return self.parse_proto_msg(aqdsystems_pb2.ClusterList, 'clusters',
                                    msg, expect)

    def parse_location_msg(self, msg, expect=None):
        return self.parse_proto_msg(aqdlocations_pb2.LocationList, 'locations',
                                    msg, expect)

    def parse_dns_domainlist_msg(self, msg, expect=None):
        return self.parse_proto_msg(aqddnsdomains_pb2.DNSDomainList,
                                    'dns_domains', msg, expect)

    def parse_service_msg(self, msg, expect=None):
        return self.parse_proto_msg(aqdservices_pb2.ServiceList, 'services',
                                    msg, expect)

    def parse_servicemap_msg(self, msg, expect=None):
        return self.parse_proto_msg(aqdservices_pb2.ServiceMapList,
                                    'servicemaps', msg, expect)

    def parse_personality_msg(self, msg, expect=None):
        return self.parse_proto_msg(aqdsystems_pb2.PersonalityList,
                                    'personalities', msg, expect)

    def parse_os_msg(self, msg, expect=None):
        return self.parse_proto_msg(aqdsystems_pb2.OperatingSystemList,
                                    'operating_systems', msg, expect)

    def parse_audit_msg(self, msg, expect=None):
        return self.parse_proto_msg(aqdaudit_pb2.TransactionList,
                                    'transactions', msg, expect)

    def parse_resourcelist_msg(self, msg, expect=None):
        return self.parse_proto_msg(aqdsystems_pb2.ResourceList, 'resources',
                                    msg, expect)

    def parse_paramdefinition_msg(self, msg, expect=None):
        return self.parse_proto_msg(
            aqdparamdefinitions_pb2.ParamDefinitionList, 'param_definitions',
            msg, expect)

    def parse_parameters_msg(self, msg, expect=None):
        return self.parse_proto_msg(aqdparameters_pb2.ParameterList,
                                    'parameters', msg, expect)

    def gitenv(self, env=None):
        """Configure a known sanitised environment"""
        git_path = self.config.get("broker", "git_path")
        # The "publish" test abuses gitenv(), and it needs the Python interpreter
        # in the path, because it runs the template unit tests which in turn
        # call the aq command
        python_path = os.path.dirname(sys.executable)
        newenv = {}
        newenv["USER"] = os.environ.get('USER', '')
        if env:
            for (key, value) in env.iteritems():
                newenv[key] = value
        if "PATH" in newenv:
            newenv["PATH"] = "%s:%s:%s" % (git_path, python_path,
                                           newenv["PATH"])
        else:
            newenv["PATH"] = "%s:%s:%s" % (git_path, python_path,
                                           '/bin:/usr/bin')
        return newenv

    def gitcommand_raw(self, command, **kwargs):
        if isinstance(command, list):
            args = command[:]
        else:
            args = [command]
        args.insert(0, "git")
        env = self.gitenv(kwargs.pop("env", None))
        p = Popen(args, stdout=PIPE, stderr=PIPE, env=env, **kwargs)
        return p

    def gitcommand(self, command, **kwargs):
        p = self.gitcommand_raw(command, **kwargs)
        # Ignore out/err unless we get a non-zero return code, then log it.
        (out, err) = p.communicate()
        self.assertEqual(
            p.returncode, 0,
            "Non-zero return code for %s, STDOUT:\n@@@\n'%s'\n@@@\nSTDERR:\n@@@\n'%s'\n@@@\n"
            % (command, out, err))
        return (out, err)

    def gitcommand_expectfailure(self, command, **kwargs):
        p = self.gitcommand_raw(command, **kwargs)
        # Ignore out/err unless we get a non-zero return code, then log it.
        (out, err) = p.communicate()
        self.failIfEqual(
            p.returncode, 0,
            "Zero return code for %s, STDOUT:\n@@@\n'%s'\n@@@\nSTDERR:\n@@@\n'%s'\n@@@\n"
            % (command, out, err))
        return (out, err)

    def check_git_merge_health(self, repo):
        command = "merge HEAD"
        out = self.gitcommand(command.split(" "), cwd=repo)
        return

    def grepcommand(self, command, **kwargs):
        if self.config.has_option("unittest", "grep"):
            grep = self.config.get("unittest", "grep")
        else:
            grep = "/bin/grep"
        if isinstance(command, list):
            args = command[:]
        else:
            args = [command]
        args.insert(0, grep)
        env = {}
        p = Popen(args, stdout=PIPE, stderr=PIPE, **kwargs)
        (out, err) = p.communicate()
        # Ignore out/err unless we get a non-zero return code, then log it.
        if p.returncode == 0:
            return out.splitlines()
        if p.returncode == 1:
            return []
        self.fail("Error return code for %s, "
                  "STDOUT:\n@@@\n'%s'\n@@@\nSTDERR:\n@@@\n'%s'\n@@@\n" %
                  (command, out, err))

    def findcommand(self, command, **kwargs):
        if self.config.has_option("unittest", "find"):
            find = self.config.get("unittest", "find")
        else:
            find = "/usr/bin/find"
        if isinstance(command, list):
            args = command[:]
        else:
            args = [command]
        args.insert(0, find)
        env = {}
        p = Popen(args, stdout=PIPE, stderr=PIPE, **kwargs)
        (out, err) = p.communicate()
        # Ignore out/err unless we get a non-zero return code, then log it.
        if p.returncode == 0:
            return out.splitlines()
        self.fail("Error return code for %s, "
                  "STDOUT:\n@@@\n'%s'\n@@@\nSTDERR:\n@@@\n'%s'\n@@@\n" %
                  (command, out, err))

    def writescratch(self, filename, contents):
        scratchfile = os.path.join(self.scratchdir, filename)
        with open(scratchfile, 'w') as f:
            f.write(contents)
        return scratchfile

    def readscratch(self, filename):
        scratchfile = os.path.join(self.scratchdir, filename)
        with open(scratchfile, 'r') as f:
            contents = f.read()
        return contents

    def dsdb_expect(self, command, fail=False, errstr=""):
        dsdb_coverage_dir = os.path.join(
            self.config.get("unittest", "scratchdir"), "dsdb_coverage")
        if fail:
            filename = DSDB_EXPECT_FAILURE_FILE
        else:
            filename = DSDB_EXPECT_SUCCESS_FILE

        expected_name = os.path.join(dsdb_coverage_dir, filename)
        with open(expected_name, "a") as fp:
            if isinstance(command, list):
                fp.write(" ".join([str(cmd) for cmd in command]))
            else:
                fp.write(str(command))
            fp.write("\n")
        if fail and errstr:
            errfile = DSDB_EXPECT_FAILURE_ERROR
            expected_name = os.path.join(dsdb_coverage_dir, errfile)
            with open(expected_name, "a") as fp:
                fp.write(errstr)
                fp.write("\n")

    def dsdb_expect_add(self,
                        hostname,
                        ip,
                        interface=None,
                        mac=None,
                        primary=None,
                        comments=None,
                        fail=False):
        command = [
            "add_host", "-host_name", hostname, "-ip_address",
            str(ip), "-status", "aq"
        ]
        if interface:
            command.extend(
                ["-interface_name",
                 str(interface).replace('/', '_')])
        if mac:
            command.extend(["-ethernet_address", str(mac)])
        if primary:
            command.extend(["-primary_host_name", primary])
        if comments:
            command.extend(["-comments", comments])

        self.dsdb_expect(" ".join(command), fail=fail)

    def dsdb_expect_delete(self, ip, fail=False):
        self.dsdb_expect("delete_host -ip_address %s" % ip, fail=fail)

    def dsdb_expect_update(self,
                           fqdn,
                           iface=None,
                           ip=None,
                           mac=None,
                           comments=None,
                           fail=False):
        command = ["update_aqd_host", "-host_name", fqdn]
        if iface:
            command.extend(["-interface_name", iface])
        if ip:
            command.extend(["-ip_address", str(ip)])
        if mac:
            command.extend(["-ethernet_address", str(mac)])
        if comments:
            command.extend(["-comments", comments])
        self.dsdb_expect(" ".join(command), fail=fail)

    def dsdb_expect_rename(self,
                           fqdn,
                           new_fqdn=None,
                           iface=None,
                           new_iface=None,
                           fail=False):
        command = ["update_aqd_host", "-host_name", fqdn]
        if new_fqdn:
            command.extend(["-primary_host_name", new_fqdn])
        if iface:
            command.extend(["-interface_name", iface])
        if new_iface:
            command.extend(["-new_interface_name", new_iface])
        self.dsdb_expect(" ".join(command), fail=fail)

    def dsdb_expect_add_campus(self,
                               campus,
                               comments=None,
                               fail=False,
                               errstr=""):
        command = ["add_campus_aq", "-campus_name", campus]
        if comments:
            command.extend(["-comments", comments])
        self.dsdb_expect(" ".join(command), fail=fail, errstr=errstr)

    def dsdb_expect_del_campus(self, campus, fail=False, errstr=""):
        command = ["delete_campus_aq", "-campus", campus]
        self.dsdb_expect(" ".join(command), fail=fail, errstr=errstr)

    def dsdb_expect_add_campus_building(self,
                                        campus,
                                        building,
                                        fail=False,
                                        errstr=""):
        command = [
            "add_campus_building_aq", "-campus_name", campus, "-building_name",
            building
        ]
        self.dsdb_expect(" ".join(command), fail=fail, errstr=errstr)

    def dsdb_expect_del_campus_building(self,
                                        campus,
                                        building,
                                        fail=False,
                                        errstr=""):
        command = [
            "delete_campus_building_aq", "-campus_name", campus,
            "-building_name", building
        ]
        self.dsdb_expect(" ".join(command), fail=fail, errstr=errstr)

    def dsdb_verify(self, empty=False):
        dsdb_coverage_dir = os.path.join(
            self.config.get("unittest", "scratchdir"), "dsdb_coverage")
        fail_expected_name = os.path.join(dsdb_coverage_dir,
                                          DSDB_EXPECT_FAILURE_FILE)
        issued_name = os.path.join(dsdb_coverage_dir, DSDB_ISSUED_CMDS_FILE)

        expected = {}
        for filename in [DSDB_EXPECT_SUCCESS_FILE, DSDB_EXPECT_FAILURE_FILE]:
            expected_name = os.path.join(dsdb_coverage_dir, filename)
            try:
                with open(expected_name, "r") as fp:
                    for line in fp:
                        expected[line.rstrip("\n")] = True
            except IOError:
                pass

        # This is likely a logic error in the test
        if not expected and not empty:
            self.fail("dsdb_verify() called when no DSDB commands were "
                      "expected?!?")

        issued = {}
        try:
            with open(issued_name, "r") as fp:
                for line in fp:
                    issued[line.rstrip("\n")] = True
        except IOError:
            pass

        errors = []
        for cmd, dummy in expected.items():
            if cmd not in issued:
                errors.append("'%s'" % cmd)
        # Unexpected DSDB commands are caught by the fake_dsdb script

        if errors:
            self.fail("The following expected DSDB commands were not called:"
                      "\n@@@\n%s\n@@@\n" % "\n".join(errors))

    def verify_buildfiles(self,
                          domain,
                          object,
                          want_exist=True,
                          command='manage'):
        qdir = self.config.get('broker', 'quattordir')
        domaindir = os.path.join(qdir, 'build', 'xml', domain)
        xmlfile = os.path.join(domaindir, object + self.profile_suffix)
        depfile = os.path.join(domaindir, object + '.dep')
        builddir = self.config.get('broker', 'builddir')
        profile = os.path.join(builddir, 'domains', domain, 'profiles',
                               object + self.template_extension)
        for f in [xmlfile, depfile, profile]:
            if want_exist:
                self.failUnless(
                    os.path.exists(f),
                    "Expecting %s to exist before running %s." % (f, command))
            else:
                self.failIf(
                    os.path.exists(f),
                    "Not expecting %s to exist after running %s." %
                    (f, command))

    def demote_current_user(self, role="nobody"):
        principal = self.config.get('unittest', 'principal')
        command = ["permission", "--role", role, "--principal", principal]
        self.noouttest(command)

    def promote_current_user(self):
        srcdir = self.config.get("broker", "srcdir")
        add_admin = os.path.join(srcdir, "tests", "aqdb", "add_admin.py")
        env = os.environ.copy()
        env['AQDCONF'] = self.config.baseconfig
        p = Popen([add_admin], stdout=PIPE, stderr=PIPE, env=env)
        (out, err) = p.communicate()
        self.assertEqual(
            p.returncode, 0,
            "Failed to restore admin privs '%s', '%s'." % (out, err))
Example #55
0
opts = parser.parse_args()

if not os.path.exists(opts.config):
    print >> sys.stderr, "configfile %s does not exist" % opts.config
    sys.exit(1)

if os.environ.get("AQDCONF") and (os.path.realpath(opts.config)
        != os.path.realpath(os.environ["AQDCONF"])):
    force_yes("""Will ignore AQDCONF variable value:
%s
and use
%s
instead.""" % (os.environ["AQDCONF"], opts.config))

config = Config(configfile=opts.config)
if not config.has_section("unittest"):
    config.add_section("unittest")
if not config.has_option("unittest", "srcdir"):
    config.set("unittest", "srcdir", SRCDIR)
if opts.coverage:
    config.set("unittest", "coverage", "True")
if opts.profile:
    config.set("unittest", "profile", "True")

hostname = config.get("unittest", "hostname")
if hostname.find(".") < 0:
    print >> sys.stderr, """
Some regression tests depend on the config value for hostname to be
fully qualified.  Please set the config value manually since the default
on this system (%s) is a short name.
Example #56
0
    def __init__(self, dbsandbox, dbauthor):
        self.dbsandbox = dbsandbox
        self.dbauthor = dbauthor
        config = Config()
        templatesdir = config.get('broker', 'templatesdir')
        self.path = os.path.join(templatesdir, dbauthor.name, dbsandbox.name)