Example #1
File: resources.py Project: ned21/aquilon
def del_resource(session, logger, dbresource, dsdb_callback=None, **arguments):
    holder = dbresource.holder
    holder_plenary = Plenary.get_plenary(holder.holder_object, logger=logger)
    remove_plenary = Plenary.get_plenary(dbresource, logger=logger)

    domain = holder.holder_object.branch.name

    holder.resources.remove(dbresource)
    session.flush()

    key = CompileKey.merge(
        [remove_plenary.get_remove_key(),
         holder_plenary.get_write_key()])
    try:
        lock_queue.acquire(key)
        remove_plenary.stash()
        try:
            holder_plenary.write(locked=True)
        except IncompleteError:
            holder_plenary.cleanup(domain, locked=True)

        remove_plenary.remove(locked=True)

        if dsdb_callback:
            dsdb_callback(session, logger, holder, dbresource, **arguments)
    except:
        holder_plenary.restore_stash()
        remove_plenary.restore_stash()
        raise
    finally:
        lock_queue.release(key)

    return
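
Every example on this page follows the same discipline: stash the on-disk state, take a compile key, write or remove plenaries with locked=True, restore the stash on any error, and always release the key. A condensed sketch of that pattern (hypothetical plenary object; only calls that already appear in the examples):

key = plenary.get_write_key()
try:
    lock_queue.acquire(key)
    plenary.stash()                # snapshot the current on-disk state
    plenary.write(locked=True)     # locked=True: the caller holds the key
except:
    plenary.restore_stash()        # undo any filesystem changes
    raise                          # propagate so the DB session is rolled back
finally:
    lock_queue.release(key)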
Example #2
    def render(self, session, logger, city, country, fullname, comments,
               timezone, campus, **arguments):
        if country:
            dbparent = Country.get_unique(session, country, compel=True)
        else:
            dbparent = Campus.get_unique(session, campus, compel=True)

        dbcity = add_location(session,
                              City,
                              city,
                              dbparent,
                              fullname=fullname,
                              comments=comments,
                              timezone=timezone)

        session.flush()

        plenary = PlenaryCity(dbcity, logger=logger)
        key = plenary.get_write_key()
        try:
            lock_queue.acquire(key)
            plenary.write(locked=True)

            dsdb_runner = DSDBRunner(logger=logger)
            dsdb_runner.add_city(city, dbcity.country.name, fullname)
            dsdb_runner.commit_or_rollback()

        except:
            plenary.restore_stash()
            raise
        finally:
            lock_queue.release(key)
Example #3
    def render(self, session, logger, cluster, buildstatus, **arguments):
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        dbstatus = ClusterLifecycle.get_unique(
            session, buildstatus, compel=True)

        if not dbcluster.status.transition(dbcluster, dbstatus):
            return

        if not dbcluster.personality.archetype.is_compileable:
            return

        session.flush()

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbcluster))

        for dbhost in dbcluster.hosts:
            plenaries.append(Plenary.get_plenary(dbhost))

        # Force a domain lock as pan might overwrite any of the profiles...
        key = CompileKey(domain=dbcluster.branch.name, logger=logger)
        try:
            lock_queue.acquire(key)

            plenaries.write(locked=True)
            td = TemplateDomain(
                dbcluster.branch, dbcluster.sandbox_author, logger=logger)
            td.compile(session, plenaries.object_templates, locked=True)
        except:
            plenaries.restore_stash()
            raise
        finally:
            lock_queue.release(key)
        return
Example #4
    def render(self, session, logger, machine, disk, controller, size, all,
               dbuser, **arguments):

        # Handle deprecated arguments
        if arguments.get("type", None):
            self.deprecated_option("type",
                                   "Please use --controller instead.",
                                   logger=logger,
                                   **arguments)
            controller = arguments["type"]
        if arguments.get("capacity", None):
            self.deprecated_option("capacity",
                                   "Please use --size instead.",
                                   logger=logger,
                                   **arguments)
            size = arguments["capacity"]

        dbmachine = Machine.get_unique(session, machine, compel=True)
        q = session.query(Disk).filter_by(machine=dbmachine)
        if disk:
            q = q.filter_by(device_name=disk)
        if controller:
            if controller not in controller_types:
                raise ArgumentError("%s is not a valid controller type, use "
                                    "one of: %s." %
                                    (controller, ", ".join(controller_types)))
            q = q.filter_by(controller_type=controller)
        if size is not None:
            q = q.filter_by(capacity=size)
        results = q.all()

        if len(results) == 0:
            raise NotFoundException("No disks found.")
        elif len(results) > 1 and not all:
            raise ArgumentError("More than one matching disks found.  "
                                "Use --all to delete them all.")
        for result in results:
            session.delete(result)

        session.flush()
        session.expire(dbmachine, ['disks'])

        plenary_machine = Plenary.get_plenary(dbmachine, logger=logger)
        key = plenary_machine.get_write_key()
        dbcontainer = dbmachine.vm_container
        if dbcontainer:
            plenary_container = Plenary.get_plenary(dbcontainer, logger=logger)
            key = CompileKey.merge([key, plenary_container.get_write_key()])
        try:
            lock_queue.acquire(key)
            if dbcontainer:
                plenary_container.write(locked=True)
            plenary_machine.write(locked=True)
        except:
            plenary_machine.restore_stash()
            if dbcontainer:
                plenary_container.restore_stash()
            raise
        finally:
            lock_queue.release(key)
Example #5
File: base.py Project: piojo/aquilon
 def write(self, locked=False):
     # If locked is True, assume error handling happens higher
     # in the stack.
     total = 0
     # Pre-stash all plenaries before attempting to write any
     # of them.  This way if an error occurs all can go through
     # the same restore logic.
     self.stash()
     key = None
     try:
         if not locked:
             key = self.get_key()
             lock_queue.acquire(key)
         for plen in self.plenaries:
             # IncompleteError is almost pointless in this context, but
             # it has the nice side effect of not updating the total.
             try:
                 total += plen.write(locked=True)
             except IncompleteError, err:
                 self.logger.client_info("Warning: %s" % err)
     except:
         if not locked:
             self.restore_stash()
         raise
     finally:
         if not locked:
             lock_queue.release(key)
     return total
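
For contrast, a sketch of the two ways a caller might drive the collection write above (hypothetical plenaries object): unlocked, where write() manages its own key, and locked, where the caller already holds the key and is responsible for restore_stash():

total = plenaries.write()        # acquires and releases its own key

key = plenaries.get_key()
try:
    lock_queue.acquire(key)
    total = plenaries.write(locked=True)   # reuses the caller's lock
except:
    plenaries.restore_stash()              # caller handles the rollback
    raise
finally:
    lock_queue.release(key)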
Example #6
    def render(self, session, logger, hostname, buildstatus, **arguments):
        dbhost = hostname_to_host(session, hostname)
        dbstatus = HostLifecycle.get_unique(session, buildstatus, compel=True)
        changed = dbhost.status.transition(dbhost, dbstatus)

        if not changed or not dbhost.archetype.is_compileable:
            return

        session.add(dbhost)
        session.flush()

        plenary = PlenaryHost(dbhost, logger=logger)
        # Force a host lock as pan might overwrite the profile...
        key = CompileKey(domain=dbhost.branch.name,
                         profile=dbhost.fqdn,
                         logger=logger)
        try:
            lock_queue.acquire(key)
            plenary.write(locked=True)
            td = TemplateDomain(dbhost.branch,
                                dbhost.sandbox_author,
                                logger=logger)
            td.compile(session, only=[dbhost.fqdn], locked=True)
        except IncompleteError:
            raise ArgumentError("Run aq make for host %s first." % dbhost.fqdn)
        except:
            plenary.restore_stash()
            raise
        finally:
            lock_queue.release(key)
Example #7
File: make.py Project: jrha/aquilon
    def compile(self, session, dbhost, logger, keepbindings):
        chooser = Chooser(dbhost, logger=logger,
                          required_only=not(keepbindings))
        chooser.set_required()
        chooser.flush_changes()

        hosts = chooser.changed_server_fqdns()
        hosts.add(dbhost.fqdn)

        # Force a host lock as pan might overwrite the profile...
        key = chooser.get_write_key()
        for fqdn in hosts:
            key = CompileKey.merge([key, CompileKey(domain=dbhost.branch.name,
                                                    profile=fqdn,
                                                    logger=logger)])
        try:
            lock_queue.acquire(key)
            chooser.write_plenary_templates(locked=True)

            td = TemplateDomain(dbhost.branch, dbhost.sandbox_author,
                                logger=logger)
            td.compile(session, only=hosts, locked=True)

        except:
            if chooser:
                chooser.restore_stash()

            # Okay, cleaned up templates, make sure the caller knows
            # we've aborted so that DB can be appropriately rollback'd.
            raise

        finally:
            lock_queue.release(key)

        return
Example #8
File: del_city.py Project: ned21/aquilon
    def render(self, session, logger, city, **arguments):
        dbcity = get_location(session, city=city)

        name = dbcity.name
        country = dbcity.country.name
        fullname = dbcity.fullname

        plenary = PlenaryCity(dbcity, logger=logger)
        CommandDelLocation.render(self, session=session, name=city,
                                  type='city', **arguments)
        session.flush()

        key = plenary.get_remove_key()
        try:
            lock_queue.acquire(key)
            plenary.remove(locked=True)
            dsdb_runner = DSDBRunner(logger=logger)
            dsdb_runner.del_city(name, country, fullname)
            dsdb_runner.commit_or_rollback()
        except:
            plenary.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        return
Example #9
    def render(self, session, logger, hostname, buildstatus, **arguments):
        dbhost = hostname_to_host(session, hostname)
        dbstatus = HostLifecycle.get_unique(session, buildstatus, compel=True)
        changed = dbhost.status.transition(dbhost, dbstatus)

        if not changed or not dbhost.archetype.is_compileable:
            return

        session.add(dbhost)
        session.flush()

        plenary = PlenaryHost(dbhost, logger=logger)
        # Force a host lock as pan might overwrite the profile...
        key = CompileKey(domain=dbhost.branch.name, profile=dbhost.fqdn,
                         logger=logger)
        try:
            lock_queue.acquire(key)
            plenary.write(locked=True)
            td = TemplateDomain(dbhost.branch, dbhost.sandbox_author,
                                logger=logger)
            td.compile(session, only=[dbhost.fqdn], locked=True)
        except IncompleteError:
            raise ArgumentError("Run aq make for host %s first." % dbhost.fqdn)
        except:
            plenary.restore_stash()
            raise
        finally:
            lock_queue.release(key)
Example #10
File: add_city.py Project: jrha/aquilon
    def render(self, session, logger, city, country, fullname, comments,
               timezone, campus,
               **arguments):
        if country:
            dbparent = Country.get_unique(session, country, compel=True)
        else:
            dbparent = Campus.get_unique(session, campus, compel=True)

        dbcity = add_location(session, City, city, dbparent, fullname=fullname,
                              comments=comments, timezone=timezone)

        session.flush()

        plenary = PlenaryCity(dbcity, logger=logger)
        key = plenary.get_write_key()
        try:
            lock_queue.acquire(key)
            plenary.write(locked=True)

            dsdb_runner = DSDBRunner(logger=logger)
            dsdb_runner.add_city(city, dbcity.country.name, fullname)
            dsdb_runner.commit_or_rollback()

        except:
            plenary.restore_stash()
            raise
        finally:
            lock_queue.release(key)
Example #11
File: resources.py Project: jrha/aquilon
def add_resource(session, logger, holder, dbresource, dsdb_callback=None,
                 **arguments):
    if dbresource not in holder.resources:
        holder.resources.append(dbresource)

    holder_plenary = Plenary.get_plenary(holder.holder_object, logger=logger)
    res_plenary = Plenary.get_plenary(dbresource, logger=logger)

    domain = holder.holder_object.branch.name

    session.flush()

    key = CompileKey.merge([res_plenary.get_write_key(),
                            holder_plenary.get_write_key()])
    try:
        lock_queue.acquire(key)
        res_plenary.write(locked=True)
        try:
            holder_plenary.write(locked=True)
        except IncompleteError:
            holder_plenary.cleanup(domain, locked=True)

        if dsdb_callback:
            dsdb_callback(session, logger, dbresource, **arguments)

    except:
        res_plenary.restore_stash()
        holder_plenary.restore_stash()
        raise
    finally:
        lock_queue.release(key)

    return
Example #12
    def render(self, session, logger, building, dryrun, incremental,
               **arguments):
        if building:
            dbbuilding = get_location(session, building=building)
        else:
            dbbuilding = None

        # --dryrun and --incremental do not mix well
        if dryrun and incremental:
            raise ArgumentError("--dryrun and --incremental cannot be given "
                                "simultaneously.")

        key = SyncKey(data="network", logger=logger)
        lock_queue.acquire(key)

        rundir = self.config.get("broker", "rundir")
        tempdir = mkdtemp(prefix="refresh_network_", dir=rundir)
        try:
            args = [self.config.get("broker", "qip_dump_subnetdata"),
                    "--datarootdir", tempdir, "--format", "txt", "--noaudit"]
            run_command(args, logger=logger)

            subnetdata = file(os.path.join(tempdir, "subnetdata.txt"), "r")
            refresher = QIPRefresh(session, logger, dbbuilding, dryrun, incremental)
            refresher.refresh(subnetdata)

            session.flush()

            if dryrun:
                session.rollback()
        finally:
            lock_queue.release(key)
            remove_dir(tempdir, logger=logger)
Example #13
File: base.py Project: ned21/aquilon
 def write(self, locked=False, content=None):
     # If locked is True, assume error handling happens higher
     # in the stack.
     total = 0
     # Pre-stash all plenaries before attempting to write any
     # of them.  This way if an error occurs all can go through
     # the same restore logic.
     self.stash()
     key = None
     try:
         if not locked:
             key = self.get_write_key()
             lock_queue.acquire(key)
         for plen in self.plenaries:
             # IncompleteError is almost pointless in this context, but
             # it has the nice side effect of not updating the total.
             try:
                 total += plen.write(locked=True, content=content)
             except IncompleteError, err:
                 self.logger.client_info("Warning: %s" % err)
     except:
         if not locked:
             self.restore_stash()
         raise
     finally:
         if not locked:
             lock_queue.release(key)
     return total
Example #14
File: processes.py Project: piojo/aquilon
def sync_domain(dbdomain, logger=LOGGER, locked=False):
    """Update templates on disk to match contents of branch in template-king.

    If this domain is tracking another, first update the branch in
    template-king with the latest from the tracking branch.  Also save
    the current (previous) commit as a potential rollback point.

    """
    config = Config()
    kingdir = config.get("broker", "kingdir")
    domaindir = os.path.join(config.get("broker", "domainsdir"), dbdomain.name)
    git_env = {"PATH": "%s:%s" % (config.get("broker", "git_path"),
                                  os.environ.get("PATH", ""))}
    if dbdomain.tracked_branch:
        # Might need to revisit if using this helper from rollback...
        run_command(["git", "push", ".",
                     "%s:%s" % (dbdomain.tracked_branch.name, dbdomain.name)],
                    path=kingdir, env=git_env, logger=logger)
    run_command(["git", "fetch", "--prune"], path=domaindir, env=git_env, logger=logger)
    if dbdomain.tracked_branch:
        out = run_command(["git", "rev-list", "-n", "1", "HEAD"],
                          path=domaindir, env=git_env, logger=logger)
        rollback_commit = out.strip()
    try:
        if not locked:
            key = CompileKey(domain=dbdomain.name, logger=logger)
            lock_queue.acquire(key)
        run_command(["git", "reset", "--hard", "origin/%s" % dbdomain.name],
                    path=domaindir, env=git_env, logger=logger)
    finally:
        if not locked:
            lock_queue.release(key)
    if dbdomain.tracked_branch:
        dbdomain.rollback_commit = rollback_commit
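
A hedged usage sketch (hypothetical dbdomain): pass locked=True when the caller already holds a CompileKey for the domain; otherwise sync_domain takes and releases its own.

key = CompileKey(domain=dbdomain.name, logger=logger)
try:
    lock_queue.acquire(key)
    sync_domain(dbdomain, logger=logger, locked=True)
finally:
    lock_queue.release(key)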
Example #15
    def render(self, session, logger, metacluster, max_members,
               fix_location, high_availability, comments, **arguments):
        dbmetacluster = MetaCluster.get_unique(session, metacluster,
                                               compel=True)
        cluster_updated = False

        if max_members is not None:
            current_members = len(dbmetacluster.members)
            if max_members < current_members:
                raise ArgumentError("%s has %d clusters bound, which exceeds "
                                    "the requested limit %d." %
                                    (format(dbmetacluster), current_members,
                                     max_members))
            dbmetacluster.max_clusters = max_members
            cluster_updated = True

        if comments is not None:
            dbmetacluster.comments = comments
            cluster_updated = True

        if high_availability is not None:
            dbmetacluster.high_availability = high_availability
            cluster_updated = True

        # TODO update_cluster_location would update VMs. Metaclusters
        # will contain VMs in Vulcan2 model.
        plenaries = PlenaryCollection(logger=logger)
        remove_plenaries = PlenaryCollection(logger=logger)

        location_updated = update_cluster_location(session, logger,
                                          dbmetacluster,
                                          fix_location,
                                          plenaries, remove_plenaries,
                                          **arguments)

        if location_updated:
            cluster_updated = True

        if not cluster_updated:
            return

        session.add(dbmetacluster)
        session.flush()
        dbmetacluster.validate()

        plenary_info = Plenary.get_plenary(dbmetacluster, logger=logger)
        key = plenary_info.get_write_key()

        try:
            lock_queue.acquire(key)

            plenary_info.write(locked=True)
        except:
            plenary_info.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        return
Example #16
File: del_switch.py Project: ned21/aquilon
    def render(self, session, logger, switch, **arguments):
        dbswitch = Switch.get_unique(session, switch, compel=True)

        # Check and complain if the switch has any other addresses than its
        # primary address
        addrs = []
        for addr in dbswitch.all_addresses():
            if addr.ip == dbswitch.primary_ip:
                continue
            addrs.append(str(addr.ip))
        if addrs:
            raise ArgumentError("{0} still provides the following addresses, "
                                "delete them first: {1}.".format
                                (dbswitch, ", ".join(addrs)))

        dbdns_rec = dbswitch.primary_name
        ip = dbswitch.primary_ip
        old_fqdn = str(dbswitch.primary_name.fqdn)
        old_comments = dbswitch.comments
        session.delete(dbswitch)
        if dbdns_rec:
            delete_dns_record(dbdns_rec)

        session.flush()

        # Any switch ports hanging off this switch should be deleted with
        # the cascade delete of the switch.

        switch_plenary = Plenary.get_plenary(dbswitch, logger=logger)

        # clusters connected to this switch
        plenaries = PlenaryCollection(logger=logger)

        for dbcluster in dbswitch.esx_clusters:
            plenaries.append(Plenary.get_plenary(dbcluster))

        key = CompileKey.merge([switch_plenary.get_remove_key(),
                                plenaries.get_write_key()])

        try:
            lock_queue.acquire(key)
            switch_plenary.stash()
            plenaries.write(locked=True)
            switch_plenary.remove(locked=True)

            if ip:
                dsdb_runner = DSDBRunner(logger=logger)
                # FIXME: restore interface name/MAC on rollback
                dsdb_runner.delete_host_details(old_fqdn, ip, comments=old_comments)
                dsdb_runner.commit_or_rollback("Could not remove switch from DSDB")
            return

        except:
            plenaries.restore_stash()
            switch_plenary.restore_stash()
            raise
        finally:
            lock_queue.release(key)
Example #17
File: del_disk.py Project: jrha/aquilon
    def render(self, session, logger, machine, disk, controller, size, all,
               dbuser, **arguments):

        # Handle deprecated arguments
        if arguments.get("type", None):
            self.deprecated_option("type", "Please use --controller instead.",
                                   logger=logger, **arguments)
            controller = arguments["type"]
        if arguments.get("capacity", None):
            self.deprecated_option("capacity", "Please use --size instead.",
                                   logger=logger, **arguments)
            size = arguments["capacity"]

        dbmachine = Machine.get_unique(session, machine, compel=True)
        q = session.query(Disk).filter_by(machine=dbmachine)
        if disk:
            q = q.filter_by(device_name=disk)
        if controller:
            if controller not in controller_types:
                raise ArgumentError("%s is not a valid controller type, use "
                                    "one of: %s." % (controller,
                                                     ", ".join(controller_types)
                                                     ))
            q = q.filter_by(controller_type=controller)
        if size is not None:
            q = q.filter_by(capacity=size)
        results = q.all()

        if len(results) == 0:
            raise NotFoundException("No disks found.")
        elif len(results) > 1 and not all:
            raise ArgumentError("More than one matching disks found.  "
                                "Use --all to delete them all.")
        for result in results:
            session.delete(result)

        session.flush()
        session.expire(dbmachine, ['disks'])

        plenary_machine = Plenary.get_plenary(dbmachine, logger=logger)
        key = plenary_machine.get_write_key()
        dbcontainer = dbmachine.vm_container
        if dbcontainer:
            plenary_container = Plenary.get_plenary(dbcontainer, logger=logger)
            key = CompileKey.merge([key, plenary_container.get_write_key()])
        try:
            lock_queue.acquire(key)
            if dbcontainer:
                plenary_container.write(locked=True)
            plenary_machine.write(locked=True)
        except:
            plenary_machine.restore_stash()
            if dbcontainer:
                plenary_container.restore_stash()
            raise
        finally:
            lock_queue.release(key)
Example #18
File: base.py Project: jrha/aquilon
    def cleanup(self, domain, locked=False):
        """
        remove all files related to an object template including
        any intermediate build files
        """
        key = None
        try:
            if not locked:
                key = self.get_remove_key()
                lock_queue.acquire(key)

            if self.template_type == "object":
                # Can't call remove() here because it relies on the new domain.
                qdir = self.config.get("broker", "quattordir")
                # Only one or the other of .xml/.xml.gz should be there...
                # it doesn't hurt to clean up both.
                xmldir = os.path.join(qdir, "build", "xml", domain,
                                      self.plenary_core)
                xmlfile = os.path.join(xmldir, self.plenary_template + ".xml")
                remove_file(xmlfile, logger=self.logger)
                xmlgzfile = xmlfile + ".gz"
                remove_file(xmlgzfile, logger=self.logger)
                # Name used up to and including panc 9.2
                depfile = xmlfile + ".dep"
                remove_file(depfile, logger=self.logger)
                # Name used by panc 9.4 and higher
                depfile = os.path.join(xmldir, self.plenary_template + ".dep")
                remove_file(depfile, logger=self.logger)
                try:
                    os.removedirs(xmldir)
                except OSError:
                    pass

                builddir = self.config.get("broker", "builddir")
                maindir = os.path.join(builddir, "domains", domain,
                                       "profiles", self.plenary_core)
                mainfile = os.path.join(maindir, self.plenary_template +
                                        TEMPLATE_EXTENSION)
                remove_file(mainfile, logger=self.logger)
                try:
                    os.removedirs(maindir)
                except OSError:
                    pass
                self.removed = True
            else:
                # Non-object templates do not depend on the domain, so calling
                # remove() is fine
                self.remove(locked=True)
        except:
            if not locked:
                self.restore_stash()
            raise
        finally:
            if not locked:
                lock_queue.release(key)
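
A hypothetical call site, mirroring the IncompleteError handlers in Examples #1 and #11: when an object's profile cannot be written yet, purge its build artifacts from the old domain instead.

try:
    holder_plenary.write(locked=True)
except IncompleteError:
    holder_plenary.cleanup(domain, locked=True)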
Example #19
File: base.py Project: ned21/aquilon
    def cleanup(self, domain, locked=False):
        """
        remove all files related to an object template including
        any intermediate build files
        """
        key = None
        try:
            if not locked:
                key = self.get_remove_key()
                lock_queue.acquire(key)

            if self.template_type == "object":
                # Can't call remove() here because it relies on the new domain.
                qdir = self.config.get("broker", "quattordir")
                # Only one or the other of .xml/.xml.gz should be there...
                # it doesn't hurt to clean up both.
                xmldir = os.path.join(qdir, "build", "xml", domain,
                                      self.plenary_core)
                xmlfile = os.path.join(xmldir, self.plenary_template + ".xml")
                remove_file(xmlfile, logger=self.logger)
                xmlgzfile = xmlfile + ".gz"
                remove_file(xmlgzfile, logger=self.logger)
                # Name used up to and including panc 9.2
                depfile = xmlfile + ".dep"
                remove_file(depfile, logger=self.logger)
                # Name used by panc 9.4 and higher
                depfile = os.path.join(xmldir, self.plenary_template + ".dep")
                remove_file(depfile, logger=self.logger)
                try:
                    os.removedirs(xmldir)
                except OSError:
                    pass

                builddir = self.config.get("broker", "builddir")
                maindir = os.path.join(builddir, "domains", domain, "profiles",
                                       self.plenary_core)
                mainfile = os.path.join(
                    maindir, self.plenary_template + TEMPLATE_EXTENSION)
                remove_file(mainfile, logger=self.logger)
                try:
                    os.removedirs(maindir)
                except OSError:
                    pass
                self.removed = True
            else:
                # Non-object templates do not depend on the domain, so calling
                # remove() is fine
                self.remove(locked=True)
        except:
            if not locked:
                self.restore_stash()
            raise
        finally:
            if not locked:
                lock_queue.release(key)
Example #20
File: add_manager.py Project: jrha/aquilon
    def render(self, session, logger, hostname, manager, interface, mac,
               comments, **arguments):
        dbhost = hostname_to_host(session, hostname)
        dbmachine = dbhost.machine
        oldinfo = DSDBRunner.snapshot_hw(dbmachine)

        if not manager:
            manager = "%sr.%s" % (dbmachine.primary_name.fqdn.name,
                                  dbmachine.primary_name.fqdn.dns_domain.name)

        dbinterface = get_or_create_interface(session, dbmachine,
                                              name=interface, mac=mac,
                                              interface_type='management')

        addrs = ", ".join(["%s [%s]" % (addr.logical_name, addr.ip) for addr
                           in dbinterface.assignments])
        if addrs:
            raise ArgumentError("{0} already has the following addresses: "
                                "{1}.".format(dbinterface, addrs))

        audit_results = []
        ip = generate_ip(session, logger, dbinterface, compel=True,
                         audit_results=audit_results, **arguments)

        dbdns_rec, newly_created = grab_address(session, manager, ip,
                                                comments=comments,
                                                preclude=True)

        assign_address(dbinterface, ip, dbdns_rec.network)

        session.flush()

        plenary_info = PlenaryMachineInfo(dbmachine, logger=logger)
        key = plenary_info.get_write_key()
        try:
            lock_queue.acquire(key)
            plenary_info.write(locked=True)

            dsdb_runner = DSDBRunner(logger=logger)
            dsdb_runner.update_host(dbmachine, oldinfo)
            dsdb_runner.commit_or_rollback("Could not add host to DSDB")
        except:
            plenary_info.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        if dbmachine.host:
            # XXX: Host needs to be reconfigured.
            pass

        for name, value in audit_results:
            self.audit_result(session, name, value, **arguments)
        return
Example #21
File: uncluster.py Project: ned21/aquilon
    def render(self, session, logger, hostname, cluster, personality,
               **arguments):
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        dbhost = hostname_to_host(session, hostname)
        if not dbhost.cluster:
            raise ArgumentError(
                "{0} is not bound to a cluster.".format(dbhost))
        if dbhost.cluster != dbcluster:
            raise ArgumentError("{0} is bound to {1:l}, not {2:l}.".format(
                dbhost, dbhost.cluster, dbcluster))

        if personality:
            dbpersonality = Personality.get_unique(session,
                                                   name=personality,
                                                   archetype=dbhost.archetype,
                                                   compel=True)
            if dbpersonality.cluster_required:
                raise ArgumentError("Cannot switch host to personality %s "
                                    "because that personality requires a "
                                    "cluster" % personality)
            dbhost.personality = dbpersonality
        elif dbhost.personality.cluster_required:
            raise ArgumentError("Host personality %s requires a cluster, "
                                "use --personality to change personality "
                                "when leaving the cluster." %
                                dbhost.personality.name)

        dbcluster.hosts.remove(dbhost)
        remove_service_addresses(dbcluster, dbhost)
        dbcluster.validate()

        session.flush()
        session.expire(dbhost, ['_cluster'])

        # Will need to write a cluster plenary and either write or
        # remove a host plenary.  Grab the domain key since the two
        # must be in the same domain.
        host_plenary = Plenary.get_plenary(dbhost, logger=logger)
        cluster_plenary = Plenary.get_plenary(dbcluster, logger=logger)
        key = CompileKey(domain=dbcluster.branch.name, logger=logger)
        try:
            lock_queue.acquire(key)
            cluster_plenary.write(locked=True)
            try:
                host_plenary.write(locked=True)
            except IncompleteError:
                host_plenary.cleanup(domain=dbhost.branch.name, locked=True)
        except:
            cluster_plenary.restore_stash()
            host_plenary.restore_stash()
            raise
        finally:
            lock_queue.release(key)
Example #22
def sync_domain(dbdomain, logger=LOGGER, locked=False):
    """Update templates on disk to match contents of branch in template-king.

    If this domain is tracking another, first update the branch in
    template-king with the latest from the tracking branch.  Also save
    the current (previous) commit as a potential rollback point.

    """
    config = Config()
    session = object_session(dbdomain)
    kingdir = config.get("broker", "kingdir")
    domaindir = os.path.join(config.get("broker", "domainsdir"), dbdomain.name)
    git_env = {
        "PATH":
        "%s:%s" %
        (config.get("broker", "git_path"), os.environ.get("PATH", ""))
    }
    if dbdomain.tracked_branch:
        # Might need to revisit if using this helper from rollback...
        run_command([
            "git", "push", ".",
            "%s:%s" % (dbdomain.tracked_branch.name, dbdomain.name)
        ],
                    path=kingdir,
                    env=git_env,
                    logger=logger)
    run_command(["git", "fetch", "--prune"],
                path=domaindir,
                env=git_env,
                logger=logger)
    if dbdomain.tracked_branch:
        out = run_command(["git", "rev-list", "-n", "1", "HEAD"],
                          path=domaindir,
                          env=git_env,
                          logger=logger)
        rollback_commit = out.strip()
    try:
        if not locked:
            key = CompileKey(domain=dbdomain.name, logger=logger)
            lock_queue.acquire(key)
        run_command(["git", "reset", "--hard",
                     "origin/%s" % dbdomain.name],
                    path=domaindir,
                    env=git_env,
                    logger=logger)
    finally:
        if not locked:
            lock_queue.release(key)
    if dbdomain.tracked_branch:
        dbdomain.rollback_commit = rollback_commit
        session.add(dbdomain)
Example #23
File: uncluster.py Project: jrha/aquilon
    def render(self, session, logger, hostname, cluster,
               personality, **arguments):
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        dbhost = hostname_to_host(session, hostname)
        if not dbhost.cluster:
            raise ArgumentError("{0} is not bound to a cluster.".format(dbhost))
        if dbhost.cluster != dbcluster:
            raise ArgumentError("{0} is bound to {1:l}, not {2:l}.".format(
                                dbhost, dbhost.cluster, dbcluster))

        if personality:
            dbpersonality = Personality.get_unique(session, name=personality,
                                                   archetype=dbhost.archetype,
                                                   compel=True)
            if dbpersonality.cluster_required:
                raise ArgumentError("Cannot switch host to personality %s "
                                    "because that personality requires a "
                                    "cluster" % personality)
            dbhost.personality = dbpersonality
        elif dbhost.personality.cluster_required:
            raise ArgumentError("Host personality %s requires a cluster, "
                                "use --personality to change personality "
                                "when leaving the cluster." %
                                dbhost.personality.name)

        dbcluster.hosts.remove(dbhost)
        remove_service_addresses(dbcluster, dbhost)
        dbcluster.validate()

        session.flush()
        session.expire(dbhost, ['_cluster'])

        # Will need to write a cluster plenary and either write or
        # remove a host plenary.  Grab the domain key since the two
        # must be in the same domain.
        host_plenary = Plenary.get_plenary(dbhost, logger=logger)
        cluster_plenary = Plenary.get_plenary(dbcluster, logger=logger)
        key = CompileKey(domain=dbcluster.branch.name, logger=logger)
        try:
            lock_queue.acquire(key)
            cluster_plenary.write(locked=True)
            try:
                host_plenary.write(locked=True)
            except IncompleteError:
                host_plenary.cleanup(domain=dbhost.branch.name, locked=True)
        except:
            cluster_plenary.restore_stash()
            host_plenary.restore_stash()
            raise
        finally:
            lock_queue.release(key)
Example #24
File: rollback.py Project: ned21/aquilon
    def render(self, session, logger, domain, ref, lastsync, **arguments):
        dbdomain = Domain.get_unique(session, domain, compel=True)
        if not dbdomain.tracked_branch:
            # Could check dbdomain.trackers and rollback all of them...
            raise ArgumentError("rollback requires a tracking domain")

        if lastsync:
            if not dbdomain.rollback_commit:
                raise ArgumentError("domain %s does not have a rollback "
                                    "commit saved, please specify one "
                                    "explicitly." % dbdomain.name)
            ref = dbdomain.rollback_commit

        if not ref:
            raise ArgumentError("Commit reference to rollback to required.")

        kingdir = self.config.get("broker", "kingdir")
        domaindir = os.path.join(self.config.get("broker", "domainsdir"),
                                 dbdomain.name)
        out = run_git(["branch", "--contains", ref],
                      logger=logger,
                      path=kingdir)
        if not re.search(r'\b%s\b' % dbdomain.tracked_branch.name, out):
            # There's no real technical reason why this needs to be
            # true.  It just seems like a good sanity check.
            raise ArgumentError("Cannot roll back to commit: "
                                "branch %s does not contain %s" %
                                (dbdomain.tracked_branch.name, ref))

        dbdomain.tracked_branch.is_sync_valid = False
        session.add(dbdomain.tracked_branch)
        dbdomain.rollback_commit = None
        session.add(dbdomain)

        key = CompileKey(domain=dbdomain.name, logger=logger)
        try:
            lock_queue.acquire(key)
            run_git(["push", ".", "+%s:%s" % (ref, dbdomain.name)],
                    path=kingdir,
                    logger=logger)
            # Duplicated this logic from aquilon.worker.processes.sync_domain()
            run_git(["fetch"], path=domaindir, logger=logger)
            run_git(["reset", "--hard",
                     "origin/%s" % dbdomain.name],
                    path=domaindir,
                    logger=logger)
        except ProcessException, e:
            raise ArgumentError(
                "Problem encountered updating templates for "
                "domain %s: %s" % (dbdomain.name, e))
Example #25
File: base.py Project: piojo/aquilon
    def remove(self, locked=False, remove_profile=False):
        """
        remove all files related to an object template including
        any intermediate build files
        """
        key = None
        try:
            if not locked:
                key = self.get_key()
                lock_queue.acquire(key)
            self.stash()

            # Only one or the other of .xml/.xml.gz should be there...
            # it doesn't hurt to clean up both.
            # .xml.dep is used up to and including panc 9.2
            # .dep is used by panc 9.4 and higher
            basename = os.path.join(self.config.get("broker", "quattordir"),
                                    "build", self.old_branch, self.old_name)
            for ext in (".xml", ".xml.gz", ".xml.dep", ".dep"):
                remove_file(basename + ext, logger=self.logger)
            try:
                os.removedirs(os.path.dirname(basename))
            except OSError:
                pass

            super(ObjectPlenary, self).remove(locked=True)

            if remove_profile:
                basename = os.path.join(self.config.get("broker",
                                                        "profilesdir"),
                                        self.old_name)
                # Only one of these should exist, but it doesn't hurt
                # to try to clean up both.
                for ext in (".xml", ".xml.gz"):
                    remove_file(basename + ext, logger=self.logger)

                # Remove the cached template created by ant
                remove_file(os.path.join(self.config.get("broker",
                                                         "quattordir"),
                                         "objects",
                                         self.old_name + self.TEMPLATE_EXTENSION),
                            logger=self.logger)
        except:
            if not locked:
                self.restore_stash()
            raise
        finally:
            if not locked:
                lock_queue.release(key)
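
A hypothetical usage under an already-held key, for an object that is being decommissioned outright:

# remove_profile=True also deletes the compiled profile under
# profilesdir and the cached template built by ant
plenary.remove(locked=True, remove_profile=True)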
Example #26
File: base.py Project: piojo/aquilon
 def remove(self, locked=False, remove_profile=False):
     self.stash()
     key = None
     try:
         if not locked:
             key = self.get_key()
             lock_queue.acquire(key)
         for plen in self.plenaries:
             plen.remove(locked=True, remove_profile=remove_profile)
     except:
         if not locked:
             self.restore_stash()
         raise
     finally:
         if not locked:
             lock_queue.release(key)
Example #27
File: base.py Project: jrha/aquilon
 def cleanup(self, domain, locked=False):
     key = None
     try:
         if not locked:
             key = self.get_remove_key()
             lock_queue.acquire(key)
         self.stash()
         for plen in self.plenaries:
             plen.cleanup(domain, locked=True)
     except:
         if not locked:
             self.restore_stash()
         raise
     finally:
         if not locked:
             lock_queue.release(key)
Example #28
File: base.py Project: ned21/aquilon
 def cleanup(self, domain, locked=False):
     key = None
     try:
         if not locked:
             key = self.get_remove_key()
             lock_queue.acquire(key)
         self.stash()
         for plen in self.plenaries:
             plen.cleanup(domain, locked=True)
     except:
         if not locked:
             self.restore_stash()
         raise
     finally:
         if not locked:
             lock_queue.release(key)
Example #29
File: base.py Project: ned21/aquilon
    def write(self, locked=False, content=None):
        """Write out the template.

        If the content is unchanged, then the file will not be modified
        (preserving the mtime).

        Returns the number of files that were written.

        If locked is True then it is assumed that error handling happens
        higher in the call stack.

        """

        if self.template_type == "object" and \
           hasattr(self.dbobj, "personality") and \
           self.dbobj.personality and \
           not self.dbobj.personality.archetype.is_compileable:
            return 0

        if content is None:
            if not self.new_content:
                self.new_content = self._generate_content()
            content = self.new_content

        self.stash()
        if self.old_content == content and \
           not self.removed and not self.changed:
            # optimise out the write (leaving the mtime good for ant)
            # if nothing is actually changed
            return 0

        key = None
        try:
            if not locked:
                key = self.get_write_key()
                lock_queue.acquire(key)
            if not os.path.exists(self.plenary_directory):
                os.makedirs(self.plenary_directory)
            write_file(self.plenary_file, content, logger=self.logger)
            self.removed = False
            if self.old_content != content:
                self.changed = True
        except Exception, e:
            if not locked:
                self.restore_stash()
            raise e
        finally:
            if not locked:
                lock_queue.release(key)
        return 1
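
A cautious sketch of the no-op optimisation documented above (hypothetical, assuming a freshly created plenary whose generated content already matches the file on disk):

plenary = Plenary.get_plenary(dbobj, logger=logger)
if plenary.write() == 0:
    # nothing was rewritten, so ant/panc still see the old mtime
    pass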
Example #30
File: base.py Project: jrha/aquilon
    def write(self, locked=False, content=None):
        """Write out the template.

        If the content is unchanged, then the file will not be modified
        (preserving the mtime).

        Returns the number of files that were written.

        If locked is True then it is assumed that error handling happens
        higher in the call stack.

        """

        if self.template_type == "object" and \
           hasattr(self.dbobj, "personality") and \
           self.dbobj.personality and \
           not self.dbobj.personality.archetype.is_compileable:
            return 0

        if content is None:
            if not self.new_content:
                self.new_content = self._generate_content()
            content = self.new_content

        self.stash()
        if self.old_content == content and \
           not self.removed and not self.changed:
            # optimise out the write (leaving the mtime good for ant)
            # if nothing is actually changed
            return 0

        key = None
        try:
            if not locked:
                key = self.get_write_key()
                lock_queue.acquire(key)
            if not os.path.exists(self.plenary_directory):
                os.makedirs(self.plenary_directory)
            write_file(self.plenary_file, content, logger=self.logger)
            self.removed = False
            if self.old_content != content:
                self.changed = True
        except Exception, e:
            if not locked:
                self.restore_stash()
            raise e
        finally:
            if not locked:
                lock_queue.release(key)
        return 1
Example #31
File: rollback.py Project: jrha/aquilon
    def render(self, session, logger, domain, ref, lastsync, **arguments):
        dbdomain = Domain.get_unique(session, domain, compel=True)
        if not dbdomain.tracked_branch:
            # Could check dbdomain.trackers and rollback all of them...
            raise ArgumentError("rollback requires a tracking domain")

        if lastsync:
            if not dbdomain.rollback_commit:
                raise ArgumentError("domain %s does not have a rollback "
                                    "commit saved, please specify one "
                                    "explicitly." % dbdomain.name)
            ref = dbdomain.rollback_commit

        if not ref:
            raise ArgumentError("Commit reference to rollback to required.")

        kingdir = self.config.get("broker", "kingdir")
        domaindir = os.path.join(self.config.get("broker", "domainsdir"),
                                 dbdomain.name)
        out = run_git(["branch", "--contains", ref],
                      logger=logger, path=kingdir)
        if not re.search(r'\b%s\b' % dbdomain.tracked_branch.name, out):
            # There's no real technical reason why this needs to be
            # true.  It just seems like a good sanity check.
            raise ArgumentError("Cannot roll back to commit: "
                                "branch %s does not contain %s" %
                                (dbdomain.tracked_branch.name, ref))

        dbdomain.tracked_branch.is_sync_valid = False
        session.add(dbdomain.tracked_branch)
        dbdomain.rollback_commit = None
        session.add(dbdomain)

        key = CompileKey(domain=dbdomain.name, logger=logger)
        try:
            lock_queue.acquire(key)
            run_git(["push", ".", "+%s:%s" % (ref, dbdomain.name)],
                    path=kingdir, logger=logger)
            # Duplicated this logic from aquilon.worker.processes.sync_domain()
            run_git(["fetch"], path=domaindir, logger=logger)
            run_git(["reset", "--hard", "origin/%s" % dbdomain.name],
                    path=domaindir, logger=logger)
        except ProcessException, e:
            raise ArgumentError("Problem encountered updating templates for "
                                "domain %s: %s" % (dbdomain.name, e))
Example #32
    def render(self, session, logger, machine, dbuser, **arguments):
        dbmachine = Machine.get_unique(session, machine, compel=True)

        remove_plenaries = PlenaryCollection(logger=logger)
        remove_plenaries.append(Plenary.get_plenary(dbmachine))
        if dbmachine.vm_container:
            remove_plenaries.append(Plenary.get_plenary(
                dbmachine.vm_container))
            dbcontainer = dbmachine.vm_container.holder.holder_object
        else:
            dbcontainer = None

        if dbmachine.host:
            raise ArgumentError("{0} is still in use by {1:l} and cannot be "
                                "deleted.".format(dbmachine, dbmachine.host))
        addrs = []
        for addr in dbmachine.all_addresses():
            addrs.append("%s: %s" % (addr.logical_name, addr.ip))
        if addrs:
            addrmsg = ", ".join(addrs)
            raise ArgumentError("{0} still provides the following addresses, "
                                "delete them first: {1}.".format(
                                    dbmachine, addrmsg))

        session.delete(dbmachine)
        session.flush()

        key = remove_plenaries.get_remove_key()
        if dbcontainer:
            plenary_container = Plenary.get_plenary(dbcontainer, logger=logger)
            key = CompileKey.merge([key, plenary_container.get_write_key()])
        try:
            lock_queue.acquire(key)
            remove_plenaries.stash()
            if dbcontainer:
                plenary_container.write(locked=True)
            remove_plenaries.remove(locked=True)
        except:
            remove_plenaries.restore_stash()
            if dbcontainer:
                plenary_container.restore_stash()
            raise
        finally:
            lock_queue.release(key)
        return
Example #33
File: del_machine.py Project: jrha/aquilon
    def render(self, session, logger, machine, dbuser, **arguments):
        dbmachine = Machine.get_unique(session, machine, compel=True)

        remove_plenaries = PlenaryCollection(logger=logger)
        remove_plenaries.append(Plenary.get_plenary(dbmachine))
        if dbmachine.vm_container:
            remove_plenaries.append(Plenary.get_plenary(dbmachine.vm_container))
            dbcontainer = dbmachine.vm_container.holder.holder_object
        else:
            dbcontainer = None

        if dbmachine.host:
            raise ArgumentError("{0} is still in use by {1:l} and cannot be "
                                "deleted.".format(dbmachine, dbmachine.host))
        addrs = []
        for addr in dbmachine.all_addresses():
            addrs.append("%s: %s" % (addr.logical_name, addr.ip))
        if addrs:
            addrmsg = ", ".join(addrs)
            raise ArgumentError("{0} still provides the following addresses, "
                                "delete them first: {1}.".format(dbmachine,
                                                                 addrmsg))

        session.delete(dbmachine)
        session.flush()

        key = remove_plenaries.get_remove_key()
        if dbcontainer:
            plenary_container = Plenary.get_plenary(dbcontainer, logger=logger)
            key = CompileKey.merge([key, plenary_container.get_write_key()])
        try:
            lock_queue.acquire(key)
            remove_plenaries.stash()
            if dbcontainer:
                plenary_container.write(locked=True)
            remove_plenaries.remove(locked=True)
        except:
            remove_plenaries.restore_stash()
            if dbcontainer:
                plenary_container.restore_stash()
            raise
        finally:
            lock_queue.release(key)
        return
Example #34
 def compile(self, session, logger, dbhost):
     """ compile plenary templates """
     plenary = PlenaryHost(dbhost, logger=logger)
     # Force a host lock as pan might overwrite the profile...
     key = CompileKey(domain=dbhost.branch.name, profile=dbhost.fqdn,
                      logger=logger)
     try:
         lock_queue.acquire(key)
         plenary.write(locked=True)
         td = TemplateDomain(dbhost.branch, dbhost.sandbox_author,
                             logger=logger)
         td.compile(session, only=[dbhost.fqdn], locked=True)
     except IncompleteError:
         raise ArgumentError("Run aq make for host %s first." % dbhost.fqdn)
     except:
         plenary.restore_stash()
         raise
     finally:
         lock_queue.release(key)
Example #35
    def render(self, session, logger, cluster, keepbindings, **arguments):
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        if not dbcluster.personality.archetype.is_compileable:
            raise ArgumentError("{0} is not a compilable archetype "
                                "({1!s}).".format(
                                    dbcluster,
                                    dbcluster.personality.archetype))

        chooser = Chooser(dbcluster,
                          logger=logger,
                          required_only=not (keepbindings))
        chooser.set_required()
        chooser.flush_changes()
        # Force a domain lock as pan might overwrite any of the profiles...
        key = CompileKey.merge([
            chooser.get_write_key(),
            CompileKey(domain=dbcluster.branch.name, logger=logger)
        ])
        try:
            lock_queue.acquire(key)
            chooser.write_plenary_templates(locked=True)

            profile_list = add_cluster_data(dbcluster)
            profile_list.extend(chooser.changed_server_fqdns())

            td = TemplateDomain(dbcluster.branch,
                                dbcluster.sandbox_author,
                                logger=logger)
            td.compile(session, only=profile_list, locked=True)

        except:
            chooser.restore_stash()

            # Okay, cleaned up templates, make sure the caller knows
            # we've aborted so that DB can be appropriately rollback'd.

            raise

        finally:
            lock_queue.release(key)

        return
Example #36
 def compile(self, session, logger, dbhost):
     """ compile plenary templates """
     plenary = PlenaryHost(dbhost, logger=logger)
     # Force a host lock as pan might overwrite the profile...
     key = CompileKey(domain=dbhost.branch.name,
                      profile=dbhost.fqdn,
                      logger=logger)
     try:
         lock_queue.acquire(key)
         plenary.write(locked=True)
         td = TemplateDomain(dbhost.branch,
                             dbhost.sandbox_author,
                             logger=logger)
         td.compile(session, only=[dbhost.fqdn], locked=True)
     except IncompleteError:
         raise ArgumentError("Run aq make for host %s first." % dbhost.fqdn)
     except:
         plenary.restore_stash()
         raise
     finally:
         lock_queue.release(key)
Example #37
    def compile(self, session, dbhost, logger, keepbindings):
        chooser = Chooser(dbhost,
                          logger=logger,
                          required_only=not (keepbindings))
        chooser.set_required()
        chooser.flush_changes()

        hosts = chooser.changed_server_fqdns()
        hosts.add(dbhost.fqdn)

        # Force a host lock as pan might overwrite the profile...
        key = chooser.get_write_key()
        for fqdn in hosts:
            key = CompileKey.merge([
                key,
                CompileKey(domain=dbhost.branch.name,
                           profile=fqdn,
                           logger=logger)
            ])
        try:
            lock_queue.acquire(key)
            chooser.write_plenary_templates(locked=True)

            td = TemplateDomain(dbhost.branch,
                                dbhost.sandbox_author,
                                logger=logger)
            td.compile(session, only=hosts, locked=True)

        except:
            if chooser:
                chooser.restore_stash()

            # Okay, cleaned up templates, make sure the caller knows
            # we've aborted so that the DB transaction can be rolled back.
            raise

        finally:
            lock_queue.release(key)

        return
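
The per-profile loop above grows the key by merging one CompileKey at a time.
Since CompileKey.merge() takes a list, the same key can be built in a single
call; here is a sketch using the example's own names (this assumes merge()
accepts a list of any length, as its two-element uses elsewhere suggest):

    key = CompileKey.merge(
        [chooser.get_write_key()] +
        [CompileKey(domain=dbhost.branch.name, profile=fqdn, logger=logger)
         for fqdn in hosts])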
Example #38
File: base.py Project: piojo/aquilon
    def remove(self, locked=False, remove_profile=False):
        """
        remove this plenary template
        """

        key = None
        try:
            if not locked:
                key = self.get_key()
                lock_queue.acquire(key)
            self.stash()

            self.logger.debug("Removing %r [%s]" % (self, self.old_path))
            remove_file(self.old_path, cleanup_directory=True,
                        logger=self.logger)
            self.removed = True
        # Most of the error handling routines would restore_stash...
        # but there's no need here if the remove failed. :)
        finally:
            if not locked:
                lock_queue.release(key)
        return
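
The remove() and write() examples use stash()/restore_stash() as a crude
single-file transaction: stash() snapshots the template's current on-disk
state, and restore_stash() puts that snapshot back if a later step fails. A
standalone illustration of the contract (not aquilon code; the class name and
file handling are made up for this sketch):

    import os

    class StashedFile(object):
        def __init__(self, path):
            self.path = path
            self._stash = None

        def stash(self):
            # Snapshot the current content; None means "did not exist".
            if os.path.exists(self.path):
                with open(self.path) as f:
                    self._stash = f.read()
            else:
                self._stash = None

        def restore_stash(self):
            # Put the snapshot back, or delete the file if there was none.
            if self._stash is None:
                if os.path.exists(self.path):
                    os.remove(self.path)
            else:
                with open(self.path, "w") as f:
                    f.write(self._stash)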
Example #39
    def render(self, session, logger, cluster, keepbindings, **arguments):
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        if not dbcluster.personality.archetype.is_compileable:
            raise ArgumentError("{0} is not a compilable archetype "
                                "({1!s}).".format(dbcluster,
                                                  dbcluster.personality.archetype))

        chooser = Chooser(dbcluster, logger=logger,
                          required_only=not keepbindings)
        chooser.set_required()
        chooser.flush_changes()
        # Force a domain lock as pan might overwrite any of the profiles...
        key = CompileKey.merge([chooser.get_write_key(),
                                CompileKey(domain=dbcluster.branch.name,
                                           logger=logger)])
        try:
            lock_queue.acquire(key)
            chooser.write_plenary_templates(locked=True)

            profile_list = add_cluster_data(dbcluster)
            profile_list.extend(chooser.changed_server_fqdns())

            td = TemplateDomain(dbcluster.branch, dbcluster.sandbox_author,
                                logger=logger)
            td.compile(session, only=profile_list, locked=True)

        except:
            chooser.restore_stash()

            # Okay, cleaned up templates, make sure the caller knows
            # we've aborted so that the DB transaction can be rolled back.

            raise

        finally:
            lock_queue.release(key)

        return
Example #40
File: base.py Project: ned21/aquilon
    def remove(self, locked=False):
        """
        remove this plenary template
        """

        key = None
        try:
            if not locked:
                key = self.get_remove_key()
                lock_queue.acquire(key)
            self.stash()
            remove_file(self.plenary_file, logger=self.logger)
            try:
                os.removedirs(self.plenary_directory)
            except OSError:
                pass
            self.removed = True
        # Most of the error handling routines would restore_stash...
        # but there's no need here if the remove failed. :)
        finally:
            if not locked:
                lock_queue.release(key)
        return
Example #41
File: base.py Project: jrha/aquilon
    def remove(self, locked=False):
        """
        remove this plenary template
        """

        key = None
        try:
            if not locked:
                key = self.get_remove_key()
                lock_queue.acquire(key)
            self.stash()
            remove_file(self.plenary_file, logger=self.logger)
            try:
                os.removedirs(self.plenary_directory)
            except OSError:
                pass
            self.removed = True
        # Most of the error handling routines would restore_stash...
        # but there's no need here if the remove failed. :)
        finally:
            if not locked:
                lock_queue.release(key)
        return
Example #42
    def render(self, session, logger, cluster, buildstatus, **arguments):
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        dbstatus = ClusterLifecycle.get_unique(session,
                                               buildstatus,
                                               compel=True)

        if not dbcluster.status.transition(dbcluster, dbstatus):
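            # transition() reports whether the status actually changed; if it
            # did not, there is nothing to write or compile.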
            return

        if not dbcluster.personality.archetype.is_compileable:
            return

        session.flush()

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbcluster))

        for dbhost in dbcluster.hosts:
            plenaries.append(Plenary.get_plenary(dbhost))

        # Force a host lock as pan might overwrite the profile...
        key = CompileKey(domain=dbcluster.branch.name, logger=logger)
        try:
            lock_queue.acquire(key)

            plenaries.write(locked=True)
            td = TemplateDomain(dbcluster.branch,
                                dbcluster.sandbox_author,
                                logger=logger)
            td.compile(session, plenaries.object_templates, locked=True)
        except:
            plenaries.restore_stash()
            raise
        finally:
            lock_queue.release(key)
        return
Example #43
    def render(self, session, logger, building, dryrun, incremental,
               **arguments):
        if building:
            dbbuilding = get_location(session, building=building)
        else:
            dbbuilding = None

        # --dryrun and --incremental do not mix well
        if dryrun and incremental:
            raise ArgumentError("--dryrun and --incremental cannot be given "
                                "simultaneously.")

        key = SyncKey(data="network", logger=logger)
        lock_queue.acquire(key)

        rundir = self.config.get("broker", "rundir")
        tempdir = mkdtemp(prefix="refresh_network_", dir=rundir)
        try:
            args = [
                self.config.get("broker", "qip_dump_subnetdata"),
                "--datarootdir", tempdir, "--format", "txt", "--noaudit"
            ]
            run_command(args, logger=logger)

            subnetdata = file(os.path.join(tempdir, "subnetdata.txt"), "r")
            refresher = QIPRefresh(session, logger, dbbuilding, dryrun,
                                   incremental)
            refresher.refresh(subnetdata)

            session.flush()

            if dryrun:
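                # flush() above surfaced the changes so they could be
                # validated and logged; rolling back now discards them, so
                # --dryrun leaves the database untouched.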
                session.rollback()
        finally:
            lock_queue.release(key)
            remove_dir(tempdir, logger=logger)
Example #44
    def render(self, session, logger, switch, model, rack, type, ip, vendor,
               serial, rename_to, discovered_macs, clear, discover, comments,
               **arguments):
        dbswitch = Switch.get_unique(session, switch, compel=True)

        oldinfo = DSDBRunner.snapshot_hw(dbswitch)

        if discover:
            discover_switch(session, logger, self.config, dbswitch, False)

        if vendor and not model:
            model = dbswitch.model.name
        if model:
            dbmodel = Model.get_unique(session,
                                       name=model,
                                       vendor=vendor,
                                       machine_type='switch',
                                       compel=True)
            dbswitch.model = dbmodel

        dblocation = get_location(session, rack=rack)
        if dblocation:
            dbswitch.location = dblocation

        if serial is not None:
            dbswitch.serial_no = serial

        # FIXME: What do the error messages for an invalid enum (switch_type)
        # look like?
        if type:
            Switch.check_type(type)
            dbswitch.switch_type = type

        if ip:
            update_primary_ip(session, dbswitch, ip)

        if comments is not None:
            dbswitch.comments = comments

        remove_plenary = None
        if rename_to:
            # Handling alias renaming would not be difficult in AQDB, but the
            # DSDB synchronization would be painful, so don't do that for now.
            # In theory we should check all configured IP addresses for aliases,
            # but this is the most common case
            if dbswitch.primary_name and dbswitch.primary_name.fqdn.aliases:
                raise ArgumentError(
                    "The switch has aliases and it cannot be "
                    "renamed. Please remove all aliases first.")
            remove_plenary = Plenary.get_plenary(dbswitch, logger=logger)
            rename_hardware(session, dbswitch, rename_to)

        if clear:
            session.query(ObservedMac).filter_by(switch=dbswitch).delete()

        if discovered_macs:
            now = datetime.now()
            for (macaddr, port) in discovered_macs:
                update_or_create_observed_mac(session, dbswitch, port, macaddr,
                                              now)

        session.flush()

        switch_plenary = Plenary.get_plenary(dbswitch, logger=logger)

        key = switch_plenary.get_write_key()
        if remove_plenary:
            key = CompileKey.merge([key, remove_plenary.get_remove_key()])
        try:
            lock_queue.acquire(key)
            if remove_plenary:
                remove_plenary.stash()
                remove_plenary.remove(locked=True)
            switch_plenary.write(locked=True)

            dsdb_runner = DSDBRunner(logger=logger)
            dsdb_runner.update_host(dbswitch, oldinfo)
            dsdb_runner.commit_or_rollback("Could not update switch in DSDB")
        except:
            if remove_plenary:
                remove_plenary.restore_stash()
            switch_plenary.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        return
Example #45
File: cluster.py Project: ned21/aquilon
    def render(self, session, logger, hostname, cluster, personality,
               **arguments):
        dbhost = hostname_to_host(session, hostname)
        dbcluster = Cluster.get_unique(session, cluster, compel=True)

        if dbcluster.status.name == 'decommissioned':
            raise ArgumentError("Cannot add hosts to decommissioned clusters.")

        # We only support changing personality within the same
        # archetype. The archetype decides things like which OS, how
        # it builds (dhcp, etc), whether it's compilable, and
        # switching all of that by side-effect seems wrong
        # somehow. And besides, it would make the user-interface and
        # implementation for this command ugly in order to support
        # changing all of those options.
        personality_change = False
        if personality is not None:
            dbpersonality = Personality.get_unique(session,
                                                   name=personality,
                                                   archetype=dbhost.archetype,
                                                   compel=True)
            if dbhost.personality != dbpersonality:
                dbhost.personality = dbpersonality
                personality_change = True

        # Allow for non-restricted clusters (the default?)
        if (len(dbcluster.allowed_personalities) > 0
                and dbhost.personality not in dbcluster.allowed_personalities):
            raise ArgumentError(
                "The personality %s for %s is not allowed "
                "by the cluster. Specify --personality "
                "and provide one of %s" %
                (dbhost.personality, dbhost.fqdn, ", ".join(
                    [x.name for x in dbcluster.allowed_personalities])))

        # Now that we've changed the personality, we can check
        # if this is a valid membership change
        dbcluster.validate_membership(dbhost)

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbcluster))

        if dbhost.cluster and dbhost.cluster != dbcluster:
            logger.client_info("Removing {0:l} from {1:l}.".format(
                dbhost, dbhost.cluster))
            old_cluster = dbhost.cluster
            old_cluster.hosts.remove(dbhost)
            remove_service_addresses(old_cluster, dbhost)
            old_cluster.validate()
            session.expire(dbhost, ['_cluster'])
            plenaries.append(Plenary.get_plenary(old_cluster))

        # Apply the service addresses to the new member
        for res in walk_resources(dbcluster):
            if not isinstance(res, ServiceAddress):
                continue
            apply_service_address(dbhost, res.interfaces, res)

        if dbhost.cluster:
            if personality_change:
                raise ArgumentError(
                    "{0:l} already in {1:l}, use "
                    "aq reconfigure to change personality.".format(
                        dbhost, dbhost.cluster))
            # the cluster has not changed, therefore there's nothing
            # to do here.
            return

        # Calculate the node index: build a map of all possible values, remove
        # the used ones, and pick the smallest remaining one
        node_index_map = set(xrange(len(dbcluster._hosts) + 1))
        for link in dbcluster._hosts:
            # The cluster may have been bigger in the past, so node indexes may
            # be larger than the current cluster size
            try:
                node_index_map.remove(link.node_index)
            except KeyError:
                pass

        dbcluster.hosts.append((dbhost, min(node_index_map)))
        dbcluster.validate()

        # demote a host when switching clusters
        # promote a host when switching clusters
        if dbhost.status.name == 'ready':
            if dbcluster.status.name != 'ready':
                dbalmost = HostLifecycle.get_unique(session,
                                                    'almostready',
                                                    compel=True)
                dbhost.status.transition(dbhost, dbalmost)
                plenaries.append(Plenary.get_plenary(dbhost))
        elif dbhost.status.name == 'almostready':
            if dbcluster.status.name == 'ready':
                dbready = HostLifecycle.get_unique(session,
                                                   'ready',
                                                   compel=True)
                dbhost.status.transition(dbhost, dbready)
                plenaries.append(Plenary.get_plenary(dbhost))

        session.flush()

        # Enforce that service instances are set correctly for the
        # new cluster association.
        chooser = Chooser(dbhost, logger=logger)
        chooser.set_required()
        chooser.flush_changes()
        # the chooser will include the host plenary
        key = CompileKey.merge(
            [chooser.get_write_key(),
             plenaries.get_write_key()])

        try:
            lock_queue.acquire(key)
            chooser.write_plenary_templates(locked=True)
            plenaries.write(locked=True)
        except:
            chooser.restore_stash()
            plenaries.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        return
Example #46
File: base.py Project: piojo/aquilon
    def write(self, locked=False):
        """Write out the template.

        If the content is unchanged, then the file will not be modified
        (preserving the mtime).

        Returns the number of files that were written.

        If locked is True then it is assumed that error handling happens
        higher in the call stack.

        """

        if self.template_type == "object" and \
           hasattr(self.dbobj, "personality") and \
           self.dbobj.personality and \
           not self.dbobj.personality.archetype.is_compileable:
            return 0

        # This is a hack to handle the case when the DB object has been deleted,
        # but a plenary instance still references it (probably buried inside a
        # PlenaryCollection). Calling self.will_change() on such a plenary would
        # fail, because the primary key is None, which is otherwise impossible.
        if isinstance(self.dbobj, Base):
            state = inspect(self.dbobj)
            if state.deleted:
                return 0

        if not self.new_content:
            self.new_content = self._generate_content()
        content = self.new_content

        key = None
        try:
            if not locked:
                key = self.get_key()
                lock_queue.acquire(key)

            self.stash()

            if self.old_content == content and \
               not self.removed and not self.changed:
                # optimise out the write (leaving the mtime good for ant)
                # if nothing is actually changed
                return 0

            if not self.new_path:
                raise InternalError("New path not set - likely write() is "
                                    "called on deleted object.")

            # If the plenary has moved, then clean up any potential leftover
            # files from the old location
            if self.new_path != self.old_path:
                self.remove(locked=True)

            self.logger.debug("Writing %r [%s]" % (self, self.new_path))

            write_file(self.new_path, content, create_directory=True,
                       logger=self.logger)
            self.removed = False
            if self.old_content != content:
                self.changed = True
        except Exception:
            if not locked:
                self.restore_stash()
            raise
        finally:
            if not locked:
                lock_queue.release(key)

        # Exactly one plenary template was written at this point.
        return 1
Example #47
File: add_host.py Project: ned21/aquilon
    def render(self,
               session,
               logger,
               hostname,
               machine,
               archetype,
               domain,
               sandbox,
               osname,
               osversion,
               buildstatus,
               personality,
               comments,
               zebra_interfaces,
               grn,
               eon_id,
               skip_dsdb_check=False,
               **arguments):
        dbarchetype = Archetype.get_unique(session, archetype, compel=True)
        section = "archetype_" + dbarchetype.name

        # This is for the various add_*_host commands
        if not domain and not sandbox:
            domain = self.config.get(section, "host_domain")

        (dbbranch, dbauthor) = get_branch_and_author(session,
                                                     logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     compel=True)

        if hasattr(dbbranch, "allow_manage") and not dbbranch.allow_manage:
            raise ArgumentError(
                "Adding hosts to {0:l} is not allowed.".format(dbbranch))

        if not buildstatus:
            buildstatus = 'build'
        dbstatus = HostLifecycle.get_unique(session, buildstatus, compel=True)
        dbmachine = Machine.get_unique(session, machine, compel=True)
        oldinfo = DSDBRunner.snapshot_hw(dbmachine)

        if not personality:
            if self.config.has_option(section, "default_personality"):
                personality = self.config.get(section, "default_personality")
            else:
                personality = 'generic'
        dbpersonality = Personality.get_unique(session,
                                               name=personality,
                                               archetype=dbarchetype,
                                               compel=True)

        if not osname:
            if self.config.has_option(section, "default_osname"):
                osname = self.config.get(section, "default_osname")
        if not osversion:
            if self.config.has_option(section, "default_osversion"):
                osversion = self.config.get(section, "default_osversion")

        if not osname or not osversion:
            raise ArgumentError("Can not determine a sensible default OS "
                                "for archetype %s. Please use the "
                                "--osname and --osversion parameters." %
                                (dbarchetype.name))

        dbos = OperatingSystem.get_unique(session,
                                          name=osname,
                                          version=osversion,
                                          archetype=dbarchetype,
                                          compel=True)

        if (dbmachine.model.machine_type == 'aurora_node'
                and dbpersonality.archetype.name != 'aurora'):
            raise ArgumentError("Machines of type aurora_node can only be "
                                "added with archetype aurora.")

        if dbmachine.host:
            raise ArgumentError("{0:c} {0.label} is already allocated to "
                                "{1:l}.".format(dbmachine, dbmachine.host))

        if grn or eon_id:
            dbgrn = lookup_grn(session,
                               grn,
                               eon_id,
                               logger=logger,
                               config=self.config)
        else:
            dbgrn = dbpersonality.owner_grn

        dbhost = Host(machine=dbmachine,
                      branch=dbbranch,
                      owner_grn=dbgrn,
                      sandbox_author=dbauthor,
                      personality=dbpersonality,
                      status=dbstatus,
                      operating_system=dbos,
                      comments=comments)
        session.add(dbhost)

        if self.config.has_option("archetype_" + archetype,
                                  "default_grn_target"):
            dbhost.grns.append((dbhost, dbgrn,
                                self.config.get("archetype_" + archetype,
                                                "default_grn_target")))

        if zebra_interfaces:
            # --autoip does not make sense for Zebra (at least not the way it's
            # implemented currently)
            dbinterface = None
        else:
            dbinterface = get_boot_interface(dbmachine)

        # This method is allowed to return None. This can only happen
        # (currently) using add_aurora_host, add_windows_host, or possibly by
        # bypassing the aq client and posting a request directly.
        audit_results = []
        ip = generate_ip(session,
                         logger,
                         dbinterface,
                         audit_results=audit_results,
                         **arguments)

        dbdns_rec, newly_created = grab_address(session,
                                                hostname,
                                                ip,
                                                allow_restricted_domain=True,
                                                allow_reserved=True,
                                                preclude=True)
        dbmachine.primary_name = dbdns_rec

        # Fix up auxiliary addresses to point to the primary name by default
        if ip:
            dns_env = dbdns_rec.fqdn.dns_environment

            for addr in dbmachine.all_addresses():
                if addr.interface.interface_type == "management":
                    continue
                if addr.service_address_id:  # pragma: no cover
                    continue
                for rec in addr.dns_records:
                    if rec.fqdn.dns_environment == dns_env:
                        rec.reverse_ptr = dbdns_rec.fqdn

        if zebra_interfaces:
            if not ip:
                raise ArgumentError(
                    "Zebra configuration requires an IP address.")
            dbsrv_addr = self.assign_zebra_address(session, dbmachine,
                                                   dbdns_rec, zebra_interfaces)
        else:
            if ip:
                if not dbinterface:
                    raise ArgumentError(
                        "You have specified an IP address for the "
                        "host, but {0:l} does not have a bootable "
                        "interface.".format(dbmachine))
                assign_address(dbinterface, ip, dbdns_rec.network)
            dbsrv_addr = None

        session.flush()

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbmachine))
        if dbmachine.vm_container:
            plenaries.append(Plenary.get_plenary(dbmachine.vm_container))
        if dbsrv_addr:
            plenaries.append(Plenary.get_plenary(dbsrv_addr))

        key = plenaries.get_write_key()
        try:
            lock_queue.acquire(key)
            plenaries.write(locked=True)

            # XXX: This (and some of the code above) is horrible.  There
            # should be a generic/configurable hook here that could kick
            # in based on archetype and/or domain.
            dsdb_runner = DSDBRunner(logger=logger)
            if dbhost.archetype.name == 'aurora':
                # For aurora, check that DSDB has a record of the host.
                if not skip_dsdb_check:
                    try:
                        dsdb_runner.show_host(hostname)
                    except ProcessException, e:
                        raise ArgumentError("Could not find host in DSDB: %s" %
                                            e)
            elif not dbmachine.primary_ip:
                logger.info("No IP for %s, not adding to DSDB." %
                            dbmachine.fqdn)
        except:
            plenaries.restore_stash()
            raise
        finally:
            lock_queue.release(key)
Example #48
    def render(self, session, logger, machine, model, vendor, serial,
               chassis, slot, clearchassis, multislot,
               vmhost, cluster, allow_metacluster_change,
               cpuname, cpuvendor, cpuspeed, cpucount, memory, ip,
               **arguments):
        dbmachine = Machine.get_unique(session, machine, compel=True)
        plenaries = PlenaryCollection(logger=logger)
        oldinfo = DSDBRunner.snapshot_hw(dbmachine)

        if clearchassis:
            del dbmachine.chassis_slot[:]

        remove_plenaries = PlenaryCollection(logger=logger)
        if chassis:
            dbchassis = Chassis.get_unique(session, chassis, compel=True)
            if machine_plenary_will_move(old=dbmachine.location,
                                         new=dbchassis.location):
                remove_plenaries.append(Plenary.get_plenary(dbmachine))
            dbmachine.location = dbchassis.location
            if slot is None:
                raise ArgumentError("Option --chassis requires --slot "
                                    "information.")
            self.adjust_slot(session, logger,
                             dbmachine, dbchassis, slot, multislot)
        elif slot is not None:
            dbchassis = None
            for dbslot in dbmachine.chassis_slot:
                if dbchassis and dbslot.chassis != dbchassis:
                    raise ArgumentError("Machine in multiple chassis, please "
                                        "use --chassis argument.")
                dbchassis = dbslot.chassis
            if not dbchassis:
                raise ArgumentError("Option --slot requires --chassis "
                                    "information.")
            self.adjust_slot(session, logger,
                             dbmachine, dbchassis, slot, multislot)

        dblocation = get_location(session, **arguments)
        if dblocation:
            loc_clear_chassis = False
            for dbslot in dbmachine.chassis_slot:
                dbcl = dbslot.chassis.location
                if dbcl != dblocation:
                    if chassis or slot is not None:
                        raise ArgumentError("{0} conflicts with chassis {1!s} "
                                            "location {2}.".format(dblocation,
                                                        dbslot.chassis, dbcl))
                    else:
                        loc_clear_chassis = True
            if loc_clear_chassis:
                del dbmachine.chassis_slot[:]
            if machine_plenary_will_move(old=dbmachine.location,
                                         new=dblocation):
                remove_plenaries.append(Plenary.get_plenary(dbmachine))
            dbmachine.location = dblocation

        if model or vendor:
            # If overriding model, should probably overwrite default
            # machine specs as well.
            if not model:
                model = dbmachine.model.name
            if not vendor:
                vendor = dbmachine.model.vendor.name
            dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                       compel=True)
            if dbmodel.machine_type not in ['blade', 'rackmount',
                                            'workstation', 'aurora_node',
                                            'virtual_machine']:
                raise ArgumentError("The update_machine command cannot update "
                                    "machines of type %s." %
                                    dbmodel.machine_type)
            # We probably could do this by forcing either cluster or
            # location data to be available as appropriate, but really?
            # Failing seems reasonable.
            if dbmodel.machine_type != dbmachine.model.machine_type and \
               'virtual_machine' in [dbmodel.machine_type,
                                     dbmachine.model.machine_type]:
                raise ArgumentError("Cannot change machine from %s to %s." %
                                    (dbmachine.model.machine_type,
                                     dbmodel.machine_type))

            old_nic_model = dbmachine.model.nic_model
            new_nic_model = dbmodel.nic_model
            if old_nic_model != new_nic_model:
                for iface in dbmachine.interfaces:
                    if iface.model == old_nic_model:
                        iface.model = new_nic_model

            dbmachine.model = dbmodel

        if cpuname or cpuvendor or cpuspeed is not None:
            dbcpu = Cpu.get_unique(session, name=cpuname, vendor=cpuvendor,
                                   speed=cpuspeed, compel=True)
            dbmachine.cpu = dbcpu

        if cpucount is not None:
            dbmachine.cpu_quantity = cpucount
        if memory is not None:
            dbmachine.memory = memory
        if serial:
            dbmachine.serial_no = serial

        if ip:
            update_primary_ip(session, dbmachine, ip)

        # FIXME: For now, if a machine has its interface(s) in a portgroup
        # this command will need to be followed by an update_interface to
        # re-evaluate the portgroup for overflow.
        # It would be better to have --pg and --autopg options to let it
        # happen at this point.
        if cluster or vmhost:
            if not dbmachine.vm_container:
                raise ArgumentError("Cannot convert a physical machine to "
                                    "virtual.")

            old_holder = dbmachine.vm_container.holder.holder_object
            resholder = get_resource_holder(session, hostname=vmhost,
                                            cluster=cluster, compel=False)
            new_holder = resholder.holder_object

            # TODO: do we want to allow moving machines between the cluster and
            # metacluster level?
            if new_holder.__class__ != old_holder.__class__:
                raise ArgumentError("Cannot move a VM between a cluster and a "
                                    "stand-alone host.")

            if cluster:
                if new_holder.metacluster != old_holder.metacluster \
                   and not allow_metacluster_change:
                    raise ArgumentError("Current {0:l} does not match "
                                        "new {1:l}."
                                        .format(old_holder.metacluster,
                                                new_holder.metacluster))

            remove_plenaries.append(Plenary.get_plenary(dbmachine.vm_container))
            dbmachine.vm_container.holder = resholder

            for dbdisk in dbmachine.disks:
                if not isinstance(dbdisk, VirtualDisk):
                    continue
                old_share = dbdisk.share
                if isinstance(old_share.holder, BundleResource):
                    resourcegroup = old_share.holder.name
                else:
                    resourcegroup = None
                new_share = find_share(new_holder, resourcegroup, old_share.name,
                                       error=ArgumentError)

                # If the shares are registered at the metacluster level and both
                # clusters are in the same metacluster, then there will be no
                # real change here
                if new_share != old_share:
                    old_share.disks.remove(dbdisk)
                    new_share.disks.append(dbdisk)

            if isinstance(new_holder, Cluster):
                dbmachine.location = new_holder.location_constraint
            else:
                dbmachine.location = new_holder.location

            session.flush()
            plenaries.append(Plenary.get_plenary(old_holder))
            plenaries.append(Plenary.get_plenary(new_holder))

        if dbmachine.vm_container:
            plenaries.append(Plenary.get_plenary(dbmachine.vm_container))

        session.flush()

        # Check if the changed parameters still meet cluster capacity
        # requirements
        if dbmachine.cluster:
            dbmachine.cluster.validate()
            if allow_metacluster_change:
                dbmachine.cluster.metacluster.validate()
        if dbmachine.host and dbmachine.host.cluster:
            dbmachine.host.cluster.validate()

        # The check to make sure a plenary file is not written out for
        # dummy aurora hardware is within the call to write().  This way
        # it is consistent without altering (and forgetting to alter)
        # all the calls to the method.
        plenaries.append(Plenary.get_plenary(dbmachine))
        if remove_plenaries.plenaries and dbmachine.host:
            plenaries.append(Plenary.get_plenary(dbmachine.host))

        key = CompileKey.merge([plenaries.get_write_key(),
                                remove_plenaries.get_remove_key()])
        try:
            lock_queue.acquire(key)
            remove_plenaries.stash()
            plenaries.write(locked=True)
            remove_plenaries.remove(locked=True)

            if dbmachine.host:
                # XXX: May need to reconfigure.
                pass

            dsdb_runner = DSDBRunner(logger=logger)
            dsdb_runner.update_host(dbmachine, oldinfo)
            dsdb_runner.commit_or_rollback("Could not update machine in DSDB")
        except:
            plenaries.restore_stash()
            remove_plenaries.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        return
Example #49
    def render(self, session, logger, interface, machine, mac, automac, model,
               vendor, pg, autopg, type, comments, **arguments):
        dbmachine = Machine.get_unique(session, machine, compel=True)
        oldinfo = DSDBRunner.snapshot_hw(dbmachine)
        audit_results = []

        q = session.query(Interface)
        q = q.filter_by(name=interface, hardware_entity=dbmachine)
        if q.first():
            raise ArgumentError(
                "Machine %s already has an interface named %s." %
                (machine, interface))

        if not type:
            type = 'public'
            management_types = ['bmc', 'ilo', 'ipmi']
            for mtype in management_types:
                if interface.startswith(mtype):
                    type = 'management'
                    break

            if interface.startswith("bond"):
                type = 'bonding'
            elif interface.startswith("br"):
                type = 'bridge'

            # Test this last; VLANs can be added on top of almost anything
            if '.' in interface:
                type = 'vlan'
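            # (Examples: "eth1" -> public, "ipmi0" -> management,
            # "bond0" -> bonding, "br0" -> bridge, "bond0.100" -> vlan.)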

        if type == "oa" or type == "loopback":
            raise ArgumentError("Interface type '%s' is not valid for "
                                "machines." % type)

        bootable = None
        if type == 'public':
            if interface == 'eth0':
                bootable = True
            else:
                bootable = False

        dbmanager = None
        pending_removals = PlenaryCollection()
        dsdb_runner = DSDBRunner(logger=logger)
        if mac:
            prev = session.query(Interface).filter_by(mac=mac).first()
            if prev and prev.hardware_entity == dbmachine:
                raise ArgumentError("{0} already has an interface with MAC "
                                    "address {1}.".format(dbmachine, mac))
            # Is the conflicting interface something that can be
            # removed?  It is if:
            # - we are currently attempting to add a management interface
            # - the old interface belongs to a machine
            # - the old interface is associated with a host
            # - that host was blindly created, and thus can be removed safely
            if prev and type == 'management' and \
               prev.hardware_entity.hardware_type == 'machine' and \
               prev.hardware_entity.host and \
               prev.hardware_entity.host.status.name == 'blind':
                # FIXME: Is this just always allowed?  Maybe restrict
                # to only aqd-admin and the host itself?
                dummy_machine = prev.hardware_entity
                dummy_ip = dummy_machine.primary_ip
                old_fqdn = str(dummy_machine.primary_name)
                old_iface = prev.name
                old_mac = prev.mac
                old_network = get_net_id_from_ip(session, dummy_ip)
                self.remove_prev(session, logger, prev, pending_removals)
                session.flush()
                dsdb_runner.delete_host_details(old_fqdn, dummy_ip, old_iface,
                                                old_mac)
                self.consolidate_names(session, logger, dbmachine,
                                       dummy_machine.label, pending_removals)
                # It seems like a shame to throw away the IP address that
                # had been allocated for the blind host.  Try to use it
                # as it should be used...
                dbmanager = self.add_manager(session, logger, dbmachine,
                                             dummy_ip, old_network)
            elif prev:
                msg = describe_interface(session, prev)
                raise ArgumentError("MAC address %s is already in use: %s." %
                                    (mac, msg))
        elif automac:
            mac = self.generate_mac(session, dbmachine)
            audit_results.append(('mac', mac))
        else:
            # Ignore: the MAC address is allowed to be null now
            pass

        if pg is not None:
            port_group = verify_port_group(dbmachine, pg)
        elif autopg:
            port_group = choose_port_group(session, logger, dbmachine)
            audit_results.append(('pg', port_group))
        else:
            port_group = None

        dbinterface = get_or_create_interface(session,
                                              dbmachine,
                                              name=interface,
                                              vendor=vendor,
                                              model=model,
                                              interface_type=type,
                                              mac=mac,
                                              bootable=bootable,
                                              port_group=port_group,
                                              comments=comments,
                                              preclude=True)

        # So far, we're *only* creating a manager if we happen to be
        # removing a blind entry and we can steal its IP address.
        if dbmanager:
            assign_address(dbinterface, dbmanager.ip, dbmanager.network)

        session.add(dbinterface)
        session.flush()

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbmachine))
        if pending_removals and dbmachine.host:
            # Not an exact test, but the file won't be re-written
            # if the contents are the same, so calling too often is
            # not a major expense.
            plenaries.append(Plenary.get_plenary(dbmachine.host))
        # Even though there may be removals going on the write key
        # should be sufficient here.
        key = plenaries.get_write_key()
        try:
            lock_queue.acquire(key)
            pending_removals.stash()
            plenaries.write(locked=True)
            pending_removals.remove(locked=True)

            dsdb_runner.update_host(dbmachine, oldinfo)
            dsdb_runner.commit_or_rollback("Could not update host in DSDB")
        except:
            plenaries.restore_stash()
            pending_removals.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        if dbmachine.host:
            # FIXME: reconfigure host
            pass

        for name, value in audit_results:
            self.audit_result(session, name, value, **arguments)
        return
Example #50
    def render(
        self,
        session,
        logger,
        machine,
        chassis,
        switch,
        interface,
        fqdn,
        ip,
        label,
        keep_dns,
        network_environment,
        **kwargs
    ):

        if machine:
            hwtype = "machine"
            hwname = machine
        elif chassis:
            hwtype = "chassis"
            hwname = chassis
        elif switch:
            hwtype = "switch"
            hwname = switch

        dbhw_ent = HardwareEntity.get_unique(session, hwname, hardware_type=hwtype, compel=True)
        dbinterface = Interface.get_unique(session, hardware_entity=dbhw_ent, name=interface, compel=True)
        dbnet_env = NetworkEnvironment.get_unique_or_default(session, network_environment)

        oldinfo = DSDBRunner.snapshot_hw(dbhw_ent)

        if fqdn:
            dbdns_rec = ARecord.get_unique(session, fqdn=fqdn, dns_environment=dbnet_env.dns_environment, compel=True)
            ip = dbdns_rec.ip

        addr = None
        if ip:
            addr = first_of(dbinterface.assignments, lambda x: x.ip == ip)
            if not addr:
                raise ArgumentError("{0} does not have IP address {1} assigned to " "it.".format(dbinterface, ip))
        elif label is not None:
            addr = first_of(dbinterface.assignments, lambda x: x.label == label)
            if not addr:
                raise ArgumentError("{0} does not have an address with label " "{1}.".format(dbinterface, label))

        if not addr:
            raise ArgumentError("Please specify the address to be removed " "using either --ip, --label, or --fqdn.")

        dbnetwork = addr.network
        ip = addr.ip

        if dbnetwork.network_environment != dbnet_env:
            raise ArgumentError(
                "The specified address lives in {0:l}, not in "
                "{1:l}.  Use the --network_environment option "
                "to select the correct environment.".format(dbnetwork.network_environment, dbnet_env)
            )

        # Forbid removing the primary name
        if ip == dbhw_ent.primary_ip:
            raise ArgumentError("The primary IP address of a hardware entity " "cannot be removed.")

        dbinterface.assignments.remove(addr)

        # Check if the address was assigned to multiple interfaces, and remove
        # the DNS entries if this was the last use
        q = session.query(AddressAssignment)
        q = q.filter_by(network=dbnetwork)
        q = q.filter_by(ip=ip)
        other_uses = q.all()
        if not other_uses and not keep_dns:
            q = session.query(ARecord)
            q = q.filter_by(network=dbnetwork)
            q = q.filter_by(ip=ip)
            q = q.join(ARecord.fqdn)
            q = q.filter_by(dns_environment=dbnet_env.dns_environment)
            map(delete_dns_record, q.all())
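            # (map() is used here purely for its side effect of deleting
            # each record; the returned list is discarded.)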

        session.flush()

        dbhost = getattr(dbhw_ent, "host", None)
        if dbhost:
            plenary_info = PlenaryHost(dbhost, logger=logger)
            key = plenary_info.get_write_key()
            try:
                lock_queue.acquire(key)
                try:
                    plenary_info.write(locked=True)
                except IncompleteError:
                    # FIXME: if this command is used after "add host" but before
                    # "make", then writing out the template will fail due to
                    # required services not being assigned. Ignore this error
                    # for now.
                    plenary_info.restore_stash()

                dsdb_runner = DSDBRunner(logger=logger)
                dsdb_runner.update_host(dbhw_ent, oldinfo)

                if not other_uses and keep_dns:
                    q = session.query(ARecord)
                    q = q.filter_by(network=dbnetwork)
                    q = q.filter_by(ip=ip)
                    dbdns_rec = q.first()
                    dsdb_runner.add_host_details(dbdns_rec.fqdn, ip)

                dsdb_runner.commit_or_rollback("Could not add host to DSDB")
            except:
                plenary_info.restore_stash()
                raise
            finally:
                lock_queue.release(key)
        else:
            dsdb_runner = DSDBRunner(logger=logger)
            dsdb_runner.update_host(dbhw_ent, oldinfo)
            dsdb_runner.commit_or_rollback("Could not add host to DSDB")

        return
Example #51
    def render(self, session, logger, switch, model, rack, type, ip, vendor,
               serial, rename_to, discovered_macs, clear, discover, comments, **arguments):
        dbswitch = Switch.get_unique(session, switch, compel=True)

        oldinfo = DSDBRunner.snapshot_hw(dbswitch)

        if discover:
            discover_switch(session, logger, self.config, dbswitch, False)

        if vendor and not model:
            model = dbswitch.model.name
        if model:
            dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                       machine_type='switch', compel=True)
            dbswitch.model = dbmodel

        dblocation = get_location(session, rack=rack)
        if dblocation:
            dbswitch.location = dblocation

        if serial is not None:
            dbswitch.serial_no = serial

        # FIXME: What do the error messages for an invalid enum (switch_type)
        # look like?
        if type:
            Switch.check_type(type)
            dbswitch.switch_type = type

        if ip:
            update_primary_ip(session, dbswitch, ip)

        if comments is not None:
            dbswitch.comments = comments

        remove_plenary = None
        if rename_to:
            # Handling alias renaming would not be difficult in AQDB, but the
            # DSDB synchronization would be painful, so don't do that for now.
            # In theory we should check all configured IP addresses for aliases,
            # but this is the most common case
            if dbswitch.primary_name and dbswitch.primary_name.fqdn.aliases:
                raise ArgumentError("The switch has aliases and it cannot be "
                                    "renamed. Please remove all aliases first.")
            remove_plenary = Plenary.get_plenary(dbswitch, logger=logger)
            rename_hardware(session, dbswitch, rename_to)

        if clear:
            session.query(ObservedMac).filter_by(switch=dbswitch).delete()

        if discovered_macs:
            now = datetime.now()
            for (macaddr, port) in discovered_macs:
                update_or_create_observed_mac(session, dbswitch, port, macaddr, now)

        session.flush()

        switch_plenary = Plenary.get_plenary(dbswitch, logger=logger)

        key = switch_plenary.get_write_key()
        if remove_plenary:
            key = CompileKey.merge([key, remove_plenary.get_remove_key()])
        try:
            lock_queue.acquire(key)
            if remove_plenary:
                remove_plenary.stash()
                remove_plenary.remove(locked=True)
            switch_plenary.write(locked=True)

            dsdb_runner = DSDBRunner(logger=logger)
            dsdb_runner.update_host(dbswitch, oldinfo)
            dsdb_runner.commit_or_rollback("Could not update switch in DSDB")
        except:
            if remove_plenary:
                remove_plenary.restore_stash()
            switch_plenary.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        return
Example #52
    def render(self, session, logger, hostname, manager, interface, mac,
               comments, **arguments):
        dbhost = hostname_to_host(session, hostname)
        dbmachine = dbhost.machine
        oldinfo = DSDBRunner.snapshot_hw(dbmachine)

        if not manager:
            manager = "%sr.%s" % (dbmachine.primary_name.fqdn.name,
                                  dbmachine.primary_name.fqdn.dns_domain.name)
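            # e.g. a host whose primary name is "foo.example.com" defaults
            # to the manager name "foor.example.com"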

        dbinterface = get_or_create_interface(session,
                                              dbmachine,
                                              name=interface,
                                              mac=mac,
                                              interface_type='management')

        addrs = ", ".join([
            "%s [%s]" % (addr.logical_name, addr.ip)
            for addr in dbinterface.assignments
        ])
        if addrs:
            raise ArgumentError("{0} already has the following addresses: "
                                "{1}.".format(dbinterface, addrs))

        audit_results = []
        ip = generate_ip(session,
                         logger,
                         dbinterface,
                         compel=True,
                         audit_results=audit_results,
                         **arguments)

        dbdns_rec, newly_created = grab_address(session,
                                                manager,
                                                ip,
                                                comments=comments,
                                                preclude=True)

        assign_address(dbinterface, ip, dbdns_rec.network)

        session.flush()

        plenary_info = PlenaryMachineInfo(dbmachine, logger=logger)
        key = plenary_info.get_write_key()
        try:
            lock_queue.acquire(key)
            plenary_info.write(locked=True)

            dsdb_runner = DSDBRunner(logger=logger)
            dsdb_runner.update_host(dbmachine, oldinfo)
            dsdb_runner.commit_or_rollback("Could not add host to DSDB")
        except:
            plenary_info.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        if dbmachine.host:
            # XXX: Host needs to be reconfigured.
            pass

        for name, value in audit_results:
            self.audit_result(session, name, value, **arguments)
        return
Example #53
    def render(
            self,
            session,
            logger,
            cluster,
            personality,
            max_members,
            fix_location,
            down_hosts_threshold,
            maint_threshold,
            comments,
            # ESX specific options
            switch,
            memory_capacity,
            clear_overrides,
            vm_to_host_ratio,
            **arguments):

        dbcluster = Cluster.get_unique(session, cluster, compel=True)

        if dbcluster.cluster_type == 'meta':
            raise ArgumentError("%s should not be a metacluster." %
                                format(dbcluster))

        cluster_updated = False
        remove_plenaries = PlenaryCollection(logger=logger)
        plenaries = PlenaryCollection(logger=logger)

        (vm_count, host_count) = force_ratio("vm_to_host_ratio",
                                             vm_to_host_ratio)
        if down_hosts_threshold is not None:
            (perc, dht) = Cluster.parse_threshold(down_hosts_threshold)
            dbcluster.down_hosts_threshold = dht
            dbcluster.down_hosts_percent = perc
            cluster_updated = True

        if dbcluster.cluster_type == "esx":
            if vm_count is not None or down_hosts_threshold is not None:
                if vm_count is None:
                    vm_count = dbcluster.vm_count
                    host_count = dbcluster.host_count

                dht = dbcluster.down_hosts_threshold
                perc = dbcluster.down_hosts_percent

                dbcluster.validate(vm_part=vm_count,
                                   host_part=host_count,
                                   down_hosts_threshold=dht,
                                   down_hosts_percent=perc)

                dbcluster.vm_count = vm_count
                dbcluster.host_count = host_count
                cluster_updated = True

        if switch is not None:
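            # Note: an empty (but not None) switch value clears the
            # association below.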
            if switch:
                # FIXME: Verify that any hosts are on the same network
                dbswitch = Switch.get_unique(session, switch, compel=True)
                plenaries.append(Plenary.get_plenary(dbswitch))
            else:
                dbswitch = None
            dbcluster.switch = dbswitch
            cluster_updated = True

        if memory_capacity is not None:
            dbcluster.memory_capacity = memory_capacity
            dbcluster.validate()
            cluster_updated = True

        if clear_overrides is not None:
            dbcluster.memory_capacity = None
            dbcluster.validate()
            cluster_updated = True

        location_updated = update_cluster_location(session, logger, dbcluster,
                                                   fix_location, plenaries,
                                                   remove_plenaries,
                                                   **arguments)

        if location_updated:
            cluster_updated = True

        if personality:
            archetype = dbcluster.personality.archetype.name
            dbpersonality = Personality.get_unique(session,
                                                   name=personality,
                                                   archetype=archetype,
                                                   compel=True)
            if not dbpersonality.is_cluster:
                raise ArgumentError("Personality {0} is not a cluster " +
                                    "personality".format(dbpersonality))
            dbcluster.personality = dbpersonality
            cluster_updated = True

        if max_members is not None:
            current_members = len(dbcluster.hosts)
            if max_members < current_members:
                raise ArgumentError(
                    "%s has %d hosts bound, which exceeds "
                    "the requested limit %d." %
                    (format(dbcluster), current_members, max_members))
            dbcluster.max_hosts = max_members
            cluster_updated = True

        if comments is not None:
            dbcluster.comments = comments
            cluster_updated = True

        if down_hosts_threshold is not None:
            (dbcluster.down_hosts_percent,
             dbcluster.down_hosts_threshold) = \
                Cluster.parse_threshold(down_hosts_threshold)
            cluster_updated = True

        if maint_threshold is not None:
            (dbcluster.down_maint_percent,
             dbcluster.down_maint_threshold) = \
                Cluster.parse_threshold(maint_threshold)
            cluster_updated = True

        if not cluster_updated:
            return

        session.add(dbcluster)
        session.flush()

        plenaries.append(Plenary.get_plenary(dbcluster))

        # Merge the write and remove keys so all template changes happen
        # under a single compile lock.
        key = CompileKey.merge(
            [plenaries.get_write_key(),
             remove_plenaries.get_remove_key()])
        try:
            lock_queue.acquire(key)
            remove_plenaries.stash()
            plenaries.write(locked=True)
            remove_plenaries.remove(locked=True)
        except:
            remove_plenaries.restore_stash()
            plenaries.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        return
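
The (perc, dht) pair above comes from Cluster.parse_threshold, which turns a
threshold string such as "2" or "50%" into a (percent-flag, value) pair. Its
implementation is not shown in this example; a minimal sketch of such a
parser, with illustrative names only, might look like this:

import re

def parse_threshold(threshold):
    # Illustrative sketch: parse "2" or "50%" into (is_percent, value),
    # matching how (perc, dht) is consumed above.  The real
    # Cluster.parse_threshold may differ in details and error handling.
    match = re.match(r'^(\d+)(%?)$', threshold)
    if not match:
        raise ValueError("Threshold should be a number, optionally "
                         "followed by '%%': got %r" % threshold)
    return (bool(match.group(2)), int(match.group(1)))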
Example #54
    def render(self, session, logger, machine, chassis, switch, interface,
               fqdn, ip, label, keep_dns, network_environment, **kwargs):

        if machine:
            hwtype = 'machine'
            hwname = machine
        elif chassis:
            hwtype = 'chassis'
            hwname = chassis
        elif switch:
            hwtype = 'switch'
            hwname = switch
        else:
            # Without this guard, hwtype and hwname would be unbound below
            # if none of the hardware options were given.
            raise ArgumentError("Please specify one of --machine, --chassis "
                                "or --switch.")

        dbhw_ent = HardwareEntity.get_unique(session,
                                             hwname,
                                             hardware_type=hwtype,
                                             compel=True)
        dbinterface = Interface.get_unique(session,
                                           hardware_entity=dbhw_ent,
                                           name=interface,
                                           compel=True)
        dbnet_env = NetworkEnvironment.get_unique_or_default(
            session, network_environment)

        oldinfo = DSDBRunner.snapshot_hw(dbhw_ent)

        if fqdn:
            dbdns_rec = ARecord.get_unique(
                session,
                fqdn=fqdn,
                dns_environment=dbnet_env.dns_environment,
                compel=True)
            ip = dbdns_rec.ip

        addr = None
        if ip:
            addr = first_of(dbinterface.assignments, lambda x: x.ip == ip)
            if not addr:
                raise ArgumentError(
                    "{0} does not have IP address {1} assigned to "
                    "it.".format(dbinterface, ip))
        elif label is not None:
            addr = first_of(dbinterface.assignments,
                            lambda x: x.label == label)
            if not addr:
                raise ArgumentError("{0} does not have an address with label "
                                    "{1}.".format(dbinterface, label))

        if not addr:
            raise ArgumentError("Please specify the address to be removed "
                                "using either --ip, --label, or --fqdn.")

        dbnetwork = addr.network
        ip = addr.ip

        if dbnetwork.network_environment != dbnet_env:
            raise ArgumentError("The specified address lives in {0:l}, not in "
                                "{1:l}.  Use the --network_environment option "
                                "to select the correct environment.".format(
                                    dbnetwork.network_environment, dbnet_env))

        # Forbid removing the primary name
        if ip == dbhw_ent.primary_ip:
            raise ArgumentError("The primary IP address of a hardware entity "
                                "cannot be removed.")

        dbinterface.assignments.remove(addr)

        # Check if the address was assigned to multiple interfaces, and remove
        # the DNS entries if this was the last use
        q = session.query(AddressAssignment)
        q = q.filter_by(network=dbnetwork)
        q = q.filter_by(ip=ip)
        other_uses = q.all()
        if not other_uses and not keep_dns:
            q = session.query(ARecord)
            q = q.filter_by(network=dbnetwork)
            q = q.filter_by(ip=ip)
            q = q.join(ARecord.fqdn)
            q = q.filter_by(dns_environment=dbnet_env.dns_environment)
            # Use an explicit loop rather than map(): under Python 3 map()
            # is lazy, and the deletions would silently never run.
            for dns_rec in q.all():
                delete_dns_record(dns_rec)

        session.flush()

        dbhost = getattr(dbhw_ent, "host", None)
        if dbhost:
            plenary_info = PlenaryHost(dbhost, logger=logger)
            key = plenary_info.get_write_key()
            try:
                lock_queue.acquire(key)
                try:
                    plenary_info.write(locked=True)
                except IncompleteError:
                    # FIXME: if this command is used after "add host" but before
                    # "make", then writing out the template will fail due to
                    # required services not being assigned. Ignore this error
                    # for now.
                    plenary_info.restore_stash()

                dsdb_runner = DSDBRunner(logger=logger)
                dsdb_runner.update_host(dbhw_ent, oldinfo)

                if not other_uses and keep_dns:
                    q = session.query(ARecord)
                    q = q.filter_by(network=dbnetwork)
                    q = q.filter_by(ip=ip)
                    dbdns_rec = q.first()
                    # Guard against a missing record so we do not call
                    # add_host_details() with a None fqdn.
                    if dbdns_rec:
                        dsdb_runner.add_host_details(dbdns_rec.fqdn, ip)

                dsdb_runner.commit_or_rollback("Could not add host to DSDB")
            except:
                plenary_info.restore_stash()
                raise
            finally:
                lock_queue.release(key)
        else:
            dsdb_runner = DSDBRunner(logger=logger)
            dsdb_runner.update_host(dbhw_ent, oldinfo)
            dsdb_runner.commit_or_rollback("Could not add host to DSDB")

        return
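
Example #54 locates the address assignment with a first_of helper. Its
definition is not part of this example; assuming it is a generic "first
element matching a predicate" utility, a sketch could be:

def first_of(iterable, predicate):
    # Illustrative sketch of the helper used above: return the first item
    # for which predicate(item) is true, or None if no item matches.
    for item in iterable:
        if predicate(item):
            return item
    return None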
Example #55
    def render(self, session, logger, hostname, domain, sandbox, force,
               **arguments):
        (dbbranch, dbauthor) = get_branch_and_author(session, logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     compel=True)

        if hasattr(dbbranch, "allow_manage") and not dbbranch.allow_manage:
            raise ArgumentError("Managing hosts to {0:l} is not allowed."
                                .format(dbbranch))

        dbhost = hostname_to_host(session, hostname)
        dbsource = dbhost.branch
        dbsource_author = dbhost.sandbox_author
        old_branch = dbhost.branch.name

        if dbhost.cluster:
            raise ArgumentError("Cluster nodes must be managed at the "
                                "cluster level; this host is a member of "
                                "{0}.".format(dbhost.cluster))

        if not force:
            validate_branch_commits(dbsource, dbsource_author,
                                    dbbranch, dbauthor, logger, self.config)

        dbhost.branch = dbbranch
        dbhost.sandbox_author = dbauthor
        session.add(dbhost)
        session.flush()
        plenary_host = PlenaryHost(dbhost, logger=logger)

        # We're crossing domains, need to lock everything.
        # XXX: There's a directory per domain.  Do we need subdirectories
        # for different authors for a sandbox?
        key = CompileKey(logger=logger)

        try:
            lock_queue.acquire(key)

            plenary_host.stash()
            plenary_host.cleanup(old_branch, locked=True)

            # Now we recreate the plenary to ensure that the domain is ready
            # to compile.  Note that, especially if there was no existing
            # template, there may not yet be enough information to write
            # one out.
            try:
                plenary_host.write(locked=True)
            except IncompleteError:
                # The template cannot be written yet, so leave it alone.
                # It would be nice to flag this state on the host.
                pass
        except:
            # This will not restore the cleaned up files.  That's OK.
            # They will be recreated as needed.
            plenary_host.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        return
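
The examples above all repeat the same choreography: acquire a compile key,
stash the plenaries, write or clean up templates, restore the stash on
failure, and release the lock. If one wanted to factor that out, a
hypothetical context manager (not part of the code shown here; all names
are illustrative) could capture the pattern:

from contextlib import contextmanager

@contextmanager
def locked_plenaries(key, *plenaries):
    # Hypothetical helper: hold a compile lock for the duration of the
    # block, stash the given plenaries up front, and restore them if the
    # block raises.
    lock_queue.acquire(key)
    try:
        for plenary in plenaries:
            plenary.stash()
        try:
            yield
        except:
            for plenary in plenaries:
                plenary.restore_stash()
            raise
    finally:
        lock_queue.release(key)

With such a helper, the lock handling in Example #55 would reduce to the
cleanup, write, and IncompleteError logic inside a single with block.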