Example #1
    def render(self, session, logger, switch, **arguments):
        dbswitch = Switch.get_unique(session, switch, compel=True)

        # Check and complain if the switch has any other addresses than its
        # primary address
        addrs = []
        for addr in dbswitch.all_addresses():
            if addr.ip == dbswitch.primary_ip:
                continue
            addrs.append(str(addr.ip))
        if addrs:
            raise ArgumentError("{0} still provides the following addresses, "
                                "delete them first: {1}.".format
                                (dbswitch, ", ".join(addrs)))

        dbdns_rec = dbswitch.primary_name
        ip = dbswitch.primary_ip
        # Guard the dereference: a switch may have no primary name at all
        old_fqdn = str(dbdns_rec.fqdn) if dbdns_rec else None
        old_comments = dbswitch.comments
        session.delete(dbswitch)
        if dbdns_rec:
            delete_dns_record(dbdns_rec)

        session.flush()

        # Any switch ports hanging off this switch should be deleted with
        # the cascade delete of the switch.

        switch_plenary = Plenary.get_plenary(dbswitch, logger=logger)

        # clusters connected to this switch
        plenaries = PlenaryCollection(logger=logger)

        for dbcluster in dbswitch.esx_clusters:
            plenaries.append(Plenary.get_plenary(dbcluster))

        key = CompileKey.merge([switch_plenary.get_remove_key(),
                                plenaries.get_write_key()])

        try:
            lock_queue.acquire(key)
            switch_plenary.stash()
            plenaries.write(locked=True)
            switch_plenary.remove(locked=True)

            if ip:
                dsdb_runner = DSDBRunner(logger=logger)
                # FIXME: restore interface name/MAC on rollback
                dsdb_runner.delete_host_details(old_fqdn, ip, comments=old_comments)
                dsdb_runner.commit_or_rollback("Could not remove switch from DSDB")
            return

        except:
            plenaries.restore_stash()
            switch_plenary.restore_stash()
            raise
        finally:
            lock_queue.release(key)
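
The acquire/stash/write/restore/release dance at the end of this example recurs throughout the examples below. A minimal sketch of the pattern, reusing the names from above; the `external_change` callable (e.g. a DSDB update) is a placeholder for the command-specific step, not a broker helper:

def locked_plenary_write(plenaries, key, external_change=None):
    # Sketch of the compile-lock pattern above; not broker code.
    try:
        lock_queue.acquire(key)
        plenaries.stash()              # snapshot the current on-disk templates
        plenaries.write(locked=True)   # write the new templates
        if external_change:
            external_change()          # may raise, e.g. on a DSDB failure
    except:
        plenaries.restore_stash()      # put the old templates back
        raise                          # propagate so the caller sees the error
    finally:
        lock_queue.release(key)        # always release the compile lock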
Example #2
    def render(self, session, logger, hostname, cluster, personality,
               **arguments):
        dbhost = hostname_to_host(session, hostname)
        dbcluster = Cluster.get_unique(session, cluster, compel=True)

        if dbcluster.status.name == 'decommissioned':
            raise ArgumentError("Cannot add hosts to decommissioned clusters.")

        # We only support changing personality within the same
        # archetype. The archetype decides things like which OS, how
        # it builds (dhcp, etc), whether it's compilable, and
        # switching all of that by side-effect seems wrong
        # somehow. And besides, it would make the user-interface and
        # implementation for this command ugly in order to support
        # changing all of those options.
        personality_change = False
        if personality is not None:
            dbpersonality = Personality.get_unique(session,
                                                   name=personality,
                                                   archetype=dbhost.archetype,
                                                   compel=True)
            if dbhost.personality != dbpersonality:
                dbhost.personality = dbpersonality
                personality_change = True

        # Allow for non-restricted clusters (the default?)
        if (len(dbcluster.allowed_personalities) > 0
                and dbhost.personality not in dbcluster.allowed_personalities):
            raise ArgumentError("The personality %s for %s is not allowed "
                                "by the cluster. Specify --personality "
                                "and provide one of %s" %
                                (dbhost.personality, dbhost.fqdn,
                                 ", ".join([x.name for x in
                                            dbcluster.allowed_personalities])))

        # Now that we've changed the personality, we can check
        # if this is a valid membership change
        dbcluster.validate_membership(dbhost)

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbcluster))

        if dbhost.cluster and dbhost.cluster != dbcluster:
            logger.client_info("Removing {0:l} from {1:l}.".format(
                dbhost, dbhost.cluster))
            old_cluster = dbhost.cluster
            old_cluster.hosts.remove(dbhost)
            remove_service_addresses(old_cluster, dbhost)
            old_cluster.validate()
            session.expire(dbhost, ['_cluster'])
            plenaries.append(Plenary.get_plenary(old_cluster))

        # Apply the service addresses to the new member
        for res in walk_resources(dbcluster):
            if not isinstance(res, ServiceAddress):
                continue
            apply_service_address(dbhost, res.interfaces, res)

        if dbhost.cluster:
            if personality_change:
                raise ArgumentError(
                    "{0:l} already in {1:l}, use "
                    "aq reconfigure to change personality.".format(
                        dbhost, dbhost.cluster))
            # the cluster has not changed, therefore there's nothing
            # to do here.
            return

        # Calculate the node index: build a map of all possible values, remove
        # the used ones, and pick the smallest remaining one
        node_index_map = set(xrange(len(dbcluster._hosts) + 1))
        for link in dbcluster._hosts:
            # The cluster may have been bigger in the past, so node indexes may
            # be larger than the current cluster size
            try:
                node_index_map.remove(link.node_index)
            except KeyError:
                pass
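        # Worked example: three members left with node indexes {0, 2, 3}
        # (the cluster was bigger in the past) give node_index_map =
        # {0, 1, 2, 3}; removing the used indexes leaves {1}, so the new
        # host gets index 1.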

        dbcluster.hosts.append((dbhost, min(node_index_map)))
        dbcluster.validate()

        # Demote or promote the host when switching clusters so its
        # lifecycle state matches the new cluster's
        if dbhost.status.name == 'ready':
            if dbcluster.status.name != 'ready':
                dbalmost = HostLifecycle.get_unique(session,
                                                    'almostready',
                                                    compel=True)
                dbhost.status.transition(dbhost, dbalmost)
                plenaries.append(Plenary.get_plenary(dbhost))
        elif dbhost.status.name == 'almostready':
            if dbcluster.status.name == 'ready':
                dbready = HostLifecycle.get_unique(session,
                                                   'ready',
                                                   compel=True)
                dbhost.status.transition(dbhost, dbready)
                plenaries.append(Plenary.get_plenary(dbhost))

        session.flush()

        # Enforce that service instances are set correctly for the
        # new cluster association.
        chooser = Chooser(dbhost, logger=logger)
        chooser.set_required()
        chooser.flush_changes()
        # the chooser will include the host plenary
        key = CompileKey.merge(
            [chooser.get_write_key(),
             plenaries.get_write_key()])

        try:
            lock_queue.acquire(key)
            chooser.write_plenary_templates(locked=True)
            plenaries.write(locked=True)
        except:
            chooser.restore_stash()
            plenaries.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        return
Example #3
    def render(self, session, logger, interface, machine, mac, automac, model,
               vendor, pg, autopg, type, comments, **arguments):
        dbmachine = Machine.get_unique(session, machine, compel=True)
        oldinfo = DSDBRunner.snapshot_hw(dbmachine)
        audit_results = []

        q = session.query(Interface)
        q = q.filter_by(name=interface, hardware_entity=dbmachine)
        if q.first():
            raise ArgumentError(
                "Machine %s already has an interface named %s." %
                (machine, interface))

        if not type:
            type = 'public'
            management_types = ['bmc', 'ilo', 'ipmi']
            for mtype in management_types:
                if interface.startswith(mtype):
                    type = 'management'
                    break

            if interface.startswith("bond"):
                type = 'bonding'
            elif interface.startswith("br"):
                type = 'bridge'

            # Test it last, VLANs can be added on top of almost anything
            if '.' in interface:
                type = 'vlan'

        if type == "oa" or type == "loopback":
            raise ArgumentError("Interface type '%s' is not valid for "
                                "machines." % type)

        bootable = None
        if type == 'public':
            if interface == 'eth0':
                bootable = True
            else:
                bootable = False

        dbmanager = None
        pending_removals = PlenaryCollection()
        dsdb_runner = DSDBRunner(logger=logger)
        if mac:
            prev = session.query(Interface).filter_by(mac=mac).first()
            if prev and prev.hardware_entity == dbmachine:
                raise ArgumentError("{0} already has an interface with MAC "
                                    "address {1}.".format(dbmachine, mac))
            # Is the conflicting interface something that can be
            # removed?  It is if:
            # - we are currently attempting to add a management interface
            # - the old interface belongs to a machine
            # - the old interface is associated with a host
            # - that host was blindly created, and thus can be removed safely
            if prev and type == 'management' and \
               prev.hardware_entity.hardware_type == 'machine' and \
               prev.hardware_entity.host and \
               prev.hardware_entity.host.status.name == 'blind':
                # FIXME: Is this just always allowed?  Maybe restrict
                # to only aqd-admin and the host itself?
                dummy_machine = prev.hardware_entity
                dummy_ip = dummy_machine.primary_ip
                old_fqdn = str(dummy_machine.primary_name)
                old_iface = prev.name
                old_mac = prev.mac
                old_network = get_net_id_from_ip(session, dummy_ip)
                self.remove_prev(session, logger, prev, pending_removals)
                session.flush()
                dsdb_runner.delete_host_details(old_fqdn, dummy_ip, old_iface,
                                                old_mac)
                self.consolidate_names(session, logger, dbmachine,
                                       dummy_machine.label, pending_removals)
                # It seems like a shame to throw away the IP address that
                # had been allocated for the blind host.  Try to use it
                # as it should be used...
                dbmanager = self.add_manager(session, logger, dbmachine,
                                             dummy_ip, old_network)
            elif prev:
                msg = describe_interface(session, prev)
                raise ArgumentError("MAC address %s is already in use: %s." %
                                    (mac, msg))
        elif automac:
            mac = self.generate_mac(session, dbmachine)
            audit_results.append(('mac', mac))
        else:
            # Ignore: the MAC address is allowed to be null now
            pass

        if pg is not None:
            port_group = verify_port_group(dbmachine, pg)
        elif autopg:
            port_group = choose_port_group(session, logger, dbmachine)
            audit_results.append(('pg', port_group))
        else:
            port_group = None

        dbinterface = get_or_create_interface(session,
                                              dbmachine,
                                              name=interface,
                                              vendor=vendor,
                                              model=model,
                                              interface_type=type,
                                              mac=mac,
                                              bootable=bootable,
                                              port_group=port_group,
                                              comments=comments,
                                              preclude=True)

        # So far, we're *only* creating a manager if we happen to be
        # removing a blind entry and we can steal its IP address.
        if dbmanager:
            assign_address(dbinterface, dbmanager.ip, dbmanager.network)

        session.add(dbinterface)
        session.flush()

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbmachine))
        if pending_removals and dbmachine.host:
            # Not an exact test, but the file won't be re-written
            # if the contents are the same so calling too often is
            # not a major expense.
            plenaries.append(Plenary.get_plenary(dbmachine.host))
        # Even though there may be removals going on the write key
        # should be sufficient here.
        key = plenaries.get_write_key()
        try:
            lock_queue.acquire(key)
            pending_removals.stash()
            plenaries.write(locked=True)
            pending_removals.remove(locked=True)

            dsdb_runner.update_host(dbmachine, oldinfo)
            dsdb_runner.commit_or_rollback("Could not update host in DSDB")
        except:
            plenaries.restore_stash()
            pending_removals.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        if dbmachine.host:
            # FIXME: reconfigure host
            pass

        for name, value in audit_results:
            self.audit_result(session, name, value, **arguments)
        return
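
The interface-type inference at the top of this example is self-contained enough to restate as a pure function. A sketch mirroring the logic above (the function name is illustrative, not a broker helper):

def infer_interface_type(name):
    # Default to 'public', then look for more specific prefixes.
    itype = 'public'
    if name.startswith(('bmc', 'ilo', 'ipmi')):
        itype = 'management'
    elif name.startswith('bond'):
        itype = 'bonding'
    elif name.startswith('br'):
        itype = 'bridge'
    # Tested last: VLANs can be added on top of almost anything
    if '.' in name:
        itype = 'vlan'
    return itype

assert infer_interface_type('eth0') == 'public'
assert infer_interface_type('bond0.100') == 'vlan'
assert infer_interface_type('ipmi0') == 'management'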
Example #4
    def render(self, session, logger, building, city, address, fullname,
               default_dns_domain, comments, **arguments):
        dbbuilding = get_location(session, building=building)

        old_city = dbbuilding.city

        dsdb_runner = DSDBRunner(logger=logger)

        if address is not None:
            old_address = dbbuilding.address
            dbbuilding.address = address
            dsdb_runner.update_building(dbbuilding.name, dbbuilding.address,
                                        old_address)
        if fullname is not None:
            dbbuilding.fullname = fullname
        if comments is not None:
            dbbuilding.comments = comments
        if default_dns_domain is not None:
            if default_dns_domain:
                dbdns_domain = DnsDomain.get_unique(session,
                                                    default_dns_domain,
                                                    compel=True)
                dbbuilding.default_dns_domain = dbdns_domain
            else:
                dbbuilding.default_dns_domain = None

        plenaries = PlenaryCollection(logger=logger)
        if city:
            dbcity = get_location(session, city=city)

            # Moving to a different hub would change the templates'
            # locations, so it is forbidden
            if dbcity.hub != dbbuilding.hub:
                # Doing this both to reduce user error and to limit
                # testing required.
                raise ArgumentError("Cannot change hubs. {0} is in {1} "
                                    "while {2} is in {3}.".format(
                                        dbcity, dbcity.hub, dbbuilding,
                                        dbbuilding.hub))

            # issue svcmap warnings
            maps = 0
            for map_type in [ServiceMap, PersonalityServiceMap]:
                maps = maps + session.query(map_type).\
                    filter_by(location=old_city).count()

            if maps > 0:
                logger.client_info("There are {0} service(s) mapped to the "
                                   "old location of the ({1:l}), please "
                                   "review and manually update mappings for "
                                   "the new location as needed.".format(
                                       maps, dbbuilding.city))

            dbbuilding.update_parent(parent=dbcity)

            if old_city.campus and (old_city.campus != dbcity.campus):
                dsdb_runner.del_campus_building(old_city.campus, building)

            if dbcity.campus and (old_city.campus != dbcity.campus):
                dsdb_runner.add_campus_building(dbcity.campus, building)

            query = session.query(Machine)
            query = query.filter(
                Machine.location_id.in_(dbcity.offspring_ids()))

            for dbmachine in query:
                plenaries.append(PlenaryMachineInfo(dbmachine, logger=logger))

        session.flush()

        if plenaries.plenaries:
            with plenaries.get_write_key() as key:
                plenaries.stash()
                try:
                    plenaries.write(locked=True)
                    dsdb_runner.commit_or_rollback()
                except:
                    plenaries.restore_stash()
                    raise
        else:
            dsdb_runner.commit_or_rollback()

        return
Example #5
    def render(self, session, logger, cluster, personality,
               max_members, fix_location, down_hosts_threshold,
               maint_threshold, comments,
               # ESX specific options
               switch, memory_capacity, clear_overrides, vm_to_host_ratio,
               **arguments):

        dbcluster = Cluster.get_unique(session, cluster, compel=True)

        if dbcluster.cluster_type == 'meta':
            raise ArgumentError("%s should not be a metacluster." %
                                format(dbcluster))

        cluster_updated = False
        remove_plenaries = PlenaryCollection(logger=logger)
        plenaries = PlenaryCollection(logger=logger)

        (vm_count, host_count) = force_ratio("vm_to_host_ratio",
                                             vm_to_host_ratio)
        if down_hosts_threshold is not None:
            (perc, dht) = Cluster.parse_threshold(down_hosts_threshold)
            dbcluster.down_hosts_threshold = dht
            dbcluster.down_hosts_percent = perc
            cluster_updated = True

        if dbcluster.cluster_type == "esx":
            if vm_count is not None or down_hosts_threshold is not None:
                if vm_count is None:
                    vm_count = dbcluster.vm_count
                    host_count = dbcluster.host_count

                dht = dbcluster.down_hosts_threshold
                perc = dbcluster.down_hosts_percent

                dbcluster.validate(vm_part=vm_count,
                                   host_part=host_count,
                                   down_hosts_threshold=dht,
                                   down_hosts_percent=perc)

                dbcluster.vm_count = vm_count
                dbcluster.host_count = host_count
                cluster_updated = True

        if switch is not None:
            if switch:
                # FIXME: Verify that any hosts are on the same network
                dbswitch = Switch.get_unique(session, switch, compel=True)
                plenaries.append(Plenary.get_plenary(dbswitch))
            else:
                dbswitch = None
            dbcluster.switch = dbswitch
            cluster_updated = True

        if memory_capacity is not None:
            dbcluster.memory_capacity = memory_capacity
            dbcluster.validate()
            cluster_updated = True

        if clear_overrides is not None:
            dbcluster.memory_capacity = None
            dbcluster.validate()
            cluster_updated = True

        location_updated = update_cluster_location(session, logger, dbcluster,
                                                   fix_location, plenaries,
                                                   remove_plenaries,
                                                   **arguments)

        if location_updated:
            cluster_updated = True

        if personality:
            archetype = dbcluster.personality.archetype.name
            dbpersonality = Personality.get_unique(session,
                                                   name=personality,
                                                   archetype=archetype,
                                                   compel=True)
            if not dbpersonality.is_cluster:
                raise ArgumentError("Personality {0} is not a cluster "
                                    "personality.".format(dbpersonality))
            dbcluster.personality = dbpersonality
            cluster_updated = True

        if max_members is not None:
            current_members = len(dbcluster.hosts)
            if max_members < current_members:
                raise ArgumentError(
                    "%s has %d hosts bound, which exceeds "
                    "the requested limit %d." %
                    (format(dbcluster), current_members, max_members))
            dbcluster.max_hosts = max_members
            cluster_updated = True

        if comments is not None:
            dbcluster.comments = comments
            cluster_updated = True

        if down_hosts_threshold is not None:
            (dbcluster.down_hosts_percent,
             dbcluster.down_hosts_threshold) = \
                Cluster.parse_threshold(down_hosts_threshold)
            cluster_updated = True

        if maint_threshold is not None:
            (dbcluster.down_maint_percent,
             dbcluster.down_maint_threshold) = \
                Cluster.parse_threshold(maint_threshold)
            cluster_updated = True

        if not cluster_updated:
            return

        session.add(dbcluster)
        session.flush()

        plenaries.append(Plenary.get_plenary(dbcluster))
        key = CompileKey.merge(
            [plenaries.get_write_key(),
             remove_plenaries.get_remove_key()])
        try:
            lock_queue.acquire(key)
            remove_plenaries.stash()
            plenaries.write(locked=True)
            remove_plenaries.remove(locked=True)
        except:
            remove_plenaries.restore_stash()
            plenaries.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        return
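
Cluster.parse_threshold is called twice above and returns a (percent, absolute) pair, but its implementation is not shown in this example. A plausible sketch, assuming the threshold arrives either as a plain count ("2") or a percentage ("50%"); the behavior here is inferred from the call sites, not the broker's actual code:

def parse_threshold(threshold):
    # Hypothetical: split a threshold string into (percent, absolute).
    if threshold.endswith("%"):
        return (int(threshold[:-1]), None)   # percentage form, e.g. "50%"
    return (None, int(threshold))            # absolute form, e.g. "2"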
Example #6
    def render(self, session, logger, hostname, machine, archetype, domain,
               sandbox, osname, osversion, buildstatus, personality, comments,
               zebra_interfaces, grn, eon_id, skip_dsdb_check=False,
               **arguments):
        dbarchetype = Archetype.get_unique(session, archetype, compel=True)
        section = "archetype_" + dbarchetype.name

        # This is for the various add_*_host commands
        if not domain and not sandbox:
            domain = self.config.get(section, "host_domain")

        (dbbranch, dbauthor) = get_branch_and_author(session, logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     compel=True)

        if hasattr(dbbranch, "allow_manage") and not dbbranch.allow_manage:
            raise ArgumentError("Adding hosts to {0:l} is not allowed."
                                .format(dbbranch))

        if not buildstatus:
            buildstatus = 'build'
        dbstatus = HostLifecycle.get_unique(session, buildstatus, compel=True)
        dbmachine = Machine.get_unique(session, machine, compel=True)
        oldinfo = DSDBRunner.snapshot_hw(dbmachine)

        if not personality:
            if self.config.has_option(section, "default_personality"):
                personality = self.config.get(section, "default_personality")
            else:
                personality = 'generic'
        dbpersonality = Personality.get_unique(session, name=personality,
                                               archetype=dbarchetype, compel=True)

        if not osname:
            if self.config.has_option(section, "default_osname"):
                osname = self.config.get(section, "default_osname")
        if not osversion:
            if self.config.has_option(section, "default_osversion"):
                osversion = self.config.get(section, "default_osversion")

        if not osname or not osversion:
            raise ArgumentError("Can not determine a sensible default OS "
                                "for archetype %s. Please use the "
                                "--osname and --osversion parameters." %
                                (dbarchetype.name))

        dbos = OperatingSystem.get_unique(session, name=osname,
                                          version=osversion,
                                          archetype=dbarchetype, compel=True)

        if (dbmachine.model.machine_type == 'aurora_node' and
                dbpersonality.archetype.name != 'aurora'):
            raise ArgumentError("Machines of type aurora_node can only be "
                                "added with archetype aurora.")

        if dbmachine.host:
            raise ArgumentError("{0:c} {0.label} is already allocated to "
                                "{1:l}.".format(dbmachine, dbmachine.host))

        if grn or eon_id:
            dbgrn = lookup_grn(session, grn, eon_id, logger=logger,
                               config=self.config)
        else:
            dbgrn = dbpersonality.owner_grn

        dbhost = Host(machine=dbmachine, branch=dbbranch, owner_grn=dbgrn,
                      sandbox_author=dbauthor, personality=dbpersonality,
                      status=dbstatus, operating_system=dbos, comments=comments)
        session.add(dbhost)

        if self.config.has_option("archetype_" + archetype, "default_grn_target"):
            dbhost.grns.append((dbhost, dbgrn,
                                self.config.get("archetype_" + archetype,
                                                "default_grn_target")))

        if zebra_interfaces:
            # --autoip does not make sense for Zebra (at least not the way it's
            # implemented currently)
            dbinterface = None
        else:
            dbinterface = get_boot_interface(dbmachine)

        # This method is allowed to return None. This can only happen
        # (currently) using add_aurora_host, add_windows_host, or possibly by
        # bypassing the aq client and posting a request directly.
        audit_results = []
        ip = generate_ip(session, logger, dbinterface,
                         audit_results=audit_results, **arguments)

        dbdns_rec, newly_created = grab_address(session, hostname, ip,
                                                allow_restricted_domain=True,
                                                allow_reserved=True,
                                                preclude=True)
        dbmachine.primary_name = dbdns_rec

        # Fix up auxiliary addresses to point to the primary name by default
        if ip:
            dns_env = dbdns_rec.fqdn.dns_environment

            for addr in dbmachine.all_addresses():
                if addr.interface.interface_type == "management":
                    continue
                if addr.service_address_id:  # pragma: no cover
                    continue
                for rec in addr.dns_records:
                    if rec.fqdn.dns_environment == dns_env:
                        rec.reverse_ptr = dbdns_rec.fqdn

        if zebra_interfaces:
            if not ip:
                raise ArgumentError("Zebra configuration requires an IP address.")
            dbsrv_addr = self.assign_zebra_address(session, dbmachine, dbdns_rec,
                                                   zebra_interfaces)
        else:
            if ip:
                if not dbinterface:
                    raise ArgumentError("You have specified an IP address for the "
                                        "host, but {0:l} does not have a bootable "
                                        "interface.".format(dbmachine))
                assign_address(dbinterface, ip, dbdns_rec.network)
            dbsrv_addr = None

        session.flush()

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbmachine))
        if dbmachine.vm_container:
            plenaries.append(Plenary.get_plenary(dbmachine.vm_container))
        if dbsrv_addr:
            plenaries.append(Plenary.get_plenary(dbsrv_addr))

        key = plenaries.get_write_key()
        try:
            lock_queue.acquire(key)
            plenaries.write(locked=True)

            # XXX: This (and some of the code above) is horrible.  There
            # should be a generic/configurable hook here that could kick
            # in based on archetype and/or domain.
            dsdb_runner = DSDBRunner(logger=logger)
            if dbhost.archetype.name == 'aurora':
                # For aurora, check that DSDB has a record of the host.
                if not skip_dsdb_check:
                    try:
                        dsdb_runner.show_host(hostname)
                    except ProcessException, e:
                        raise ArgumentError("Could not find host in DSDB: %s" % e)
            elif not dbmachine.primary_ip:
                logger.info("No IP for %s, not adding to DSDB." % dbmachine.fqdn)
Example #7
    def resetadvertisedstatus_list(self, session, logger, dbhosts):
        branches = {}
        authors = {}
        failed = []
        compileable = []
        # Do any cross-list or dependency checks
        for dbhost in dbhosts:
            # Validate branches and domains only if the
            # archetype is compileable
            if (dbhost.archetype.is_compileable):
                compileable.append(dbhost.fqdn)
                if dbhost.branch in branches:
                    branches[dbhost.branch].append(dbhost)
                else:
                    branches[dbhost.branch] = [dbhost]
                if dbhost.sandbox_author in authors:
                    authors[dbhost.sandbox_author].append(dbhost)
                else:
                    authors[dbhost.sandbox_author] = [dbhost]

            if dbhost.status.name == 'ready':
                failed.append("{0:l} is in ready status, "
                              "advertised status can be reset only "
                              "when host is in non ready state".format(dbhost))
        if failed:
            raise ArgumentError("Cannot modify the following hosts:\n%s" %
                                "\n".join(failed))
        if len(branches) > 1:
            keys = branches.keys()
            branch_sort = lambda x, y: cmp(len(branches[x]), len(branches[y]))
            keys.sort(cmp=branch_sort)
            stats = ["{0:d} hosts in {1:l}".format(len(branches[branch]),
                                                   branch)
                     for branch in keys]
            raise ArgumentError("All hosts must be in the same domain or "
                                "sandbox:\n%s" % "\n".join(stats))
        if len(authors) > 1:
            keys = authors.keys()
            author_sort = lambda x, y: cmp(len(authors[x]), len(authors[y]))
            keys.sort(cmp=author_sort)
            stats = ["%s hosts with sandbox author %s" %
                     (len(authors[author]), author.name) for author in keys]
            raise ArgumentError("All hosts must be managed by the same "
                                "sandbox author:\n%s" % "\n".join(stats))

        plenaries = PlenaryCollection(logger=logger)
        for dbhost in dbhosts:
            dbhost.advertise_status = False
            session.add(dbhost)
            plenaries.append(PlenaryHost(dbhost, logger=logger))

        session.flush()

        dbbranch = branches.keys()[0]
        dbauthor = authors.keys()[0]
        key = CompileKey.merge([plenaries.get_write_key()])
        try:
            lock_queue.acquire(key)
            plenaries.stash()
            plenaries.write(locked=True)
            td = TemplateDomain(dbbranch, dbauthor, logger=logger)
            td.compile(session, only=compileable, locked=True)
        except:
            plenaries.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        return
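
The branch/author bookkeeping at the top of this example is a plain group-by over the compileable hosts. A compact restatement with collections.defaultdict, for clarity only (the command above keeps its explicit dict handling):

from collections import defaultdict

def group_compileable(dbhosts):
    # Group compileable hosts by branch and by sandbox author; the
    # caller then insists on exactly one group of each.
    branches = defaultdict(list)
    authors = defaultdict(list)
    for dbhost in dbhosts:
        if dbhost.archetype.is_compileable:
            branches[dbhost.branch].append(dbhost)
            authors[dbhost.sandbox_author].append(dbhost)
    return branches, authors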
Example #8
    def render(self, session, logger, hostname, **arguments):
        # Removing the plenary host requires a compile lock; however, we
        # are juggling two locks here, so we need to be careful to avoid
        # deadlock. We grab the plenary host early on (in order to get the
        # filenames filled in from the db info before we delete it from
        # the db). We then hold onto those references until we've
        # completed the db cleanup, and if all of that is successful, we
        # delete the plenary file (which doesn't require re-evaluating any
        # stale db information) after we've released the delhost lock.
        delplenary = False

        # Any service bindings that we need to clean up afterwards
        bindings = PlenaryCollection(logger=logger)
        resources = PlenaryCollection(logger=logger)
        with DeleteKey("system", logger=logger) as key:
            # Check dependencies, translate into user-friendly message
            dbhost = hostname_to_host(session, hostname)
            host_plenary = Plenary.get_plenary(dbhost, logger=logger)
            domain = dbhost.branch.name
            deps = get_host_dependencies(session, dbhost)
            if deps:
                deptext = "\n".join(["  %s" % d for d in deps])
                raise ArgumentError("Cannot delete host %s due to the "
                                    "following dependencies:\n%s." %
                                    (hostname, deptext))

            archetype = dbhost.archetype.name
            dbmachine = dbhost.machine
            oldinfo = DSDBRunner.snapshot_hw(dbmachine)

            ip = dbmachine.primary_ip
            fqdn = dbmachine.fqdn

            for si in dbhost.services_used:
                plenary = PlenaryServiceInstanceServer(si)
                bindings.append(plenary)
                logger.info("Before deleting host '%s', removing binding '%s'"
                            % (fqdn, si.cfg_path))

            del dbhost.services_used[:]

            if dbhost.resholder:
                for res in dbhost.resholder.resources:
                    resources.append(Plenary.get_plenary(res))

            # In case of Zebra, the IP may be configured on multiple interfaces
            for iface in dbmachine.interfaces:
                if ip in iface.addresses:
                    iface.addresses.remove(ip)

            if dbhost.cluster:
                dbcluster = dbhost.cluster
                dbcluster.hosts.remove(dbhost)
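                # set_committed_value() updates the in-memory attribute as
                # if it had been loaded from the database, without marking
                # it dirty, so no redundant UPDATE is emitted for the
                # removed cluster link.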
                set_committed_value(dbhost, '_cluster', None)
                dbcluster.validate()

            dbdns_rec = dbmachine.primary_name
            dbmachine.primary_name = None
            dbmachine.host = None
            session.delete(dbhost)
            delete_dns_record(dbdns_rec)
            session.flush()
            delplenary = True

            if dbmachine.vm_container:
                bindings.append(Plenary.get_plenary(dbmachine.vm_container))

            if archetype != 'aurora' and ip is not None:
                dsdb_runner = DSDBRunner(logger=logger)
                dsdb_runner.update_host(dbmachine, oldinfo)
                dsdb_runner.commit_or_rollback("Could not remove host %s from "
                                               "DSDB" % hostname)
            if archetype == 'aurora':
                logger.client_info("WARNING: removing host %s from AQDB and "
                                   "*not* changing DSDB." % hostname)

            # Past the point of no return... commit the transaction so
            # that we can free the delete lock.
            session.commit()

        # Only if we got here with no exceptions do we clean the template.
        # Trying to clean up after any errors here is really difficult,
        # since the changes to dsdb have already been made.
        if delplenary:
            key = host_plenary.get_remove_key()
            with CompileKey.merge([key, bindings.get_write_key(),
                                   resources.get_remove_key()]) as key:
                host_plenary.cleanup(domain, locked=True)
                # And we also want to remove the profile itself
                profiles = self.config.get("broker", "profilesdir")
                # Only one of these should exist, but it doesn't hurt
                # to try to clean up both.
                xmlfile = os.path.join(profiles, fqdn + ".xml")
                remove_file(xmlfile, logger=logger)
                xmlgzfile = xmlfile + ".gz"
                remove_file(xmlgzfile, logger=logger)
                # And the cached template created by ant
                remove_file(os.path.join(self.config.get("broker",
                                                         "quattordir"),
                                         "objects", fqdn + TEMPLATE_EXTENSION),
                            logger=logger)
                bindings.write(locked=True)
                resources.remove(locked=True)

            build_index(self.config, session, profiles, logger=logger)

        return
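
The shape of this command is worth calling out: capture everything needed from the db, do the db work under the delete lock, commit, and only then touch the filesystem under a separate compile lock. A skeletal sketch of that two-lock ordering; the three callables are placeholders for the command-specific steps:

def delete_then_cleanup(session, get_plenary, db_cleanup, fs_cleanup):
    with DeleteKey("system") as key:
        plenary = get_plenary()   # capture filenames while db info is live
        db_cleanup()              # all db work happens under the delete lock
        session.commit()          # past the point of no return
    # The delete lock is released before the compile lock is taken, so the
    # two locks are never held at the same time.
    with CompileKey.merge([plenary.get_remove_key()]) as key:
        fs_cleanup(plenary)       # filesystem cleanup under the compile lock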
Exemplo n.º 11
0
    def render(self, session, logger, cluster, personality,
               max_members, fix_location, down_hosts_threshold,
               maint_threshold, comments,
               # ESX specific options
               switch, memory_capacity, clear_overrides, vm_to_host_ratio,
               **arguments):

        dbcluster = Cluster.get_unique(session, cluster, compel=True)

        if dbcluster.cluster_type == 'meta':
            raise ArgumentError("%s should not be a metacluster."
                                % format(dbcluster))

        cluster_updated = False
        remove_plenaries = PlenaryCollection(logger=logger)
        plenaries = PlenaryCollection(logger=logger)

        (vm_count, host_count) = force_ratio("vm_to_host_ratio",
                                             vm_to_host_ratio)
        if down_hosts_threshold is not None:
            (perc, dht) = Cluster.parse_threshold(down_hosts_threshold)
            dbcluster.down_hosts_threshold = dht
            dbcluster.down_hosts_percent = perc
            cluster_updated = True

        if dbcluster.cluster_type == "esx":
            if vm_count is not None or down_hosts_threshold is not None:
                if vm_count is None:
                    vm_count = dbcluster.vm_count
                    host_count = dbcluster.host_count

                dht = dbcluster.down_hosts_threshold
                perc = dbcluster.down_hosts_percent

                dbcluster.validate(vm_part=vm_count, host_part=host_count,
                                   down_hosts_threshold=dht,
                                   down_hosts_percent=perc)

                dbcluster.vm_count = vm_count
                dbcluster.host_count = host_count
                cluster_updated = True

        if switch is not None:
            if switch:
                # FIXME: Verify that any hosts are on the same network
                dbswitch = Switch.get_unique(session, switch, compel=True)
                plenaries.append(Plenary.get_plenary(dbswitch))
            else:
                dbswitch = None
            dbcluster.switch = dbswitch
            cluster_updated = True

        if memory_capacity is not None:
            dbcluster.memory_capacity = memory_capacity
            dbcluster.validate()
            cluster_updated = True

        if clear_overrides is not None:
            dbcluster.memory_capacity = None
            dbcluster.validate()
            cluster_updated = True

        location_updated = update_cluster_location(session, logger,
                                          dbcluster, fix_location,
                                          plenaries, remove_plenaries,
                                          **arguments)

        if location_updated:
            cluster_updated = True

        if personality:
            archetype = dbcluster.personality.archetype.name
            dbpersonality = Personality.get_unique(session, name=personality,
                                                   archetype=archetype,
                                                   compel=True)
            if not dbpersonality.is_cluster:
                raise ArgumentError("Personality {0} is not a cluster " +
                                    "personality".format(dbpersonality))
            dbcluster.personality = dbpersonality
            cluster_updated = True

        if max_members is not None:
            current_members = len(dbcluster.hosts)
            if max_members < current_members:
                raise ArgumentError("%s has %d hosts bound, which exceeds "
                                    "the requested limit %d." %
                                    (format(dbcluster), current_members,
                                     max_members))
            dbcluster.max_hosts = max_members
            cluster_updated = True

        if comments is not None:
            dbcluster.comments = comments
            cluster_updated = True

        if down_hosts_threshold is not None:
            (dbcluster.down_hosts_percent,
             dbcluster.down_hosts_threshold) = \
                Cluster.parse_threshold(down_hosts_threshold)
            cluster_updated = True

        if maint_threshold is not None:
            (dbcluster.down_maint_percent,
             dbcluster.down_maint_threshold) = \
                Cluster.parse_threshold(maint_threshold)
            cluster_updated = True

        if not cluster_updated:
            return

        session.add(dbcluster)
        session.flush()

        plenaries.append(Plenary.get_plenary(dbcluster))
        key = CompileKey.merge([plenaries.get_write_key(),
                                remove_plenaries.get_remove_key()])
        try:
            lock_queue.acquire(key)
            remove_plenaries.stash()
            plenaries.write(locked=True)
            remove_plenaries.remove(locked=True)
        except:
            remove_plenaries.restore_stash()
            plenaries.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        return
Exemplo n.º 12
0
    def render(self, session, logger, interface, machine, mac, automac,
               model, vendor, pg, autopg, type, comments, **arguments):
        dbmachine = Machine.get_unique(session, machine, compel=True)
        oldinfo = DSDBRunner.snapshot_hw(dbmachine)
        audit_results = []

        q = session.query(Interface)
        q = q.filter_by(name=interface, hardware_entity=dbmachine)
        if q.first():
            raise ArgumentError("Machine %s already has an interface named %s."
                    % (machine, interface))

        if not type:
            type = 'public'
            management_types = ['bmc', 'ilo', 'ipmi']
            for mtype in management_types:
                if interface.startswith(mtype):
                    type = 'management'
                    break

            if interface.startswith("bond"):
                type = 'bonding'
            elif interface.startswith("br"):
                type = 'bridge'

            # Test it last, VLANs can be added on top of almost anything
            if '.' in interface:
                type = 'vlan'

        if type == "oa" or type == "loopback":
            raise ArgumentError("Interface type '%s' is not valid for "
                                "machines." % type)

        bootable = None
        if type == 'public':
            if interface == 'eth0':
                bootable = True
            else:
                bootable = False

        dbmanager = None
        pending_removals = PlenaryCollection()
        dsdb_runner = DSDBRunner(logger=logger)
        if mac:
            prev = session.query(Interface).filter_by(mac=mac).first()
            if prev and prev.hardware_entity == dbmachine:
                raise ArgumentError("{0} already has an interface with MAC "
                                    "address {1}.".format(dbmachine, mac))
            # Is the conflicting interface something that can be
            # removed?  It is if:
            # - we are currently attempting to add a management interface
            # - the old interface belongs to a machine
            # - the old interface is associated with a host
            # - that host was blindly created, and thus can be removed safely
            if prev and type == 'management' and \
               prev.hardware_entity.hardware_type == 'machine' and \
               prev.hardware_entity.host and \
               prev.hardware_entity.host.status.name == 'blind':
                # FIXME: Is this just always allowed?  Maybe restrict
                # to only aqd-admin and the host itself?
                dummy_machine = prev.hardware_entity
                dummy_ip = dummy_machine.primary_ip
                old_fqdn = str(dummy_machine.primary_name)
                old_iface = prev.name
                old_mac = prev.mac
                old_network = get_net_id_from_ip(session, dummy_ip)
                self.remove_prev(session, logger, prev, pending_removals)
                session.flush()
                dsdb_runner.delete_host_details(old_fqdn, dummy_ip, old_iface,
                                                old_mac)
                self.consolidate_names(session, logger, dbmachine,
                                       dummy_machine.label, pending_removals)
                # It seems like a shame to throw away the IP address that
                # had been allocated for the blind host.  Try to use it
                # as it should be used...
                dbmanager = self.add_manager(session, logger, dbmachine,
                                             dummy_ip, old_network)
            elif prev:
                msg = describe_interface(session, prev)
                raise ArgumentError("MAC address %s is already in use: %s." %
                                    (mac, msg))
        elif automac:
            mac = self.generate_mac(session, dbmachine)
            audit_results.append(('mac', mac))
        else:
            #Ignore now that Mac Address can be null
            pass

        if pg is not None:
            port_group = verify_port_group(dbmachine, pg)
        elif autopg:
            port_group = choose_port_group(session, logger, dbmachine)
            audit_results.append(('pg', port_group))
        else:
            port_group = None

        dbinterface = get_or_create_interface(session, dbmachine,
                                              name=interface,
                                              vendor=vendor, model=model,
                                              interface_type=type, mac=mac,
                                              bootable=bootable,
                                              port_group=port_group,
                                              comments=comments, preclude=True)

        # So far, we're *only* creating a manager if we happen to be
        # removing a blind entry and we can steal its IP address.
        if dbmanager:
            assign_address(dbinterface, dbmanager.ip, dbmanager.network)

        session.add(dbinterface)
        session.flush()

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbmachine))
        if pending_removals and dbmachine.host:
            # Not an exact test, but the file won't be re-written
            # if the contents are the same so calling too often is
            # not a major expense.
            plenaries.append(Plenary.get_plenary(dbmachine.host))
        # Even though there may be removals going on the write key
        # should be sufficient here.
        key = plenaries.get_write_key()
        try:
            lock_queue.acquire(key)
            pending_removals.stash()
            plenaries.write(locked=True)
            pending_removals.remove(locked=True)

            dsdb_runner.update_host(dbmachine, oldinfo)
            dsdb_runner.commit_or_rollback("Could not update host in DSDB")
        except:
            plenaries.restore_stash()
            pending_removals.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        if dbmachine.host:
            # FIXME: reconfigure host
            pass

        for name, value in audit_results:
            self.audit_result(session, name, value, **arguments)
        return
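
The pattern above recurs in most of the examples below: acquire a compile
key, stash the current plenary contents, write the new ones, and restore
the stash if anything fails. A minimal standalone sketch of that shape,
using only names that already appear in the surrounding code:

    key = plenaries.get_write_key()
    try:
        lock_queue.acquire(key)
        plenaries.stash()
        plenaries.write(locked=True)
    except:
        plenaries.restore_stash()
        raise
    finally:
        lock_queue.release(key)
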
Exemplo n.º 13
0
    def render(self, session, logger, hostname, **arguments):
        # Removing the host plenary requires a compile lock, but since
        # we are juggling two locks here we must be careful to avoid
        # deadlock.  We grab the host plenary early on (so that its
        # filenames are filled in from the db info before the row is
        # deleted), hold onto those references until the db cleanup is
        # complete, and only then, after releasing the delhost lock,
        # remove the plenary file (which no longer requires re-evaluating
        # any stale db information).
        delplenary = False

        # Any service bindings that we need to clean up afterwards
        bindings = PlenaryCollection(logger=logger)
        resources = PlenaryCollection(logger=logger)
        with DeleteKey("system", logger=logger) as key:
            # Check dependencies, translate into user-friendly message
            dbhost = hostname_to_host(session, hostname)
            host_plenary = Plenary.get_plenary(dbhost, logger=logger)
            domain = dbhost.branch.name
            deps = get_host_dependencies(session, dbhost)
            if deps:
                deptext = "\n".join(["  %s" % d for d in deps])
                raise ArgumentError("Cannot delete host %s due to the "
                                    "following dependencies:\n%s." %
                                    (hostname, deptext))

            archetype = dbhost.archetype.name
            dbmachine = dbhost.machine
            oldinfo = DSDBRunner.snapshot_hw(dbmachine)

            ip = dbmachine.primary_ip
            fqdn = dbmachine.fqdn

            for si in dbhost.services_used:
                plenary = PlenaryServiceInstanceServer(si)
                bindings.append(plenary)
                logger.info(
                    "Before deleting host '%s', removing binding '%s'" %
                    (fqdn, si.cfg_path))

            del dbhost.services_used[:]

            if dbhost.resholder:
                for res in dbhost.resholder.resources:
                    resources.append(Plenary.get_plenary(res))

            # In case of Zebra, the IP may be configured on multiple interfaces
            for iface in dbmachine.interfaces:
                if ip in iface.addresses:
                    iface.addresses.remove(ip)

            if dbhost.cluster:
                dbcluster = dbhost.cluster
                dbcluster.hosts.remove(dbhost)
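                # set_committed_value() adjusts SQLAlchemy's view of the
                # host's cluster reference in place, without marking the
                # attribute dirty or triggering lazy loading.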
                set_committed_value(dbhost, '_cluster', None)
                dbcluster.validate()

            dbdns_rec = dbmachine.primary_name
            dbmachine.primary_name = None
            dbmachine.host = None
            session.delete(dbhost)
            delete_dns_record(dbdns_rec)
            session.flush()
            delplenary = True

            if dbmachine.vm_container:
                bindings.append(Plenary.get_plenary(dbmachine.vm_container))

            if archetype != 'aurora' and ip is not None:
                dsdb_runner = DSDBRunner(logger=logger)
                dsdb_runner.update_host(dbmachine, oldinfo)
                dsdb_runner.commit_or_rollback("Could not remove host %s from "
                                               "DSDB" % hostname)
            if archetype == 'aurora':
                logger.client_info("WARNING: removing host %s from AQDB and "
                                   "*not* changing DSDB." % hostname)

            # Past the point of no return... commit the transaction so
            # that we can free the delete lock.
            session.commit()

        # Only if we got here with no exceptions do we clean the template.
        # Trying to clean up after any errors at this point is really
        # difficult, since the changes to dsdb have already been made.
        if delplenary:
            key = host_plenary.get_remove_key()
            with CompileKey.merge(
                [key,
                 bindings.get_write_key(),
                 resources.get_remove_key()]) as key:
                host_plenary.cleanup(domain, locked=True)
                # And we also want to remove the profile itself
                profiles = self.config.get("broker", "profilesdir")
                # Only one of these should exist, but it doesn't hurt
                # to try to clean up both.
                xmlfile = os.path.join(profiles, fqdn + ".xml")
                remove_file(xmlfile, logger=logger)
                xmlgzfile = xmlfile + ".gz"
                remove_file(xmlgzfile, logger=logger)
                # And the cached template created by ant
                remove_file(os.path.join(
                    self.config.get("broker", "quattordir"), "objects",
                    fqdn + TEMPLATE_EXTENSION),
                            logger=logger)
                bindings.write(locked=True)
                resources.remove(locked=True)

            build_index(self.config, session, profiles, logger=logger)

        return
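
The cleanup step above removes three compiled artifacts per host. A
sketch of the same path construction, using placeholder configuration
values (the real profilesdir and quattordir come from the broker config,
and TEMPLATE_EXTENSION is defined elsewhere in the codebase):

    import os

    profilesdir = "/var/quattor/profiles"   # placeholder value
    quattordir = "/var/quattor"             # placeholder value
    TEMPLATE_EXTENSION = ".tpl"             # assumed value

    fqdn = "host.example.com"
    targets = [os.path.join(profilesdir, fqdn + ".xml"),
               os.path.join(profilesdir, fqdn + ".xml.gz"),
               os.path.join(quattordir, "objects",
                            fqdn + TEMPLATE_EXTENSION)]
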
Exemplo n.º 14
0
    def render(self, session, logger, cluster, archetype, personality, domain,
               sandbox, max_members, down_hosts_threshold, maint_threshold,
               buildstatus, comments, vm_to_host_ratio, switch, metacluster,
               **arguments):

        validate_basic("cluster", cluster)
        dbpersonality = Personality.get_unique(session,
                                               name=personality,
                                               archetype=archetype,
                                               compel=True)
        if not dbpersonality.is_cluster:
            raise ArgumentError("%s is not a cluster personality." %
                                personality)

        ctype = dbpersonality.archetype.cluster_type
        section = "archetype_" + dbpersonality.archetype.name

        if not buildstatus:
            buildstatus = "build"
        dbstatus = ClusterLifecycle.get_unique(session,
                                               buildstatus,
                                               compel=True)

        (dbbranch, dbauthor) = get_branch_and_author(session,
                                                     logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     compel=True)

        if hasattr(dbbranch, "allow_manage") and not dbbranch.allow_manage:
            raise ArgumentError(
                "Adding clusters to {0:l} is not allowed.".format(dbbranch))

        dbloc = get_location(session, **arguments)
        if not dbloc:
            raise ArgumentError("Adding a cluster requires a location "
                                "constraint.")
        if not dbloc.campus:
            raise ArgumentError("{0} is not within a campus.".format(dbloc))

        if max_members is None:
            if self.config.has_option(section, "max_members_default"):
                max_members = self.config.getint(section,
                                                 "max_members_default")

        Cluster.get_unique(session, cluster, preclude=True)
        # Not finding the cluster type is an internal consistency issue, so make
        # that show up in the logs by using AquilonError
        clus_type = Cluster.polymorphic_subclass(ctype,
                                                 "Unknown cluster type",
                                                 error=AquilonError)

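        # parse_threshold() accepts the threshold either as an absolute
        # host count or as a percentage; the returned flag records which
        # form was used, and both values are stored on the cluster below.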
        (down_hosts_pct, dht) = Cluster.parse_threshold(down_hosts_threshold)

        kw = {
            'name': cluster,
            'location_constraint': dbloc,
            'personality': dbpersonality,
            'max_hosts': max_members,
            'branch': dbbranch,
            'sandbox_author': dbauthor,
            'down_hosts_threshold': dht,
            'down_hosts_percent': down_hosts_pct,
            'status': dbstatus,
            'comments': comments
        }

        if ctype == 'esx':
            if vm_to_host_ratio is None:
                if self.config.has_option(section, "vm_to_host_ratio"):
                    vm_to_host_ratio = self.config.get(section,
                                                       "vm_to_host_ratio")
                else:
                    vm_to_host_ratio = "1:1"
            (vm_count, host_count) = force_ratio("vm_to_host_ratio",
                                                 vm_to_host_ratio)
            kw["vm_count"] = vm_count
            kw["host_count"] = host_count

        if switch and hasattr(clus_type, 'switch'):
            kw['switch'] = Switch.get_unique(session, switch, compel=True)

        if maint_threshold is not None:
            (down_hosts_pct, dht) = Cluster.parse_threshold(maint_threshold)
            kw['down_maint_threshold'] = dht
            kw['down_maint_percent'] = down_hosts_pct

        dbcluster = clus_type(**kw)

        plenaries = PlenaryCollection(logger=logger)

        if metacluster:
            dbmetacluster = MetaCluster.get_unique(session,
                                                   metacluster,
                                                   compel=True)

            dbmetacluster.validate_membership(dbcluster)
            dbmetacluster.members.append(dbcluster)

            plenaries.append(Plenary.get_plenary(dbmetacluster))

        session.add(dbcluster)
        session.flush()
        session.refresh(dbcluster)

        plenaries.append(Plenary.get_plenary(dbcluster))

        key = plenaries.get_write_key()

        try:
            lock_queue.acquire(key)
            plenaries.write(locked=True)
        except:
            plenaries.restore_stash()
            raise
        finally:
            lock_queue.release(key)
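
The vm_to_host_ratio option is a string such as "1:1"; force_ratio splits
it into two integer counts. An illustrative re-implementation under that
assumption (not the broker's actual code):

    def force_ratio(label, value):
        # Parse "<vms>:<hosts>" into a pair of positive integers.
        try:
            left, right = value.split(":")
            vm_count, host_count = int(left), int(right)
        except ValueError:
            raise ValueError("%s value '%s' is not in <int>:<int> form." %
                             (label, value))
        if vm_count < 1 or host_count < 1:
            raise ValueError("%s value '%s' must use positive integers." %
                             (label, value))
        return (vm_count, host_count)
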
Exemplo n.º 15
0
    def render(self, session, logger, machine, model, vendor, serial,
               chassis, slot, clearchassis, multislot,
               vmhost, cluster, allow_metacluster_change,
               cpuname, cpuvendor, cpuspeed, cpucount, memory, ip,
               **arguments):
        dbmachine = Machine.get_unique(session, machine, compel=True)
        plenaries = PlenaryCollection(logger=logger)
        oldinfo = DSDBRunner.snapshot_hw(dbmachine)

        if clearchassis:
            del dbmachine.chassis_slot[:]

        remove_plenaries = PlenaryCollection(logger=logger)
        if chassis:
            dbchassis = Chassis.get_unique(session, chassis, compel=True)
            if machine_plenary_will_move(old=dbmachine.location,
                                         new=dbchassis.location):
                remove_plenaries.append(Plenary.get_plenary(dbmachine))
            dbmachine.location = dbchassis.location
            if slot is None:
                raise ArgumentError("Option --chassis requires --slot "
                                    "information.")
            self.adjust_slot(session, logger,
                             dbmachine, dbchassis, slot, multislot)
        elif slot is not None:
            dbchassis = None
            for dbslot in dbmachine.chassis_slot:
                if dbchassis and dbslot.chassis != dbchassis:
                    raise ArgumentError("Machine in multiple chassis, please "
                                        "use --chassis argument.")
                dbchassis = dbslot.chassis
            if not dbchassis:
                raise ArgumentError("Option --slot requires --chassis "
                                    "information.")
            self.adjust_slot(session, logger,
                             dbmachine, dbchassis, slot, multislot)

        dblocation = get_location(session, **arguments)
        if dblocation:
            loc_clear_chassis = False
            for dbslot in dbmachine.chassis_slot:
                dbcl = dbslot.chassis.location
                if dbcl != dblocation:
                    if chassis or slot is not None:
                        raise ArgumentError("{0} conflicts with chassis {1!s} "
                                            "location {2}.".format(dblocation,
                                                        dbslot.chassis, dbcl))
                    else:
                        loc_clear_chassis = True
            if loc_clear_chassis:
                del dbmachine.chassis_slot[:]
            if machine_plenary_will_move(old=dbmachine.location,
                                         new=dblocation):
                remove_plenaries.append(Plenary.get_plenary(dbmachine))
            dbmachine.location = dblocation

        if model or vendor:
            # If overriding model, should probably overwrite default
            # machine specs as well.
            if not model:
                model = dbmachine.model.name
            if not vendor:
                vendor = dbmachine.model.vendor.name
            dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                       compel=True)
            if dbmodel.machine_type not in ['blade', 'rackmount',
                                            'workstation', 'aurora_node',
                                            'virtual_machine']:
                raise ArgumentError("The update_machine command cannot update "
                                    "machines of type %s." %
                                    dbmodel.machine_type)
            # We probably could do this by forcing either cluster or
            # location data to be available as appropriate, but really?
            # Failing seems reasonable.
            if dbmodel.machine_type != dbmachine.model.machine_type and \
               'virtual_machine' in [dbmodel.machine_type,
                                     dbmachine.model.machine_type]:
                raise ArgumentError("Cannot change machine from %s to %s." %
                                    (dbmachine.model.machine_type,
                                     dbmodel.machine_type))

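            # Interfaces still using the old model's default NIC follow
            # the model change; explicitly overridden NIC models are
            # left alone.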
            old_nic_model = dbmachine.model.nic_model
            new_nic_model = dbmodel.nic_model
            if old_nic_model != new_nic_model:
                for iface in dbmachine.interfaces:
                    if iface.model == old_nic_model:
                        iface.model = new_nic_model

            dbmachine.model = dbmodel

        if cpuname or cpuvendor or cpuspeed is not None:
            dbcpu = Cpu.get_unique(session, name=cpuname, vendor=cpuvendor,
                                   speed=cpuspeed, compel=True)
            dbmachine.cpu = dbcpu

        if cpucount is not None:
            dbmachine.cpu_quantity = cpucount
        if memory is not None:
            dbmachine.memory = memory
        if serial:
            dbmachine.serial_no = serial

        if ip:
            update_primary_ip(session, dbmachine, ip)

        # FIXME: For now, if a machine has its interface(s) in a portgroup
        # this command will need to be followed by an update_interface to
        # re-evaluate the portgroup for overflow.
        # It would be better to have --pg and --autopg options to let it
        # happen at this point.
        if cluster or vmhost:
            if not dbmachine.vm_container:
                raise ArgumentError("Cannot convert a physical machine to "
                                    "virtual.")

            old_holder = dbmachine.vm_container.holder.holder_object
            resholder = get_resource_holder(session, hostname=vmhost,
                                            cluster=cluster, compel=False)
            new_holder = resholder.holder_object

            # TODO: do we want to allow moving machines between the cluster and
            # metacluster level?
            if new_holder.__class__ != old_holder.__class__:
                raise ArgumentError("Cannot move a VM between a cluster and a "
                                    "stand-alone host.")

            if cluster:
                if new_holder.metacluster != old_holder.metacluster \
                   and not allow_metacluster_change:
                    raise ArgumentError("Current {0:l} does not match "
                                        "new {1:l}."
                                        .format(old_holder.metacluster,
                                                new_holder.metacluster))

            remove_plenaries.append(Plenary.get_plenary(dbmachine.vm_container))
            dbmachine.vm_container.holder = resholder

            for dbdisk in dbmachine.disks:
                if not isinstance(dbdisk, VirtualDisk):
                    continue
                old_share = dbdisk.share
                if isinstance(old_share.holder, BundleResource):
                    resourcegroup = old_share.holder.name
                else:
                    resourcegroup = None
                new_share = find_share(new_holder, resourcegroup, old_share.name,
                                       error=ArgumentError)

                # If the shares are registered at the metacluster level and both
                # clusters are in the same metacluster, then there will be no
                # real change here
                if new_share != old_share:
                    old_share.disks.remove(dbdisk)
                    new_share.disks.append(dbdisk)

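            # A cluster-held VM inherits the cluster's location
            # constraint; a host-held VM inherits its holder's location.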
            if isinstance(new_holder, Cluster):
                dbmachine.location = new_holder.location_constraint
            else:
                dbmachine.location = new_holder.location

            session.flush()
            plenaries.append(Plenary.get_plenary(old_holder))
            plenaries.append(Plenary.get_plenary(new_holder))

        if dbmachine.vm_container:
            plenaries.append(Plenary.get_plenary(dbmachine.vm_container))

        session.flush()

        # Check if the changed parameters still meet cluster capacity
        # requirements
        if dbmachine.cluster:
            dbmachine.cluster.validate()
            if allow_metacluster_change:
                dbmachine.cluster.metacluster.validate()
        if dbmachine.host and dbmachine.host.cluster:
            dbmachine.host.cluster.validate()

        # The check to make sure a plenary file is not written out for
        # dummy aurora hardware is within the call to write().  This way
        # it is consistent without altering (and forgetting to alter)
        # all the calls to the method.
        plenaries.append(Plenary.get_plenary(dbmachine))
        if remove_plenaries.plenaries and dbmachine.host:
            plenaries.append(Plenary.get_plenary(dbmachine.host))

        key = CompileKey.merge([plenaries.get_write_key(),
                                remove_plenaries.get_remove_key()])
        try:
            lock_queue.acquire(key)
            remove_plenaries.stash()
            plenaries.write(locked=True)
            remove_plenaries.remove(locked=True)

            if dbmachine.host:
                # XXX: May need to reconfigure.
                pass

            dsdb_runner = DSDBRunner(logger=logger)
            dsdb_runner.update_host(dbmachine, oldinfo)
            dsdb_runner.commit_or_rollback("Could not update machine in DSDB")
        except:
            plenaries.restore_stash()
            remove_plenaries.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        return
Exemplo n.º 17
0
    def render(self,
               session,
               logger,
               hostname,
               machine,
               archetype,
               domain,
               sandbox,
               osname,
               osversion,
               buildstatus,
               personality,
               comments,
               zebra_interfaces,
               grn,
               eon_id,
               skip_dsdb_check=False,
               **arguments):
        dbarchetype = Archetype.get_unique(session, archetype, compel=True)
        section = "archetype_" + dbarchetype.name

        # This is for the various add_*_host commands
        if not domain and not sandbox:
            domain = self.config.get(section, "host_domain")

        (dbbranch, dbauthor) = get_branch_and_author(session,
                                                     logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     compel=True)

        if hasattr(dbbranch, "allow_manage") and not dbbranch.allow_manage:
            raise ArgumentError(
                "Adding hosts to {0:l} is not allowed.".format(dbbranch))

        if not buildstatus:
            buildstatus = 'build'
        dbstatus = HostLifecycle.get_unique(session, buildstatus, compel=True)
        dbmachine = Machine.get_unique(session, machine, compel=True)
        oldinfo = DSDBRunner.snapshot_hw(dbmachine)

        if not personality:
            if self.config.has_option(section, "default_personality"):
                personality = self.config.get(section, "default_personality")
            else:
                personality = 'generic'
        dbpersonality = Personality.get_unique(session,
                                               name=personality,
                                               archetype=dbarchetype,
                                               compel=True)

        if not osname:
            if self.config.has_option(section, "default_osname"):
                osname = self.config.get(section, "default_osname")
        if not osversion:
            if self.config.has_option(section, "default_osversion"):
                osversion = self.config.get(section, "default_osversion")

        if not osname or not osversion:
            raise ArgumentError("Can not determine a sensible default OS "
                                "for archetype %s. Please use the "
                                "--osname and --osversion parameters." %
                                (dbarchetype.name))

        dbos = OperatingSystem.get_unique(session,
                                          name=osname,
                                          version=osversion,
                                          archetype=dbarchetype,
                                          compel=True)

        if (dbmachine.model.machine_type == 'aurora_node'
                and dbpersonality.archetype.name != 'aurora'):
            raise ArgumentError("Machines of type aurora_node can only be "
                                "added with archetype aurora.")

        if dbmachine.host:
            raise ArgumentError("{0:c} {0.label} is already allocated to "
                                "{1:l}.".format(dbmachine, dbmachine.host))

        if grn or eon_id:
            dbgrn = lookup_grn(session,
                               grn,
                               eon_id,
                               logger=logger,
                               config=self.config)
        else:
            dbgrn = dbpersonality.owner_grn

        dbhost = Host(machine=dbmachine,
                      branch=dbbranch,
                      owner_grn=dbgrn,
                      sandbox_author=dbauthor,
                      personality=dbpersonality,
                      status=dbstatus,
                      operating_system=dbos,
                      comments=comments)
        session.add(dbhost)

        if self.config.has_option("archetype_" + archetype,
                                  "default_grn_target"):
            dbhost.grns.append((dbhost, dbgrn,
                                self.config.get("archetype_" + archetype,
                                                "default_grn_target")))

        if zebra_interfaces:
            # --autoip does not make sense for Zebra (at least not the way it's
            # implemented currently)
            dbinterface = None
        else:
            dbinterface = get_boot_interface(dbmachine)

        # generate_ip() below is allowed to return None.  This can only
        # happen (currently) when using add_aurora_host, add_windows_host,
        # or possibly by bypassing the aq client and posting a request
        # directly.
        audit_results = []
        ip = generate_ip(session,
                         logger,
                         dbinterface,
                         audit_results=audit_results,
                         **arguments)

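        # grab_address() reserves the hostname (and the IP, if one was
        # generated) in DNS; preclude=True makes it fail rather than
        # reuse an existing record.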
        dbdns_rec, newly_created = grab_address(session,
                                                hostname,
                                                ip,
                                                allow_restricted_domain=True,
                                                allow_reserved=True,
                                                preclude=True)
        dbmachine.primary_name = dbdns_rec

        # Fix up auxiliary addresses to point to the primary name by default
        if ip:
            dns_env = dbdns_rec.fqdn.dns_environment

            for addr in dbmachine.all_addresses():
                if addr.interface.interface_type == "management":
                    continue
                if addr.service_address_id:  # pragma: no cover
                    continue
                for rec in addr.dns_records:
                    if rec.fqdn.dns_environment == dns_env:
                        rec.reverse_ptr = dbdns_rec.fqdn

        if zebra_interfaces:
            if not ip:
                raise ArgumentError(
                    "Zebra configuration requires an IP address.")
            dbsrv_addr = self.assign_zebra_address(session, dbmachine,
                                                   dbdns_rec, zebra_interfaces)
        else:
            if ip:
                if not dbinterface:
                    raise ArgumentError(
                        "You have specified an IP address for the "
                        "host, but {0:l} does not have a bootable "
                        "interface.".format(dbmachine))
                assign_address(dbinterface, ip, dbdns_rec.network)
            dbsrv_addr = None

        session.flush()

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbmachine))
        if dbmachine.vm_container:
            plenaries.append(Plenary.get_plenary(dbmachine.vm_container))
        if dbsrv_addr:
            plenaries.append(Plenary.get_plenary(dbsrv_addr))

        key = plenaries.get_write_key()
        try:
            lock_queue.acquire(key)
            plenaries.write(locked=True)

            # XXX: This (and some of the code above) is horrible.  There
            # should be a generic/configurable hook here that could kick
            # in based on archetype and/or domain.
            dsdb_runner = DSDBRunner(logger=logger)
            if dbhost.archetype.name == 'aurora':
                # For aurora, check that DSDB has a record of the host.
                if not skip_dsdb_check:
                    try:
                        dsdb_runner.show_host(hostname)
                    except ProcessException, e:
                        raise ArgumentError("Could not find host in DSDB: %s" %
                                            e)
            elif not dbmachine.primary_ip:
                logger.info("No IP for %s, not adding to DSDB." %
                            dbmachine.fqdn)
Exemplo n.º 18
0
    def render(self, session, logger, building, city, address,
               fullname, default_dns_domain, comments, **arguments):
        dbbuilding = get_location(session, building=building)

        old_city = dbbuilding.city

        dsdb_runner = DSDBRunner(logger=logger)

        if address is not None:
            old_address = dbbuilding.address
            dbbuilding.address = address
            dsdb_runner.update_building(dbbuilding.name, dbbuilding.address,
                                        old_address)
        if fullname is not None:
            dbbuilding.fullname = fullname
        if comments is not None:
            dbbuilding.comments = comments
        if default_dns_domain is not None:
            if default_dns_domain:
                dbdns_domain = DnsDomain.get_unique(session, default_dns_domain,
                                                    compel=True)
                dbbuilding.default_dns_domain = dbdns_domain
            else:
                dbbuilding.default_dns_domain = None

        plenaries = PlenaryCollection(logger=logger)
        if city:
            dbcity = get_location(session, city=city)

            # This would change the templates' locations, hence it is forbidden
            if dbcity.hub != dbbuilding.hub:
                # Doing this both to reduce user error and to limit
                # testing required.
                raise ArgumentError("Cannot change hubs. {0} is in {1} "
                                    "while {2} is in {3}.".format(
                                        dbcity, dbcity.hub,
                                        dbbuilding, dbbuilding.hub))

            # issue svcmap warnings
            maps = 0
            for map_type in [ServiceMap, PersonalityServiceMap]:
                maps += session.query(map_type).\
                    filter_by(location=old_city).count()

            if maps > 0:
                logger.client_info("There are {0} service(s) mapped to the "
                                   "old location of {1:l}; please review "
                                   "and manually update the mappings for "
                                   "the new location as needed.".format(
                                       maps, dbbuilding.city))

            dbbuilding.update_parent(parent=dbcity)

            if old_city.campus and (old_city.campus != dbcity.campus):
                dsdb_runner.del_campus_building(old_city.campus, building)

            if dbcity.campus and (old_city.campus != dbcity.campus):
                dsdb_runner.add_campus_building(dbcity.campus, building)

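            # Plenary contents embed location data, so every machine in
            # the new city's location subtree gets its plenary re-written.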
            query = session.query(Machine)
            query = query.filter(Machine.location_id.in_(dbcity.offspring_ids()))

            for dbmachine in query:
                plenaries.append(PlenaryMachineInfo(dbmachine, logger=logger))

        session.flush()

        if plenaries.plenaries:
            with plenaries.get_write_key() as key:
                plenaries.stash()
                try:
                    plenaries.write(locked=True)
                    dsdb_runner.commit_or_rollback()
                except:
                    plenaries.restore_stash()
                    raise
        else:
            dsdb_runner.commit_or_rollback()

        return
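
All of these examples funnel external changes through DSDBRunner and
finalize them with commit_or_rollback(). Assuming the individual calls
are queued until that point, here is a purely illustrative sketch of
what such an accumulate-then-commit interface could look like (this is
not the broker's implementation):

    class CommandBatch(object):
        """Queue side-effecting actions; run them only on commit."""

        def __init__(self):
            self.pending = []   # list of (action, rollback) pairs

        def add(self, action, rollback):
            self.pending.append((action, rollback))

        def commit_or_rollback(self, error_msg="Commit failed"):
            done = []
            for action, rollback in self.pending:
                try:
                    action()
                    done.append(rollback)
                except Exception:
                    # Undo whatever already ran, most recent first.
                    for undo in reversed(done):
                        undo()
                    raise RuntimeError(error_msg)
            self.pending = []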