Example #1
 def render(self, session, machine, model, vendor, machine_type, chassis,
            slot, **arguments):
     q = session.query(Machine)
     if machine:
         # TODO: This command still mixes search/show facilities.
         # For now, give an error if machine name not found, but
         # also allow the command to be used to check if the machine has
         # the requested attributes (via the standard query filters).
         # In the future, this should be clearly separated as 'show machine'
         # and 'search machine'.
         machine = AqStr.normalize(machine)
         Machine.check_label(machine)
         Machine.get_unique(session, machine, compel=True)
         q = q.filter_by(label=machine)
     dblocation = get_location(session, **arguments)
     if dblocation:
         q = q.filter_by(location=dblocation)
     if chassis:
         dbchassis = Chassis.get_unique(session, chassis, compel=True)
         q = q.join('chassis_slot')
         q = q.filter_by(chassis=dbchassis)
         q = q.reset_joinpoint()
     if slot is not None:
         q = q.join('chassis_slot')
         q = q.filter_by(slot_number=slot)
         q = q.reset_joinpoint()
     if model or vendor or machine_type:
         subq = Model.get_matching_query(session, name=model, vendor=vendor,
                                         model_type=machine_type,
                                         compel=True)
         q = q.filter(Machine.model_id.in_(subq))
     return q.order_by(Machine.label).all()
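
Every example on this page centers on the same call shape, Machine.get_unique(session, label, compel=True). The method itself is never shown here, so the following is only a minimal sketch of the contract implied by how these examples use it, not the actual aquilon.aqdb implementation:

    # Minimal sketch of the get_unique contract implied by these examples;
    # the real Aquilon method supports richer lookups than a plain label.
    class NotFoundException(Exception):
        pass

    class GetUniqueSketch(object):
        @classmethod
        def get_unique(cls, session, label, compel=False):
            # Return the single row matching the label, or None on a miss.
            obj = session.query(cls).filter_by(label=label).first()
            if obj is None and compel:
                # compel=True turns a miss into an error, which is how
                # nearly every example on this page calls it.
                raise NotFoundException("%s %s not found." %
                                        (cls.__name__, label))
            return obj
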
Example #2
 def render(self, session, machine, model, vendor, machine_type, chassis,
            slot, **arguments):
     q = session.query(Machine)
     if machine:
         # TODO: This command still mixes search/show facilities.
         # For now, give an error if machine name not found, but
         # also allow the command to be used to check if the machine has
         # the requested attributes (via the standard query filters).
         # In the future, this should be clearly separated as 'show machine'
         # and 'search machine'.
         machine = AqStr.normalize(machine)
         Machine.check_label(machine)
         Machine.get_unique(session, machine, compel=True)
         q = q.filter_by(label=machine)
     dblocation = get_location(session, **arguments)
     if dblocation:
         q = q.filter_by(location=dblocation)
     if chassis:
         dbchassis = Chassis.get_unique(session, chassis, compel=True)
         q = q.join('chassis_slot')
         q = q.filter_by(chassis=dbchassis)
         q = q.reset_joinpoint()
     if slot is not None:
         q = q.join('chassis_slot')
         q = q.filter_by(slot_number=slot)
         q = q.reset_joinpoint()
     if model or vendor or machine_type:
         subq = Model.get_matching_query(session,
                                         name=model,
                                         vendor=vendor,
                                         machine_type=machine_type,
                                         compel=True)
         q = q.filter(Machine.model_id.in_(subq))
     return q.order_by(Machine.label).all()
Example #3
    def render(self, session, logger, prefix, dns_domain, hostname, machine,
               **args):
        if dns_domain:
            dbdns_domain = DnsDomain.get_unique(session, dns_domain,
                                                compel=True)
        else:
            dbmachine = Machine.get_unique(session, machine, compel=True)
            dbdns_domain = None
            loc = dbmachine.location
            while loc and not dbdns_domain:
                dbdns_domain = loc.default_dns_domain
                loc = loc.parent

            if not dbdns_domain:
                raise ArgumentError("There is no default DNS domain configured "
                                    "for the machine's location. Please "
                                    "specify --dns_domain.")

        # Lock the DNS domain to prevent the same name generated for
        # simultaneous requests
        dbdns_domain.lock_row()

        prefix = AqStr.normalize(prefix)
        result = search_next(session=session, cls=Fqdn, attr=Fqdn.name,
                             value=prefix, dns_domain=dbdns_domain,
                             start=None, pack=None)
        hostname = "%s%d.%s" % (prefix, result, dbdns_domain)

        CommandAddHost.render(self, session, logger, hostname=hostname,
                              machine=machine, **args)

        logger.info("Selected host name %s" % hostname)
        self.audit_result(session, 'hostname', hostname, **args)
        return hostname
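
The heavy lifting above is done by search_next: given a prefix and a DNS domain it returns the next free numeric suffix, and the command then formats the name from the prefix, the number and the domain. Since the helper itself is not shown, the sketch below is only one plausible reading of it, scanning existing names for prefix<digits> and going one past the highest; the real implementation, with its start and pack parameters, may fill gaps differently:

    import re

    def next_free_suffix(existing_names, prefix):
        # One plausible reading of search_next: take the highest used
        # "<prefix><digits>" suffix and go one past it.
        pattern = re.compile(r'^%s(\d+)$' % re.escape(prefix))
        used = [int(m.group(1)) for m in
                (pattern.match(name) for name in existing_names) if m]
        return max(used) + 1 if used else 1

    # next_free_suffix(["aquilon1", "aquilon3"], "aquilon") == 4
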
Example #4
    def render(self, session, logger, machine, disk, controller, size, all,
               dbuser, **arguments):

        # Handle deprecated arguments
        if arguments.get("type", None):
            self.deprecated_option("type",
                                   "Please use --controller instead.",
                                   logger=logger,
                                   **arguments)
            controller = arguments["type"]
        if arguments.get("capacity", None):
            self.deprecated_option("capacity",
                                   "Please use --size instead.",
                                   logger=logger,
                                   **arguments)
            size = arguments["capacity"]

        dbmachine = Machine.get_unique(session, machine, compel=True)
        q = session.query(Disk).filter_by(machine=dbmachine)
        if disk:
            q = q.filter_by(device_name=disk)
        if controller:
            if controller not in controller_types:
                raise ArgumentError("%s is not a valid controller type, use "
                                    "one of: %s." %
                                    (controller, ", ".join(controller_types)))
            q = q.filter_by(controller_type=controller)
        if size is not None:
            q = q.filter_by(capacity=size)
        results = q.all()

        if len(results) == 0:
            raise NotFoundException("No disks found.")
        elif len(results) > 1 and not all:
            raise ArgumentError("More than one matching disks found.  "
                                "Use --all to delete them all.")
        for result in results:
            session.delete(result)

        session.flush()
        session.expire(dbmachine, ['disks'])

        plenary_machine = Plenary.get_plenary(dbmachine, logger=logger)
        key = plenary_machine.get_write_key()
        dbcontainer = dbmachine.vm_container
        if dbcontainer:
            plenary_container = Plenary.get_plenary(dbcontainer, logger=logger)
            key = CompileKey.merge([key, plenary_container.get_write_key()])
        try:
            lock_queue.acquire(key)
            if dbcontainer:
                plenary_container.write(locked=True)
            plenary_machine.write(locked=True)
        except:
            plenary_machine.restore_stash()
            if dbcontainer:
                plenary_container.restore_stash()
            raise
        finally:
            lock_queue.release(key)
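
Stripped of the disk-specific logic, the second half of this command is an instance of a locking pattern that recurs in several examples below: merge the write keys, take the compile lock, write the plenary templates, roll back from the stash on any failure, and always release the lock. A schematic version, with placeholder objects rather than the real Plenary/CompileKey classes:

    # Schematic form of the lock/write/restore-stash idiom; the objects
    # here are placeholders, not the actual Aquilon classes.
    def write_under_lock(lock_queue, key, plenaries):
        try:
            lock_queue.acquire(key)
            for plenary in plenaries:
                plenary.write(locked=True)  # locked=True: key already held
        except:
            # Undo any partially written templates before re-raising...
            for plenary in plenaries:
                plenary.restore_stash()
            raise
        finally:
            # ...and give the lock back no matter what happened.
            lock_queue.release(key)
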
Example #5
    def render(self, session, logger, machine, generate, **kwargs):
        dbmachine = Machine.get_unique(session, machine, compel=True)
        plenary_info = Plenary.get_plenary(dbmachine, logger=logger)

        if generate:
            return plenary_info._generate_content()
        else:
            return plenary_info.read()
Example #6
    def render(self, session, logger, machine, disk, controller, size, all,
               dbuser, **arguments):

        # Handle deprecated arguments
        if arguments.get("type", None):
            self.deprecated_option("type", "Please use --controller instead.",
                                   logger=logger, **arguments)
            controller = arguments["type"]
        if arguments.get("capacity", None):
            self.deprecated_option("capacity", "Please use --size instead.",
                                   logger=logger, **arguments)
            size = arguments["capacity"]

        dbmachine = Machine.get_unique(session, machine, compel=True)
        q = session.query(Disk).filter_by(machine=dbmachine)
        if disk:
            q = q.filter_by(device_name=disk)
        if controller:
            if controller not in controller_types:
                raise ArgumentError("%s is not a valid controller type, use "
                                    "one of: %s." % (controller,
                                                     ", ".join(controller_types)
                                                     ))
            q = q.filter_by(controller_type=controller)
        if size is not None:
            q = q.filter_by(capacity=size)
        results = q.all()

        if len(results) == 0:
            raise NotFoundException("No disks found.")
        elif len(results) > 1 and not all:
            raise ArgumentError("More than one matching disks found.  "
                                "Use --all to delete them all.")
        for result in results:
            session.delete(result)

        session.flush()
        session.expire(dbmachine, ['disks'])

        plenary_machine = Plenary.get_plenary(dbmachine, logger=logger)
        key = plenary_machine.get_write_key()
        dbcontainer = dbmachine.vm_container
        if dbcontainer:
            plenary_container = Plenary.get_plenary(dbcontainer, logger=logger)
            key = CompileKey.merge([key, plenary_container.get_write_key()])
        try:
            lock_queue.acquire(key)
            if dbcontainer:
                plenary_container.write(locked=True)
            plenary_machine.write(locked=True)
        except:
            plenary_machine.restore_stash()
            if dbcontainer:
                plenary_container.restore_stash()
            raise
        finally:
            lock_queue.release(key)
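
The first half of this command (and of Example #4) shows the standard warn-then-alias treatment of renamed options: emit a deprecation notice, then copy the legacy value onto the new parameter. A condensed illustration with a hypothetical stand-in for the broker's helper:

    def deprecated_option(name, hint, logger=None):
        # Hypothetical stand-in for the broker helper: it only emits the
        # warning; the caller then aliases the value itself, e.g.
        #     if arguments.get("capacity"):
        #         deprecated_option("capacity", "Please use --size instead.",
        #                           logger=logger)
        #         size = arguments["capacity"]
        if logger:
            logger.client_info("The --%s option is deprecated. %s" %
                               (name, hint))
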
Example #7
    def render(self, session, logger, machine, generate, **kwargs):
        dbmachine = Machine.get_unique(session, machine, compel=True)
        if dbmachine.model.machine_type not in ['blade', 'virtual_machine',
                                                'workstation', 'rackmount']:
            raise ArgumentError("Plenary file not available for %s machines." %
                    dbmachine.model.machine_type)
        plenary_info = PlenaryMachineInfo(dbmachine, logger=logger)

        if generate:
            return plenary_info._generate_content()
        else:
            return plenary_info.read()
Example #8
def hostname_to_host(session, hostname):
    # When the user asked for a host, returning "machine not found" does not
    # feel like the right error message, even if it is technically correct.
    # It's a little tricky though: we don't want to suppress "dns domain not
    # found".
    parse_fqdn(session, hostname)
    try:
        dbmachine = Machine.get_unique(session, hostname, compel=True)
    except NotFoundException:
        raise NotFoundException("Host %s not found." % hostname)

    if not dbmachine.host:
        raise NotFoundException("{0} does not have a host "
                                "assigned.".format(dbmachine))
    return dbmachine.host
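
The helper is a thin translation layer: machines and hosts share the same FQDN namespace, so a host lookup is really a machine lookup whose failure is reworded in host terms. A hypothetical call showing the two failure modes (the hostname is made up):

    # Hypothetical usage; both failure modes surface as NotFoundException.
    try:
        dbhost = hostname_to_host(session, "node42.example.com")
    except NotFoundException, e:
        # Either no machine has that name, or the machine exists but
        # has no host assigned to it.
        print e
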
Example #9
File: host.py Project: jrha/aquilon
def hostname_to_host(session, hostname):
    # When the user asked for a host, returning "machine not found" does not
    # feel like the right error message, even if it is technically correct.
    # It's a little tricky though: we don't want to suppress "dns domain not
    # found".
    parse_fqdn(session, hostname)
    try:
        dbmachine = Machine.get_unique(session, hostname, compel=True)
    except NotFoundException:
        raise NotFoundException("Host %s not found." % hostname)

    if not dbmachine.host:
        raise NotFoundException("{0} does not have a host "
                                "assigned.".format(dbmachine))
    return dbmachine.host
Example #10
    def render(self, session, logger, machine, dbuser, **arguments):
        dbmachine = Machine.get_unique(session, machine, compel=True)

        remove_plenaries = PlenaryCollection(logger=logger)
        remove_plenaries.append(Plenary.get_plenary(dbmachine))
        if dbmachine.vm_container:
            remove_plenaries.append(Plenary.get_plenary(
                dbmachine.vm_container))
            dbcontainer = dbmachine.vm_container.holder.holder_object
        else:
            dbcontainer = None

        if dbmachine.host:
            raise ArgumentError("{0} is still in use by {1:l} and cannot be "
                                "deleted.".format(dbmachine, dbmachine.host))
        addrs = []
        for addr in dbmachine.all_addresses():
            addrs.append("%s: %s" % (addr.logical_name, addr.ip))
        if addrs:
            addrmsg = ", ".join(addrs)
            raise ArgumentError("{0} still provides the following addresses, "
                                "delete them first: {1}.".format(
                                    dbmachine, addrmsg))

        session.delete(dbmachine)
        session.flush()

        key = remove_plenaries.get_remove_key()
        if dbcontainer:
            plenary_container = Plenary.get_plenary(dbcontainer, logger=logger)
            key = CompileKey.merge([key, plenary_container.get_write_key()])
        try:
            lock_queue.acquire(key)
            remove_plenaries.stash()
            if dbcontainer:
                plenary_container.write(locked=True)
            remove_plenaries.remove(locked=True)
        except:
            remove_plenaries.restore_stash()
            if dbcontainer:
                plenary_container.restore_stash()
            raise
        finally:
            lock_queue.release(key)
        return
Example #11
    def render(self, session, logger, machine, dbuser, **arguments):
        dbmachine = Machine.get_unique(session, machine, compel=True)

        remove_plenaries = PlenaryCollection(logger=logger)
        remove_plenaries.append(Plenary.get_plenary(dbmachine))
        if dbmachine.vm_container:
            remove_plenaries.append(Plenary.get_plenary(dbmachine.vm_container))
            dbcontainer = dbmachine.vm_container.holder.holder_object
        else:
            dbcontainer = None

        if dbmachine.host:
            raise ArgumentError("{0} is still in use by {1:l} and cannot be "
                                "deleted.".format(dbmachine, dbmachine.host))
        addrs = []
        for addr in dbmachine.all_addresses():
            addrs.append("%s: %s" % (addr.logical_name, addr.ip))
        if addrs:
            addrmsg = ", ".join(addrs)
            raise ArgumentError("{0} still provides the following addresses, "
                                "delete them first: {1}.".format(dbmachine,
                                                                 addrmsg))

        session.delete(dbmachine)
        session.flush()

        key = remove_plenaries.get_remove_key()
        if dbcontainer:
            plenary_container = Plenary.get_plenary(dbcontainer, logger=logger)
            key = CompileKey.merge([key, plenary_container.get_write_key()])
        try:
            lock_queue.acquire(key)
            remove_plenaries.stash()
            if dbcontainer:
                plenary_container.write(locked=True)
            remove_plenaries.remove(locked=True)
        except:
            remove_plenaries.restore_stash()
            if dbcontainer:
                plenary_container.restore_stash()
            raise
        finally:
            lock_queue.release(key)
        return
Example #12
    def render(self, session, logger, machine, disk, controller, size, all,
               **arguments):
        dbmachine = Machine.get_unique(session, machine, compel=True)
        q = session.query(Disk).filter_by(machine=dbmachine)
        if disk:
            q = q.filter_by(device_name=disk)
        if controller:
            if controller not in controller_types:
                raise ArgumentError("%s is not a valid controller type, use "
                                    "one of: %s." % (controller,
                                                     ", ".join(controller_types)
                                                     ))
            q = q.filter_by(controller_type=controller)
        if size is not None:
            q = q.filter_by(capacity=size)
        results = q.all()

        if len(results) == 0:
            raise NotFoundException("No disks found.")
        elif len(results) > 1 and not all:
            raise ArgumentError("More than one matching disks found.  "
                                "Use --all to delete them all.")
        for result in results:
            session.delete(result)

        session.flush()
        session.expire(dbmachine, ['disks'])

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbmachine))
        dbcontainer = dbmachine.vm_container
        if dbcontainer:
            plenaries.append(Plenary.get_plenary(dbcontainer, logger=logger))

        plenaries.write()

        return
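
Note the contrast with Examples #4 and #6, which wrap the same disk deletion in explicit get_write_key/acquire/restore_stash/release ceremony: here the command simply calls plenaries.write(). The natural reading, although it is an assumption about the library rather than something shown on this page, is that PlenaryCollection.write() performs the locking and stash handling internally:

    # Assumed equivalence, not shown in this page's code:
    plenaries = PlenaryCollection(logger=logger)
    plenaries.append(Plenary.get_plenary(dbmachine))
    plenaries.write()  # presumably: merge keys, lock, write, unlock
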
Example #13
    def render(self, session, logger, interface, machine, network_device,
               switch, chassis, mac, user, **arguments):
        if switch:
            self.deprecated_option("switch", "Please use --network_device "
                                   "instead.", logger=logger, user=user,
                                   **arguments)
            if not network_device:
                network_device = switch
        self.require_one_of(machine=machine, network_device=network_device,
                            chassis=chassis, mac=mac)

        if machine:
            dbhw_ent = Machine.get_unique(session, machine, compel=True)
        elif network_device:
            dbhw_ent = NetworkDevice.get_unique(session, network_device, compel=True)
        elif chassis:
            dbhw_ent = Chassis.get_unique(session, chassis, compel=True)
        else:
            dbhw_ent = None

        dbinterface = Interface.get_unique(session, hardware_entity=dbhw_ent,
                                           name=interface, mac=mac, compel=True)
        if not dbhw_ent:
            dbhw_ent = dbinterface.hardware_entity

        if dbinterface.vlans:
            vlans = ", ".join([iface.name for iface in
                               dbinterface.vlans.values()])
            raise ArgumentError("{0} is the parent of the following VLAN "
                                "interfaces, delete them first: "
                                "{1}.".format(dbinterface, vlans))

        if dbinterface.slaves:
            slaves = ", ".join([iface.name for iface in dbinterface.slaves])
            raise ArgumentError("{0} is the master of the following slave "
                                "interfaces, delete them first: "
                                "{1}.".format(dbinterface, slaves))

        for addr in dbinterface.assignments:
            if addr.ip != dbhw_ent.primary_ip:
                continue

            # If this is a machine, it is possible to delete the host to get rid
            # of the primary name
            if dbhw_ent.hardware_type == "machine":
                msg = "  You should delete the host first."
            else:
                msg = ""

            raise ArgumentError("{0} holds the primary address of the {1:cl}, "
                                "therefore it cannot be deleted."
                                "{2}".format(dbinterface, dbhw_ent, msg))

        addrs = ", ".join(["%s: %s" % (addr.logical_name, addr.ip) for addr in
                           dbinterface.assignments])
        if addrs:
            raise ArgumentError("{0} still has the following addresses "
                                "configured, delete them first: "
                                "{1}.".format(dbinterface, addrs))

        dbhw_ent.interfaces.remove(dbinterface)
        session.flush()

        if dbhw_ent.hardware_type == 'machine':
            plenary_info = Plenary.get_plenary(dbhw_ent, logger=logger)
            plenary_info.write()
        return
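
The require_one_of call near the top enforces that the caller identifies the interface through exactly one of machine, network_device, chassis or mac. A plausible sketch of such a guard, assuming it raises the same ArgumentError used throughout these examples:

    def require_one_of(**kwargs):
        # Plausible sketch only; the broker's actual helper may differ.
        given = [name for name, value in kwargs.items() if value]
        if len(given) != 1:
            raise ArgumentError("Exactly one of --%s should be specified." %
                                ", --".join(sorted(kwargs)))
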
Example #14
    def render(self, session, logger, hostname, machine, archetype,
               buildstatus, personality, osname, osversion, service, instance,
               model, machine_type, vendor, serial, cluster,
               guest_on_cluster, guest_on_share, member_cluster_share,
               domain, sandbox, branch, sandbox_owner,
               dns_domain, shortname, mac, ip, networkip, network_environment,
               exact_location, server_of_service, server_of_instance, grn,
               eon_id, fullinfo, **arguments):
        dbnet_env = NetworkEnvironment.get_unique_or_default(session,
                                                             network_environment)

        q = session.query(Host)

        if machine:
            dbmachine = Machine.get_unique(session, machine, compel=True)
            q = q.filter_by(machine=dbmachine)

        # Add the machine definition and the primary name. Use aliases to make
        # sure the end result will be ordered by primary name.
        PriDns = aliased(DnsRecord)
        PriFqdn = aliased(Fqdn)
        PriDomain = aliased(DnsDomain)
        q = q.join(Machine,
                   (PriDns, PriDns.id == Machine.primary_name_id),
                   (PriFqdn, PriDns.fqdn_id == PriFqdn.id),
                   (PriDomain, PriFqdn.dns_domain_id == PriDomain.id))
        q = q.order_by(PriFqdn.name, PriDomain.name)
        q = q.options(contains_eager('machine'),
                      contains_eager('machine.primary_name', alias=PriDns),
                      contains_eager('machine.primary_name.fqdn', alias=PriFqdn),
                      contains_eager('machine.primary_name.fqdn.dns_domain',
                                     alias=PriDomain))
        q = q.reset_joinpoint()

        # Hardware-specific filters
        dblocation = get_location(session, **arguments)
        if dblocation:
            if exact_location:
                q = q.filter(Machine.location == dblocation)
            else:
                childids = dblocation.offspring_ids()
                q = q.filter(Machine.location_id.in_(childids))

        if model or vendor or machine_type:
            subq = Model.get_matching_query(session, name=model, vendor=vendor,
                                            machine_type=machine_type,
                                            compel=True)
            q = q.filter(Machine.model_id.in_(subq))

        if serial:
            self.deprecated_option("serial", "Please use search machine --serial instead.",
                logger=logger, **arguments)
            q = q.filter(Machine.serial_no == serial)

        # DNS IP address related filters
        if mac or ip or networkip or hostname or dns_domain or shortname:
            # Inner joins are cheaper than outer joins, so make some effort to
            # use inner joins when possible
            if mac or ip or networkip:
                q = q.join(Interface)
            else:
                q = q.outerjoin(Interface)
            if ip or networkip:
                q = q.join(AddressAssignment, Network, from_joinpoint=True)
            else:
                q = q.outerjoin(AddressAssignment, Network, from_joinpoint=True)

            if mac:
                self.deprecated_option("mac", "Please use search machine "
                                       "--mac instead.", logger=logger,
                                       **arguments)
                q = q.filter(Interface.mac == mac)
            if ip:
                q = q.filter(AddressAssignment.ip == ip)
                q = q.filter(Network.network_environment == dbnet_env)
            if networkip:
                dbnetwork = get_network_byip(session, networkip, dbnet_env)
                q = q.filter(AddressAssignment.network == dbnetwork)

            dbdns_domain = None
            if hostname:
                (shortname, dbdns_domain) = parse_fqdn(session, hostname)
            if dns_domain:
                dbdns_domain = DnsDomain.get_unique(session, dns_domain, compel=True)

            if shortname or dbdns_domain:
                ARecAlias = aliased(ARecord)
                ARecFqdn = aliased(Fqdn)

                q = q.outerjoin((ARecAlias,
                                 and_(ARecAlias.ip == AddressAssignment.ip,
                                      ARecAlias.network_id == AddressAssignment.network_id)),
                                (ARecFqdn, ARecAlias.fqdn_id == ARecFqdn.id))
                if shortname:
                    q = q.filter(or_(ARecFqdn.name == shortname,
                                     PriFqdn.name == shortname))
                if dbdns_domain:
                    q = q.filter(or_(ARecFqdn.dns_domain == dbdns_domain,
                                     PriFqdn.dns_domain == dbdns_domain))
            q = q.reset_joinpoint()

        (dbbranch, dbauthor) = get_branch_and_author(session, logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     branch=branch)
        if sandbox_owner:
            dbauthor = get_user_principal(session, sandbox_owner)

        if dbbranch:
            q = q.filter_by(branch=dbbranch)
        if dbauthor:
            q = q.filter_by(sandbox_author=dbauthor)

        if archetype:
            # Added to the searches as appropriate below.
            dbarchetype = Archetype.get_unique(session, archetype, compel=True)
        if personality and archetype:
            dbpersonality = Personality.get_unique(session,
                                                   archetype=dbarchetype,
                                                   name=personality,
                                                   compel=True)
            q = q.filter_by(personality=dbpersonality)
        elif personality:
            PersAlias = aliased(Personality)
            q = q.join(PersAlias).filter_by(name=personality)
            q = q.reset_joinpoint()
        elif archetype:
            PersAlias = aliased(Personality)
            q = q.join(PersAlias).filter_by(archetype=dbarchetype)
            q = q.reset_joinpoint()

        if buildstatus:
            dbbuildstatus = HostLifecycle.get_unique(session, buildstatus,
                                                     compel=True)
            q = q.filter_by(status=dbbuildstatus)

        if osname and osversion and archetype:
            # archetype was already resolved above
            dbos = OperatingSystem.get_unique(session, name=osname,
                                              version=osversion,
                                              archetype=dbarchetype,
                                              compel=True)
            q = q.filter_by(operating_system=dbos)
        elif osname or osversion:
            q = q.join('operating_system')
            if osname:
                q = q.filter_by(name=osname)
            if osversion:
                q = q.filter_by(version=osversion)
            q = q.reset_joinpoint()

        if service:
            dbservice = Service.get_unique(session, service, compel=True)
            if instance:
                dbsi = get_service_instance(session, dbservice, instance)
                q = q.filter(Host.services_used.contains(dbsi))
            else:
                q = q.join('services_used')
                q = q.filter_by(service=dbservice)
                q = q.reset_joinpoint()
        elif instance:
            q = q.join('services_used')
            q = q.filter_by(name=instance)
            q = q.reset_joinpoint()

        if server_of_service:
            dbserver_service = Service.get_unique(session, server_of_service,
                                                  compel=True)
            if server_of_instance:
                dbssi = get_service_instance(session, dbserver_service,
                                             server_of_instance)
                q = q.join('_services_provided')
                q = q.filter_by(service_instance=dbssi)
                q = q.reset_joinpoint()
            else:
                q = q.join('_services_provided', 'service_instance')
                q = q.filter_by(service=dbserver_service)
                q = q.reset_joinpoint()
        elif server_of_instance:
            q = q.join('_services_provided', 'service_instance')
            q = q.filter_by(name=server_of_instance)
            q = q.reset_joinpoint()

        if cluster:
            dbcluster = Cluster.get_unique(session, cluster, compel=True)
            if isinstance(dbcluster, MetaCluster):
                q = q.join('_cluster', 'cluster', '_metacluster')
                q = q.filter_by(metacluster=dbcluster)
            else:
                q = q.filter_by(cluster=dbcluster)
            q = q.reset_joinpoint()
        if guest_on_cluster:
            # TODO: this does not handle metaclusters according to Wes
            dbcluster = Cluster.get_unique(session, guest_on_cluster,
                                           compel=True)
            q = q.join('machine', VirtualMachine, ClusterResource)
            q = q.filter_by(cluster=dbcluster)
            q = q.reset_joinpoint()
        if guest_on_share:
            # v2
            v2shares = session.query(Share.id).filter_by(name=guest_on_share).all()
            if not v2shares:
                raise NotFoundException("No shares found with name {0}."
                                        .format(guest_on_share))

            NasAlias = aliased(VirtualDisk)
            q = q.join('machine', 'disks', (NasAlias, NasAlias.id == Disk.id))
            q = q.filter(
                NasAlias.share_id.in_(map(lambda s: s[0], v2shares)))
            q = q.reset_joinpoint()

        if member_cluster_share:
            # v2
            v2shares = session.query(Share.id).filter_by(name=member_cluster_share).all()
            if not v2shares:
                raise NotFoundException("No shares found with name {0}."
                                        .format(member_cluster_share))

            NasAlias = aliased(VirtualDisk)

            q = q.join('_cluster', 'cluster', 'resholder', VirtualMachine,
                       'machine', 'disks', (NasAlias, NasAlias.id == Disk.id))
            q = q.filter(
                NasAlias.share_id.in_(map(lambda s: s[0], v2shares)))
            q = q.reset_joinpoint()

        if grn or eon_id:
            dbgrn = lookup_grn(session, grn, eon_id, autoupdate=False)

            persq = session.query(Personality.id)
            persq = persq.outerjoin(PersonalityGrnMap)
            persq = persq.filter(or_(Personality.owner_eon_id == dbgrn.eon_id,
                                     PersonalityGrnMap.eon_id == dbgrn.eon_id))
            q = q.outerjoin(HostGrnMap)
            q = q.filter(or_(Host.owner_eon_id == dbgrn.eon_id,
                             HostGrnMap.eon_id == dbgrn.eon_id,
                             Host.personality_id.in_(persq.subquery())))
            q = q.reset_joinpoint()

        if fullinfo:
            return q.all()
        return SimpleHostList(q.all())
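
One detail of this query builder worth pulling out is the join strategy in the DNS/IP section: filters that must match a row use an inner join, while branches that only might need the table fall back to an outer join, in line with the comment that inner joins are cheaper. In bare SQLAlchemy terms, with toy classes standing in for the real mapped ones:

    # Toy illustration of the inner-vs-outer join choice made above.
    q = session.query(Host)
    if mac:                         # a filter will constrain Interface
        q = q.join(Interface)       # inner join: matching row required
    else:
        q = q.outerjoin(Interface)  # outer join: Interface is optional
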
Example #15
    def render(self,
               session,
               logger,
               hostname,
               machine,
               archetype,
               domain,
               sandbox,
               osname,
               osversion,
               buildstatus,
               personality,
               comments,
               zebra_interfaces,
               grn,
               eon_id,
               skip_dsdb_check=False,
               **arguments):
        dbarchetype = Archetype.get_unique(session, archetype, compel=True)
        section = "archetype_" + dbarchetype.name

        # This is for the various add_*_host commands
        if not domain and not sandbox:
            domain = self.config.get(section, "host_domain")

        (dbbranch, dbauthor) = get_branch_and_author(session,
                                                     logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     compel=True)

        if hasattr(dbbranch, "allow_manage") and not dbbranch.allow_manage:
            raise ArgumentError(
                "Adding hosts to {0:l} is not allowed.".format(dbbranch))

        if not buildstatus:
            buildstatus = 'build'
        dbstatus = HostLifecycle.get_unique(session, buildstatus, compel=True)
        dbmachine = Machine.get_unique(session, machine, compel=True)
        oldinfo = DSDBRunner.snapshot_hw(dbmachine)

        if not personality:
            if self.config.has_option(section, "default_personality"):
                personality = self.config.get(section, "default_personality")
            else:
                personality = 'generic'
        dbpersonality = Personality.get_unique(session,
                                               name=personality,
                                               archetype=dbarchetype,
                                               compel=True)

        if not osname:
            if self.config.has_option(section, "default_osname"):
                osname = self.config.get(section, "default_osname")
        if not osversion:
            if self.config.has_option(section, "default_osversion"):
                osversion = self.config.get(section, "default_osversion")

        if not osname or not osversion:
            raise ArgumentError("Can not determine a sensible default OS "
                                "for archetype %s. Please use the "
                                "--osname and --osversion parameters." %
                                (dbarchetype.name))

        dbos = OperatingSystem.get_unique(session,
                                          name=osname,
                                          version=osversion,
                                          archetype=dbarchetype,
                                          compel=True)

        if (dbmachine.model.machine_type == 'aurora_node'
                and dbpersonality.archetype.name != 'aurora'):
            raise ArgumentError("Machines of type aurora_node can only be "
                                "added with archetype aurora.")

        if dbmachine.host:
            raise ArgumentError("{0:c} {0.label} is already allocated to "
                                "{1:l}.".format(dbmachine, dbmachine.host))

        if grn or eon_id:
            dbgrn = lookup_grn(session,
                               grn,
                               eon_id,
                               logger=logger,
                               config=self.config)
        else:
            dbgrn = dbpersonality.owner_grn

        dbhost = Host(machine=dbmachine,
                      branch=dbbranch,
                      owner_grn=dbgrn,
                      sandbox_author=dbauthor,
                      personality=dbpersonality,
                      status=dbstatus,
                      operating_system=dbos,
                      comments=comments)
        session.add(dbhost)

        if self.config.has_option("archetype_" + archetype,
                                  "default_grn_target"):
            dbhost.grns.append((dbhost, dbgrn,
                                self.config.get("archetype_" + archetype,
                                                "default_grn_target")))

        if zebra_interfaces:
            # --autoip does not make sense for Zebra (at least not the way it's
            # implemented currently)
            dbinterface = None
        else:
            dbinterface = get_boot_interface(dbmachine)

        # This method is allowed to return None. This can only happen
        # (currently) using add_aurora_host, add_windows_host, or possibly by
        # bypassing the aq client and posting a request directly.
        audit_results = []
        ip = generate_ip(session,
                         logger,
                         dbinterface,
                         audit_results=audit_results,
                         **arguments)

        dbdns_rec, newly_created = grab_address(session,
                                                hostname,
                                                ip,
                                                allow_restricted_domain=True,
                                                allow_reserved=True,
                                                preclude=True)
        dbmachine.primary_name = dbdns_rec

        # Fix up auxiliary addresses to point to the primary name by default
        if ip:
            dns_env = dbdns_rec.fqdn.dns_environment

            for addr in dbmachine.all_addresses():
                if addr.interface.interface_type == "management":
                    continue
                if addr.service_address_id:  # pragma: no cover
                    continue
                for rec in addr.dns_records:
                    if rec.fqdn.dns_environment == dns_env:
                        rec.reverse_ptr = dbdns_rec.fqdn

        if zebra_interfaces:
            if not ip:
                raise ArgumentError(
                    "Zebra configuration requires an IP address.")
            dbsrv_addr = self.assign_zebra_address(session, dbmachine,
                                                   dbdns_rec, zebra_interfaces)
        else:
            if ip:
                if not dbinterface:
                    raise ArgumentError(
                        "You have specified an IP address for the "
                        "host, but {0:l} does not have a bootable "
                        "interface.".format(dbmachine))
                assign_address(dbinterface, ip, dbdns_rec.network)
            dbsrv_addr = None

        session.flush()

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbmachine))
        if dbmachine.vm_container:
            plenaries.append(Plenary.get_plenary(dbmachine.vm_container))
        if dbsrv_addr:
            plenaries.append(Plenary.get_plenary(dbsrv_addr))

        key = plenaries.get_write_key()
        try:
            lock_queue.acquire(key)
            plenaries.write(locked=True)

            # XXX: This (and some of the code above) is horrible.  There
            # should be a generic/configurable hook here that could kick
            # in based on archetype and/or domain.
            dsdb_runner = DSDBRunner(logger=logger)
            if dbhost.archetype.name == 'aurora':
                # For aurora, check that DSDB has a record of the host.
                if not skip_dsdb_check:
                    try:
                        dsdb_runner.show_host(hostname)
                    except ProcessException, e:
                        raise ArgumentError("Could not find host in DSDB: %s" %
                                            e)
            elif not dbmachine.primary_ip:
                logger.info("No IP for %s, not adding to DSDB." %
                            dbmachine.fqdn)
Example #16
    def render(self, session, logger, interface, machine, mac, model, vendor,
               boot, pg, autopg, comments, master, clear_master, default_route,
               rename_to, **arguments):
        """This command expects to locate an interface based only on name
        and machine - all other fields, if specified, are meant as updates.

        If the machine has a host, dsdb may need to be updated.

        The boot flag can *only* be set to true.  This is mostly technical,
        as at this point in the interface it is difficult to tell if the
        flag was unset or set to false.  However, it also vastly simplifies
        the dsdb logic - we never have to worry about a user trying to
        remove the boot flag from a host in dsdb.

        """

        audit_results = []

        dbhw_ent = Machine.get_unique(session, machine, compel=True)
        dbinterface = Interface.get_unique(session,
                                           hardware_entity=dbhw_ent,
                                           name=interface,
                                           compel=True)

        oldinfo = DSDBRunner.snapshot_hw(dbhw_ent)

        if arguments.get('hostname', None):
            # Hack to set an initial interface for an aurora host...
            dbhost = dbhw_ent.host
            if dbhost.archetype.name == 'aurora' and \
               dbhw_ent.primary_ip and not dbinterface.addresses:
                assign_address(dbinterface, dbhw_ent.primary_ip,
                               dbhw_ent.primary_name.network)

        # We may need extra IP verification (or an autoip option)...
        # This may also throw spurious errors if attempting to set the
        # port_group to a value it already has.
        if pg is not None and dbinterface.port_group != pg.lower().strip():
            dbinterface.port_group = verify_port_group(
                dbinterface.hardware_entity, pg)
        elif autopg:
            dbinterface.port_group = choose_port_group(
                session, logger, dbinterface.hardware_entity)
            audit_results.append(('pg', dbinterface.port_group))

        if master:
            if dbinterface.addresses:
                # FIXME: as a special case, if the only address is the
                # primary IP, then we could just move it to the master
                # interface. However this can be worked around by bonding
                # the interface before calling "add host", so don't bother
                # for now.
                raise ArgumentError("Can not enslave {0:l} because it has "
                                    "addresses.".format(dbinterface))
            dbmaster = Interface.get_unique(session,
                                            hardware_entity=dbhw_ent,
                                            name=master,
                                            compel=True)
            if dbmaster in dbinterface.all_slaves():
                raise ArgumentError(
                    "Enslaving {0:l} would create a circle, "
                    "which is not allowed.".format(dbinterface))
            dbinterface.master = dbmaster

        if clear_master:
            if not dbinterface.master:
                raise ArgumentError("{0} is not a slave.".format(dbinterface))
            dbinterface.master = None

        if comments:
            dbinterface.comments = comments
        if boot:
            # Should we also transfer the primary IP to the new boot interface?
            # That could get tricky if the new interface already has an IP
            # address...
            for i in dbhw_ent.interfaces:
                if i == dbinterface:
                    i.bootable = True
                    i.default_route = True
                else:
                    i.bootable = False
                    i.default_route = False
        if default_route is not None:
            dbinterface.default_route = default_route
            if not first_of(dbhw_ent.interfaces, lambda x: x.default_route):
                logger.client_info("Warning: {0:l} has no default route, hope "
                                   "that's ok.".format(dbhw_ent))

        # Set this MAC address last so that you can update to a bootable
        # interface *before* adding a MAC address. This is so the validation
        # that takes place in the interface class doesn't have to worry
        # about the order of updates to bootable=True and the MAC address.
        if mac:
            q = session.query(Interface).filter_by(mac=mac)
            other = q.first()
            if other and other != dbinterface:
                raise ArgumentError("MAC address {0} is already in use by "
                                    "{1:l}.".format(mac, other))
            dbinterface.mac = mac

        if model or vendor:
            if not dbinterface.model_allowed:
                raise ArgumentError(
                    "Model/vendor can not be set for a {0:lc}.".format(
                        dbinterface))

            dbmodel = Model.get_unique(session,
                                       name=model,
                                       vendor=vendor,
                                       machine_type='nic',
                                       compel=True)
            dbinterface.model = dbmodel
        if rename_to:
            rename_interface(session, dbinterface, rename_to)

        session.flush()
        session.refresh(dbhw_ent)

        plenary_info = PlenaryMachineInfo(dbhw_ent, logger=logger)
        key = plenary_info.get_write_key()
        try:
            lock_queue.acquire(key)
            plenary_info.write(locked=True)

            if dbhw_ent.host and dbhw_ent.host.archetype.name != "aurora":
                dsdb_runner = DSDBRunner(logger=logger)
                dsdb_runner.update_host(dbhw_ent, oldinfo)
                dsdb_runner.commit_or_rollback()
        except AquilonError, err:
            plenary_info.restore_stash()
            raise ArgumentError(err)
Example #17
    def render(self, session, logger, interface, machine, mac, automac, model,
               vendor, pg, autopg, type, comments, **arguments):
        dbmachine = Machine.get_unique(session, machine, compel=True)
        oldinfo = DSDBRunner.snapshot_hw(dbmachine)
        audit_results = []

        q = session.query(Interface)
        q = q.filter_by(name=interface, hardware_entity=dbmachine)
        if q.first():
            raise ArgumentError(
                "Machine %s already has an interface named %s." %
                (machine, interface))

        if not type:
            type = 'public'
            management_types = ['bmc', 'ilo', 'ipmi']
            for mtype in management_types:
                if interface.startswith(mtype):
                    type = 'management'
                    break

            if interface.startswith("bond"):
                type = 'bonding'
            elif interface.startswith("br"):
                type = 'bridge'

            # Test it last, VLANs can be added on top of almost anything
            if '.' in interface:
                type = 'vlan'

        if type == "oa" or type == "loopback":
            raise ArgumentError("Interface type '%s' is not valid for "
                                "machines." % type)

        bootable = None
        if type == 'public':
            if interface == 'eth0':
                bootable = True
            else:
                bootable = False

        dbmanager = None
        pending_removals = PlenaryCollection()
        dsdb_runner = DSDBRunner(logger=logger)
        if mac:
            prev = session.query(Interface).filter_by(mac=mac).first()
            if prev and prev.hardware_entity == dbmachine:
                raise ArgumentError("{0} already has an interface with MAC "
                                    "address {1}.".format(dbmachine, mac))
            # Is the conflicting interface something that can be
            # removed?  It is if:
            # - we are currently attempting to add a management interface
            # - the old interface belongs to a machine
            # - the old interface is associated with a host
            # - that host was blindly created, and thus can be removed safely
            if prev and type == 'management' and \
               prev.hardware_entity.hardware_type == 'machine' and \
               prev.hardware_entity.host and \
               prev.hardware_entity.host.status.name == 'blind':
                # FIXME: Is this just always allowed?  Maybe restrict
                # to only aqd-admin and the host itself?
                dummy_machine = prev.hardware_entity
                dummy_ip = dummy_machine.primary_ip
                old_fqdn = str(dummy_machine.primary_name)
                old_iface = prev.name
                old_mac = prev.mac
                old_network = get_net_id_from_ip(session, dummy_ip)
                self.remove_prev(session, logger, prev, pending_removals)
                session.flush()
                dsdb_runner.delete_host_details(old_fqdn, dummy_ip, old_iface,
                                                old_mac)
                self.consolidate_names(session, logger, dbmachine,
                                       dummy_machine.label, pending_removals)
                # It seems like a shame to throw away the IP address that
                # had been allocated for the blind host.  Try to use it
                # as it should be used...
                dbmanager = self.add_manager(session, logger, dbmachine,
                                             dummy_ip, old_network)
            elif prev:
                msg = describe_interface(session, prev)
                raise ArgumentError("MAC address %s is already in use: %s." %
                                    (mac, msg))
        elif automac:
            mac = self.generate_mac(session, dbmachine)
            audit_results.append(('mac', mac))
        else:
            # No action needed now that the MAC address can be null.
            pass

        if pg is not None:
            port_group = verify_port_group(dbmachine, pg)
        elif autopg:
            port_group = choose_port_group(session, logger, dbmachine)
            audit_results.append(('pg', port_group))
        else:
            port_group = None

        dbinterface = get_or_create_interface(session,
                                              dbmachine,
                                              name=interface,
                                              vendor=vendor,
                                              model=model,
                                              interface_type=type,
                                              mac=mac,
                                              bootable=bootable,
                                              port_group=port_group,
                                              comments=comments,
                                              preclude=True)

        # So far, we're *only* creating a manager if we happen to be
        # removing a blind entry and we can steal its IP address.
        if dbmanager:
            assign_address(dbinterface, dbmanager.ip, dbmanager.network)

        session.add(dbinterface)
        session.flush()

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbmachine))
        if pending_removals and dbmachine.host:
            # Not an exact test, but the file won't be re-written
            # if the contents are the same so calling too often is
            # not a major expense.
            plenaries.append(Plenary.get_plenary(dbmachine.host))
        # Even though there may be removals going on the write key
        # should be sufficient here.
        key = plenaries.get_write_key()
        try:
            lock_queue.acquire(key)
            pending_removals.stash()
            plenaries.write(locked=True)
            pending_removals.remove(locked=True)

            dsdb_runner.update_host(dbmachine, oldinfo)
            dsdb_runner.commit_or_rollback("Could not update host in DSDB")
        except:
            plenaries.restore_stash()
            pending_removals.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        if dbmachine.host:
            # FIXME: reconfigure host
            pass

        for name, value in audit_results:
            self.audit_result(session, name, value, **arguments)
        return
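
When --type is omitted, the command infers the interface type from the name alone. Pulled out as a standalone helper, the logic reads as below; the VLAN test deliberately runs last, since a dotted name can sit on top of almost any base type. This is a restatement of the code above, nothing more:

    def infer_interface_type(name):
        # Restates the name-based inference from Example #17.
        iface_type = 'public'
        if name.startswith(('bmc', 'ilo', 'ipmi')):
            iface_type = 'management'
        elif name.startswith('bond'):
            iface_type = 'bonding'
        elif name.startswith('br'):
            iface_type = 'bridge'
        if '.' in name:  # tested last: VLANs stack on almost anything
            iface_type = 'vlan'
        return iface_type

    # infer_interface_type('bond0.100') == 'vlan'
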
Example #18
def test_get_unique_by_label():
    """ test machine.get_unique """
    mchn = Machine.get_unique(sess, NAME, compel=True)
    assert mchn, "get_unique failure for machine"
    print mchn
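
Since compel=True makes get_unique raise instead of returning None, the assert above only trips if that contract breaks. A companion test for the negative path might look like this (hypothetical, assuming pytest is available and that sess comes from the same test module):

    import pytest

    def test_get_unique_missing_label():
        # Hypothetical companion test: a miss with compel=True should
        # raise NotFoundException rather than return None.
        with pytest.raises(NotFoundException):
            Machine.get_unique(sess, "no-such-machine", compel=True)
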
Example #19
    def render(
        self,
        session,
        logger,
        interface,
        machine,
        mac,
        model,
        vendor,
        boot,
        pg,
        autopg,
        comments,
        master,
        clear_master,
        default_route,
        rename_to,
        **arguments
    ):
        """This command expects to locate an interface based only on name
        and machine - all other fields, if specified, are meant as updates.

        If the machine has a host, dsdb may need to be updated.

        The boot flag can *only* be set to true.  This is mostly technical,
        as at this point in the interface it is difficult to tell if the
        flag was unset or set to false.  However, it also vastly simplifies
        the dsdb logic - we never have to worry about a user trying to
        remove the boot flag from a host in dsdb.

        """

        audit_results = []

        dbhw_ent = Machine.get_unique(session, machine, compel=True)
        dbinterface = Interface.get_unique(session, hardware_entity=dbhw_ent, name=interface, compel=True)

        oldinfo = DSDBRunner.snapshot_hw(dbhw_ent)

        if arguments.get("hostname", None):
            # Hack to set an initial interface for an aurora host...
            dbhost = dbhw_ent.host
            if dbhost.archetype.name == "aurora" and dbhw_ent.primary_ip and not dbinterface.addresses:
                assign_address(dbinterface, dbhw_ent.primary_ip, dbhw_ent.primary_name.network)

        # We may need extra IP verification (or an autoip option)...
        # This may also throw spurious errors if attempting to set the
        # port_group to a value it already has.
        if pg is not None and dbinterface.port_group != pg.lower().strip():
            dbinterface.port_group = verify_port_group(dbinterface.hardware_entity, pg)
        elif autopg:
            dbinterface.port_group = choose_port_group(session, logger, dbinterface.hardware_entity)
            audit_results.append(("pg", dbinterface.port_group))

        if master:
            if dbinterface.addresses:
                # FIXME: as a special case, if the only address is the
                # primary IP, then we could just move it to the master
                # interface. However this can be worked around by bonding
                # the interface before calling "add host", so don't bother
                # for now.
                raise ArgumentError("Can not enslave {0:l} because it has " "addresses.".format(dbinterface))
            dbmaster = Interface.get_unique(session, hardware_entity=dbhw_ent, name=master, compel=True)
            if dbmaster in dbinterface.all_slaves():
                raise ArgumentError(
                    "Enslaving {0:l} would create a circle, which is not allowed.".format(dbinterface)
                )
            dbinterface.master = dbmaster

        if clear_master:
            if not dbinterface.master:
                raise ArgumentError("{0} is not a slave.".format(dbinterface))
            dbinterface.master = None

        if comments:
            dbinterface.comments = comments
        if boot:
            # Should we also transfer the primary IP to the new boot interface?
            # That could get tricky if the new interface already has an IP
            # address...
            for i in dbhw_ent.interfaces:
                if i == dbinterface:
                    i.bootable = True
                    i.default_route = True
                else:
                    i.bootable = False
                    i.default_route = False
        if default_route is not None:
            dbinterface.default_route = default_route
            if not first_of(dbhw_ent.interfaces, lambda x: x.default_route):
                logger.client_info("Warning: {0:l} has no default route, hope " "that's ok.".format(dbhw_ent))

        # Set this MAC address last so that you can update to a bootable
        # interface *before* adding a MAC address. This is so the validation
        # that takes place in the interface class doesn't have to worry
        # about the order of updates to bootable=True and the MAC address.
        if mac:
            q = session.query(Interface).filter_by(mac=mac)
            other = q.first()
            if other and other != dbinterface:
                raise ArgumentError("MAC address {0} is already in use by " "{1:l}.".format(mac, other))
            dbinterface.mac = mac

        if model or vendor:
            if not dbinterface.model_allowed:
                raise ArgumentError("Model/vendor can not be set for a {0:lc}.".format(dbinterface))

            dbmodel = Model.get_unique(session, name=model, vendor=vendor, machine_type="nic", compel=True)
            dbinterface.model = dbmodel
        if rename_to:
            rename_interface(session, dbinterface, rename_to)

        session.flush()
        session.refresh(dbhw_ent)

        plenary_info = PlenaryMachineInfo(dbhw_ent, logger=logger)
        key = plenary_info.get_write_key()
        try:
            lock_queue.acquire(key)
            plenary_info.write(locked=True)

            if dbhw_ent.host and dbhw_ent.host.archetype.name != "aurora":
                dsdb_runner = DSDBRunner(logger=logger)
                dsdb_runner.update_host(dbhw_ent, oldinfo)
                dsdb_runner.commit_or_rollback()
        except AquilonError, err:
            plenary_info.restore_stash()
            raise ArgumentError(err)
        finally:
            lock_queue.release(key)
Ejemplo n.º 20
0
    def render(
            self,
            session,
            logger,
            # search_cluster
            archetype,
            cluster_type,
            personality,
            domain,
            sandbox,
            branch,
            buildstatus,
            allowed_archetype,
            allowed_personality,
            down_hosts_threshold,
            down_maint_threshold,
            max_members,
            member_archetype,
            member_hostname,
            member_personality,
            capacity_override,
            cluster,
            esx_guest,
            instance,
            esx_metacluster,
            service,
            share,
            esx_share,
            esx_switch,
            esx_virtual_machine,
            fullinfo,
            style,
            **arguments):

        if esx_share:
            self.deprecated_option("esx_share",
                                   "Please use --share instead.",
                                   logger=logger,
                                   **arguments)
            share = esx_share

        if cluster_type == 'esx':
            cls = EsxCluster
        else:
            cls = Cluster

        # Don't load full objects if we only want to show their name
        if fullinfo or style != 'raw':
            q = session.query(cls)
        else:
            q = session.query(cls.name)

        # The ORM automatically de-duplicates the result if we query full
        # objects, but not when we query just the names. Tell the DB to do so.
        q = q.distinct()

        (dbbranch, dbauthor) = get_branch_and_author(session,
                                                     logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     branch=branch)
        if dbbranch:
            q = q.filter_by(branch=dbbranch)
        if dbauthor:
            q = q.filter_by(sandbox_author=dbauthor)

        if archetype:
            # Added to the searches as appropriate below.
            dbarchetype = Archetype.get_unique(session, archetype, compel=True)
        if personality and archetype:
            dbpersonality = Personality.get_unique(session,
                                                   archetype=dbarchetype,
                                                   name=personality,
                                                   compel=True)
            q = q.filter_by(personality=dbpersonality)
        elif personality:
            PersAlias = aliased(Personality)
            q = q.join(PersAlias).filter_by(name=personality)
            q = q.reset_joinpoint()
        elif archetype:
            PersAlias = aliased(Personality)
            q = q.join(PersAlias).filter_by(archetype=dbarchetype)
            q = q.reset_joinpoint()

        if buildstatus:
            dbbuildstatus = ClusterLifecycle.get_unique(session,
                                                        buildstatus,
                                                        compel=True)
            q = q.filter_by(status=dbbuildstatus)

        if cluster_type:
            q = q.filter_by(cluster_type=cluster_type)

        # Go through the arguments and make special dicts for each
        # specific set of location arguments that are stripped of the
        # given prefix.
        location_args = {'cluster_': {}, 'member_': {}}
        for prefix in location_args.keys():
            for (k, v) in arguments.items():
                if k.startswith(prefix):
                    # arguments['cluster_building'] = 'dd'
                    # becomes
                    # location_args['cluster_']['building'] = 'dd'
                    location_args[prefix][k.replace(prefix, '')] = v

        dblocation = get_location(session, **location_args['cluster_'])
        if dblocation:
            if location_args['cluster_']['exact_location']:
                q = q.filter_by(location_constraint=dblocation)
            else:
                childids = dblocation.offspring_ids()
                q = q.filter(Cluster.location_constraint_id.in_(childids))
        dblocation = get_location(session, **location_args['member_'])
        if dblocation:
            q = q.join('_hosts', 'host', 'machine')
            if location_args['member_']['exact_location']:
                q = q.filter_by(location=dblocation)
            else:
                childids = dblocation.offspring_ids()
                q = q.filter(Machine.location_id.in_(childids))
            q = q.reset_joinpoint()

        # ESX-specific filters
        if cluster:
            q = q.filter_by(name=cluster)
        if esx_metacluster:
            dbmetacluster = MetaCluster.get_unique(session,
                                                   esx_metacluster,
                                                   compel=True)
            q = q.join('_metacluster')
            q = q.filter_by(metacluster=dbmetacluster)
            q = q.reset_joinpoint()
        if esx_virtual_machine:
            dbvm = Machine.get_unique(session,
                                      esx_virtual_machine,
                                      compel=True)
            # TODO: support VMs inside resource groups?
            q = q.join(ClusterResource, VirtualMachine)
            q = q.filter_by(machine=dbvm)
            q = q.reset_joinpoint()
        if esx_guest:
            dbguest = hostname_to_host(session, esx_guest)
            # TODO: support VMs inside resource groups?
            q = q.join(ClusterResource, VirtualMachine, Machine)
            q = q.filter_by(host=dbguest)
            q = q.reset_joinpoint()
        if capacity_override:
            q = q.filter(EsxCluster.memory_capacity != None)
        if esx_switch:
            dbswitch = Switch.get_unique(session, esx_switch, compel=True)
            q = q.filter_by(switch=dbswitch)

        if service:
            dbservice = Service.get_unique(session, name=service, compel=True)
            if instance:
                dbsi = ServiceInstance.get_unique(session,
                                                  name=instance,
                                                  service=dbservice,
                                                  compel=True)
                q = q.filter(Cluster.service_bindings.contains(dbsi))
            else:
                q = q.join('service_bindings')
                q = q.filter_by(service=dbservice)
                q = q.reset_joinpoint()
        elif instance:
            q = q.join('service_bindings')
            q = q.filter_by(name=instance)
            q = q.reset_joinpoint()

        if share:
            # Perform sanity check on the share name
            q2 = session.query(Share)
            q2 = q2.filter_by(name=share)
            if not q2.first():
                raise NotFoundException("Share %s not found." % share)

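            # A share can hang off the cluster's resource holder either
            # directly (S1) or nested inside a resource group
            # (CR -> RG -> BR -> S2); outer-join both paths and accept a
            # match on either alias.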
            CR = aliased(ClusterResource)
            S1 = aliased(Share)
            S2 = aliased(Share)
            RG = aliased(ResourceGroup)
            BR = aliased(BundleResource)
            q = q.join(CR)
            q = q.outerjoin((S1, S1.holder_id == CR.id))
            q = q.outerjoin((RG, RG.holder_id == CR.id),
                            (BR, BR.resourcegroup_id == RG.id),
                            (S2, S2.holder_id == BR.id))
            q = q.filter(or_(S1.name == share, S2.name == share))
            q = q.reset_joinpoint()

        if max_members:
            q = q.filter_by(max_hosts=max_members)

        if down_hosts_threshold:
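            # parse_threshold presumably splits values like "2" or "50%"
            # into a percent flag and a numeric threshold; filtering on
            # both keeps absolute and percentage thresholds distinct.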
            (pct, dht) = Cluster.parse_threshold(down_hosts_threshold)
            q = q.filter_by(down_hosts_percent=pct)
            q = q.filter_by(down_hosts_threshold=dht)

        if down_maint_threshold:
            (pct, dmt) = Cluster.parse_threshold(down_maint_threshold)
            q = q.filter_by(down_maint_percent=pct)
            q = q.filter_by(down_maint_threshold=dmt)

        if allowed_archetype:
            # Added to the searches as appropriate below.
            dbaa = Archetype.get_unique(session,
                                        allowed_archetype,
                                        compel=True)
        if allowed_personality and allowed_archetype:
            dbap = Personality.get_unique(session,
                                          archetype=dbaa,
                                          name=allowed_personality,
                                          compel=True)
            q = q.filter(Cluster.allowed_personalities.contains(dbap))
        elif allowed_personality:
            q = q.join('allowed_personalities')
            q = q.filter_by(name=allowed_personality)
            q = q.reset_joinpoint()
        elif allowed_archetype:
            q = q.join('allowed_personalities')
            q = q.filter_by(archetype=dbaa)
            q = q.reset_joinpoint()

        if member_hostname:
            dbhost = hostname_to_host(session, member_hostname)
            q = q.join('_hosts')
            q = q.filter_by(host=dbhost)
            q = q.reset_joinpoint()

        if member_archetype:
            # Added to the searches as appropriate below.
            dbma = Archetype.get_unique(session, member_archetype, compel=True)
        if member_personality and member_archetype:
            q = q.join('_hosts', 'host')
            dbmp = Personality.get_unique(session,
                                          archetype=dbma,
                                          name=member_personality,
                                          compel=True)
            q = q.filter_by(personality=dbmp)
            q = q.reset_joinpoint()
        elif member_personality:
            q = q.join('_hosts', 'host', 'personality')
            q = q.filter_by(name=member_personality)
            q = q.reset_joinpoint()
        elif member_archetype:
            q = q.join('_hosts', 'host', 'personality')
            q = q.filter_by(archetype=dbma)
            q = q.reset_joinpoint()

        if cluster_type == 'esx':
            q = q.order_by(EsxCluster.name)
        else:
            q = q.order_by(Cluster.name)

        if fullinfo:
            return q.all()
        return SimpleClusterList(q.all())
Ejemplo n.º 21
0
def test_get_unique_by_label():
    """ test machine.get_unique """
    mchn = Machine.get_unique(sess, NAME, compel=True)
    assert mchn, 'get_unique failure for machine'
    print mchn
Ejemplo n.º 22
0
    def render(
        self,
        session,
        logger,
        interface,
        machine,
        mac,
        automac,
        model,
        vendor,
        pg,
        autopg,
        iftype,
        type,
        comments,
        **arguments
    ):
        dbmachine = Machine.get_unique(session, machine, compel=True)
        oldinfo = DSDBRunner.snapshot_hw(dbmachine)
        audit_results = []

        if type:
            self.deprecated_option("type", "Please use --iftype" "instead.", logger=logger, **arguments)
            if not iftype:
                iftype = type

        if not iftype:
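            # No --iftype given: infer the type from the interface name.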
            iftype = "public"
            management_types = ["bmc", "ilo", "ipmi"]
            for mtype in management_types:
                if interface.startswith(mtype):
                    iftype = "management"
                    break

            if interface.startswith("bond"):
                iftype = "bonding"
            elif interface.startswith("br"):
                iftype = "bridge"

            # Test it last, VLANs can be added on top of almost anything
            if "." in interface:
                iftype = "vlan"

        if iftype == "oa" or iftype == "loopback":
            raise ArgumentError("Interface type '%s' is not valid for " "machines." % iftype)

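        # Only a public "eth0" defaults to bootable; other public
        # interfaces are explicitly non-bootable, and the remaining types
        # leave the flag unset.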
        bootable = None
        if iftype == "public":
            if interface == "eth0":
                bootable = True
            else:
                bootable = False

        dbmanager = None
        pending_removals = PlenaryCollection()
        dsdb_runner = DSDBRunner(logger=logger)
        if mac:
            prev = session.query(Interface).filter_by(mac=mac).first()
            if prev and prev.hardware_entity == dbmachine:
                raise ArgumentError("{0} already has an interface with MAC " "address {1}.".format(dbmachine, mac))
            # Is the conflicting interface something that can be
            # removed?  It is if:
            # - we are currently attempting to add a management interface
            # - the old interface belongs to a machine
            # - the old interface is associated with a host
            # - that host was blindly created, and thus can be removed safely
            if (
                prev
                and iftype == "management"
                and prev.hardware_entity.hardware_type == "machine"
                and prev.hardware_entity.host
                and prev.hardware_entity.host.status.name == "blind"
            ):
                # FIXME: Is this just always allowed?  Maybe restrict
                # to only aqd-admin and the host itself?
                dummy_machine = prev.hardware_entity
                dummy_ip = dummy_machine.primary_ip
                old_fqdn = str(dummy_machine.primary_name)
                old_iface = prev.name
                old_mac = prev.mac
                old_network = get_net_id_from_ip(session, dummy_ip)
                self.remove_prev(session, logger, prev, pending_removals)
                session.flush()
                dsdb_runner.delete_host_details(old_fqdn, dummy_ip, old_iface, old_mac)
                self.consolidate_names(session, logger, dbmachine, dummy_machine.label, pending_removals)
                # It seems like a shame to throw away the IP address that
                # had been allocated for the blind host.  Try to use it
                # as it should be used...
                dbmanager = self.add_manager(session, logger, dbmachine, dummy_ip, old_network)
            elif prev:
                msg = describe_interface(session, prev)
                raise ArgumentError("MAC address %s is already in use: %s." % (mac, msg))
        elif automac:
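            # generate_mac presumably allocates the next free MAC address
            # from the broker's reserved range for virtual hardware.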
            mac = self.generate_mac(session, dbmachine)
            audit_results.append(("mac", mac))
        else:
            # Nothing to do; the MAC address is now allowed to be null
            pass

        if pg is not None:
            port_group = verify_port_group(dbmachine, pg)
        elif autopg:
            port_group = choose_port_group(session, logger, dbmachine)
            audit_results.append(("pg", port_group))
        else:
            port_group = None

        dbinterface = get_or_create_interface(
            session,
            dbmachine,
            name=interface,
            vendor=vendor,
            model=model,
            interface_type=iftype,
            mac=mac,
            bootable=bootable,
            port_group=port_group,
            comments=comments,
            preclude=True,
        )

        # So far, we're *only* creating a manager if we happen to be
        # removing a blind entry and we can steal its IP address.
        if dbmanager:
            assign_address(dbinterface, dbmanager.ip, dbmanager.network, logger=logger)

        session.add(dbinterface)
        session.flush()

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbmachine))
        if pending_removals and dbmachine.host:
            # Not an exact test, but the file won't be re-written
            # if the contents are the same so calling too often is
            # not a major expense.
            plenaries.append(Plenary.get_plenary(dbmachine.host))
        # Even though there may be removals going on the write key
        # should be sufficient here.
        with plenaries.get_key():
            pending_removals.stash()
            try:
                plenaries.write(locked=True)
                pending_removals.remove(locked=True)

                dsdb_runner.update_host(dbmachine, oldinfo)
                dsdb_runner.commit_or_rollback("Could not update host in DSDB")
            except:
                plenaries.restore_stash()
                pending_removals.restore_stash()
                raise

        if dbmachine.host:
            # FIXME: reconfigure host
            pass

        for name, value in audit_results:
            self.audit_result(session, name, value, **arguments)
        return
Ejemplo n.º 23
0
    def render(self, session, logger, machine, model, vendor, serial, chassis,
               slot, cpuname, cpuvendor, cpuspeed, cpucount, memory, cluster,
               vmhost, uri, comments, **arguments):
        dblocation = get_location(session,
                                  query_options=[subqueryload('parents'),
                                                 joinedload('parents.dns_maps')],
                                  **arguments)
        if chassis:
            dbchassis = Chassis.get_unique(session, chassis, compel=True)
            if slot is None:
                raise ArgumentError("The --chassis option requires a --slot.")
            if dblocation and dblocation != dbchassis.location:
                raise ArgumentError("{0} conflicts with chassis location "
                                    "{1}.".format(dblocation, dbchassis.location))
            dblocation = dbchassis.location
        elif slot is not None:
            raise ArgumentError("The --slot option requires a --chassis.")

        dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                   compel=True)

        if not dbmodel.model_type.isMachineType():
            raise ArgumentError("The add_machine command cannot add machines "
                                "of type %s." % str(dbmodel.model_type))

        vmholder = None

        if cluster or vmhost:
            if cluster and vmhost:
                raise ArgumentError("Cluster and vmhost cannot be specified "
                                    "together.")
            if not dbmodel.model_type.isVirtualMachineType():
                raise ArgumentError("{0} is not a virtual machine."
                                    .format(dbmodel))

            # TODO: do we need VMs inside resource groups?
            vmholder = get_resource_holder(session, hostname=vmhost,
                                           cluster=cluster, resgroup=None,
                                           compel=False)

            if vmholder.holder_object.status.name == 'decommissioned':
                raise ArgumentError("Cannot add virtual machines to "
                                    "decommissioned clusters.")

            if cluster:
                container_loc = vmholder.holder_object.location_constraint
            else:
                container_loc = vmholder.holder_object.hardware_entity.location

            if dblocation and dblocation != container_loc:
                raise ArgumentError("Cannot override container location {0} "
                                    "with location {1}.".format(container_loc,
                                                                dblocation))
            dblocation = container_loc
        elif dbmodel.model_type.isVirtualMachineType():
            raise ArgumentError("Virtual machines must be assigned to a "
                                "cluster or a host.")

        Machine.get_unique(session, machine, preclude=True)
        dbmachine = create_machine(session, machine, dblocation, dbmodel,
                                   cpuname, cpuvendor, cpuspeed, cpucount,
                                   memory, serial, comments)

        if uri and not dbmodel.model_type.isVirtualAppliance():
            raise ArgumentError("URI can be specified only for virtual "
                                "appliances and the model's type is %s." %
                                dbmodel.model_type)

        dbmachine.uri = uri

        if chassis:
            # FIXME: Are virtual machines allowed to be in a chassis?
            dbslot = session.query(ChassisSlot).filter_by(chassis=dbchassis,
                                                          slot_number=slot).first()
            if not dbslot:
                dbslot = ChassisSlot(chassis=dbchassis, slot_number=slot)
            dbslot.machine = dbmachine
            session.add(dbslot)

        if vmholder:
            dbvm = VirtualMachine(machine=dbmachine, name=dbmachine.label,
                                  holder=vmholder)
            if hasattr(vmholder.holder_object, "validate") and \
               callable(vmholder.holder_object.validate):
                vmholder.holder_object.validate()

        session.flush()

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbmachine))
        if vmholder:
            plenaries.append(Plenary.get_plenary(vmholder.holder_object))
            plenaries.append(Plenary.get_plenary(dbvm))

        # The check to make sure a plenary file is not written out for
        # dummy aurora hardware is within the call to write().  This way
        # it is consistent without altering (and forgetting to alter)
        # all the calls to the method.
        plenaries.write()
        return
Ejemplo n.º 24
0
    def render(self, session, network, network_environment, ip, type, side,
               machine, fqdn, cluster, pg, has_dynamic_ranges, exact_location,
               fullinfo, style, **arguments):
        """Return a network matching the parameters.

        Some of the search terms can only return a unique network.  For
        those (like ip and fqdn) we proceed with the query anyway.  This
        allows for quick scripted tests like "is the network for X.X.X.X
        a tor_net2?".

        """
        dbnet_env = NetworkEnvironment.get_unique_or_default(session,
                                                             network_environment)
        q = session.query(Network)
        q = q.filter_by(network_environment=dbnet_env)
        if network:
            # Note: the network name is not unique (neither in QIP)
            q = q.filter_by(name=network)
        if ip:
            dbnetwork = get_net_id_from_ip(session, ip, dbnet_env)
            q = q.filter_by(id=dbnetwork.id)
        if type:
            q = q.filter_by(network_type=type)
        if side:
            q = q.filter_by(side=side)
        if machine:
            dbmachine = Machine.get_unique(session, machine, compel=True)
            vlans = []
            if dbmachine.cluster and dbmachine.cluster.network_device:
                # If this is a VM on a cluster, consult the VLANs.  There
                # could be functionality here for real hardware to consult
                # interface port groups... there's no real use case yet.
                vlans = [VlanInfo.get_vlan_id(session, i.port_group)
                         for i in dbmachine.interfaces if i.port_group]
                if vlans:
                    q = q.join('observed_vlans')
                    q = q.filter_by(network_device=dbmachine.cluster.network_device)
                    q = q.filter(ObservedVlan.vlan_id.in_(vlans))
                    q = q.reset_joinpoint()
            if not vlans:
                networks = [addr.network.id for addr in
                            dbmachine.all_addresses()]
                if not networks:
                    msg = "Machine %s has no interfaces " % dbmachine.label
                    if dbmachine.cluster:
                        msg += "with a portgroup or "
                    msg += "assigned to a network."
                    raise ArgumentError(msg)
                q = q.filter(Network.id.in_(networks))
        if fqdn:
            (short, dbdns_domain) = parse_fqdn(session, fqdn)
            dnsq = session.query(ARecord.ip)
            dnsq = dnsq.join(ARecord.fqdn)
            dnsq = dnsq.filter_by(name=short)
            dnsq = dnsq.filter_by(dns_domain=dbdns_domain)
            networks = [get_net_id_from_ip(session, addr.ip, dbnet_env).id
                        for addr in dnsq.all()]
            q = q.filter(Network.id.in_(networks))
        if cluster:
            dbcluster = Cluster.get_unique(session, cluster, compel=True)
            if dbcluster.network_device:
                q = q.join('observed_vlans')
                q = q.filter_by(network_device=dbcluster.network_device)
                q = q.reset_joinpoint()
            else:
                net_ids = [h.hardware_entity.primary_name.network.id
                           for h in dbcluster.hosts
                           if getattr(h.hardware_entity.primary_name,
                                      "network", None)]
                q = q.filter(Network.id.in_(net_ids))
        if pg:
            vlan = VlanInfo.get_vlan_id(session, pg, compel=ArgumentError)
            q = q.join('observed_vlans')
            q = q.filter_by(vlan_id=vlan)
            q = q.reset_joinpoint()
        dblocation = get_location(session, **arguments)
        if dblocation:
            if exact_location:
                q = q.filter_by(location=dblocation)
            else:
                childids = dblocation.offspring_ids()
                q = q.filter(Network.location_id.in_(childids))
        if has_dynamic_ranges:
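            # Keep only networks that have at least one DynamicStub (a
            # dynamic DHCP range entry) registered against them.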
            q = q.filter(exists([DynamicStub.dns_record_id],
                                from_obj=DynamicStub.__table__.join(ARecord.__table__))
                         .where(Network.id == DynamicStub.network_id))
        q = q.order_by(Network.ip)
        if fullinfo or style != 'raw':
            q = q.options(undefer('comments'))
            return q.all()
        return StringAttributeList(q.all(),
                                   lambda n: "%s/%s" % (n.ip, n.cidr))
Ejemplo n.º 25
0
    def render(self, session, logger, hostname, machine, auxiliary, interface,
               mac, comments, **arguments):
        if machine:
            dbmachine = Machine.get_unique(session, machine, compel=True)
        if hostname:
            dbhost = hostname_to_host(session, hostname)
            if machine and dbhost.machine != dbmachine:
                raise ArgumentError("Use either --hostname or --machine to "
                                    "uniquely identify a system.")
            dbmachine = dbhost.machine

        oldinfo = DSDBRunner.snapshot_hw(dbmachine)

        dbinterface = get_or_create_interface(session, dbmachine,
                                              name=interface, mac=mac,
                                              interface_type='public',
                                              bootable=False)

        # Multiple addresses will only be allowed with the "add interface
        # address" command
        addrs = ", ".join(["%s [%s]" % (addr.logical_name, addr.ip) for addr
                           in dbinterface.assignments])
        if addrs:
            raise ArgumentError("{0} already has the following addresses: "
                                "{1}.".format(dbinterface, addrs))

        audit_results = []
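        # generate_ip presumably picks the address from the IP-selection
        # options passed through **arguments; compel=True turns a missing
        # option into an error instead of returning None.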
        ip = generate_ip(session, logger, dbinterface, compel=True,
                         audit_results=audit_results, **arguments)

        dbdns_rec, newly_created = grab_address(session, auxiliary, ip,
                                                comments=comments,
                                                preclude=True)

        if dbmachine.primary_name:
            # This command cannot use a non-default DNS environment, so no extra
            # checks are necessary
            dbdns_rec.reverse_ptr = dbmachine.primary_name.fqdn

        assign_address(dbinterface, ip, dbdns_rec.network)

        session.flush()

        plenary_info = PlenaryMachineInfo(dbmachine, logger=logger)
        key = plenary_info.get_write_key()
        try:
            lock_queue.acquire(key)
            plenary_info.write(locked=True)

            dsdb_runner = DSDBRunner(logger=logger)
            dsdb_runner.update_host(dbmachine, oldinfo)
            dsdb_runner.commit_or_rollback("Could not add host to DSDB")
        except:
            plenary_info.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        if dbmachine.host:
            # XXX: Host needs to be reconfigured.
            pass

        for name, value in audit_results:
            self.audit_result(session, name, value, **arguments)
        return
Ejemplo n.º 26
0
    def render(self, session, logger, machine, disk, controller, share,
               resourcegroup, address, comments, size, boot, **kw):

        # Handle deprecated arguments
        if kw.get("type"):
            self.deprecated_option("type",
                                   "Please use --controller instead.",
                                   logger=logger,
                                   **kw)
            controller = kw["type"]
        if kw.get("capacity"):
            self.deprecated_option("capacity",
                                   "Please use --size instead.",
                                   logger=logger,
                                   **kw)
            size = kw["capacity"]
        if not size or not controller:
            raise ArgumentError("Please specify both --size "
                                "and --controller.")

        if controller not in controller_types:
            raise ArgumentError("%s is not a valid controller type, use one "
                                "of: %s." %
                                (controller, ", ".join(controller_types)))

        dbmachine = Machine.get_unique(session, machine, compel=True)
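        # Reject duplicate device names, and let an existing boot disk
        # drive the default: a further disk is non-bootable unless --boot
        # was given explicitly, which is then an error.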
        for dbdisk in dbmachine.disks:
            if dbdisk.device_name == disk:
                raise ArgumentError("Machine %s already has a disk named %s." %
                                    (machine, disk))
            if dbdisk.bootable:
                if boot is None:
                    boot = False
                elif boot:
                    raise ArgumentError("Machine %s already has a boot disk." %
                                        machine)

        if boot is None:
            # Backward compatibility: "sda"/"c0d0" is bootable, except if there
            # is already a boot disk
            boot = (disk == "sda" or disk == "c0d0")

        if share:
            if not CommandAddDisk.REGEX_ADDRESS.match(address):
                raise ArgumentError("Disk address '%s' is not valid, it must "
                                    "match \d+:\d+ (e.g. 0:0)." % address)
            if not dbmachine.vm_container:
                raise ArgumentError(
                    "{0} is not a virtual machine, it is not "
                    "possible to define a virtual disk.".format(dbmachine))

            dbshare = find_share(dbmachine.vm_container.holder.holder_object,
                                 resourcegroup, share)
            dbdisk = VirtualDisk(device_name=disk,
                                 controller_type=controller,
                                 bootable=boot,
                                 capacity=size,
                                 address=address,
                                 comments=comments)

            dbshare.disks.append(dbdisk)
        else:
            dbdisk = LocalDisk(device_name=disk,
                               controller_type=controller,
                               capacity=size,
                               bootable=boot,
                               comments=comments)

        dbmachine.disks.append(dbdisk)

        self._write_plenary_info(dbmachine, logger)
        return
Ejemplo n.º 27
0
    def render(self, session, logger, machine, disk, controller, share,
               resourcegroup, address, comments, size, boot, **kw):

        # Handle deprecated arguments
        if kw.get("type"):
            self.deprecated_option("type", "Please use --controller instead.",
                                   logger=logger, **kw)
            controller = kw["type"]
        if kw.get("capacity"):
            self.deprecated_option("capacity", "Please use --size instead.",
                                   logger=logger, **kw)
            size = kw["capacity"]
        if not size or not controller:
            raise ArgumentError("Please specify both --size "
                                "and --controller.")

        if controller not in controller_types:
            raise ArgumentError("%s is not a valid controller type, use one "
                                "of: %s." % (controller,
                                             ", ".join(controller_types)))

        dbmachine = Machine.get_unique(session, machine, compel=True)
        for dbdisk in dbmachine.disks:
            if dbdisk.device_name == disk:
                raise ArgumentError("Machine %s already has a disk named %s." %
                                    (machine, disk))
            if dbdisk.bootable:
                if boot is None:
                    boot = False
                elif boot:
                    raise ArgumentError("Machine %s already has a boot disk." %
                                        machine)

        if boot is None:
            # Backward compatibility: "sda"/"c0d0" is bootable, except if there
            # is already a boot disk
            boot = (disk == "sda" or disk == "c0d0")

        if share:
            if not CommandAddDisk.REGEX_ADDRESS.match(address):
                raise ArgumentError("Disk address '%s' is not valid, it must "
                                    "match \d+:\d+ (e.g. 0:0)." % address)
            if not dbmachine.vm_container:
                raise ArgumentError("{0} is not a virtual machine, it is not "
                                    "possible to define a virtual disk."
                                    .format(dbmachine))

            dbshare = find_share(dbmachine.vm_container.holder.holder_object,
                                 resourcegroup, share)
            dbdisk = VirtualDisk(device_name=disk, controller_type=controller,
                                 bootable=boot, capacity=size, address=address,
                                 comments=comments)

            dbshare.disks.append(dbdisk)
        else:
            dbdisk = LocalDisk(device_name=disk, controller_type=controller,
                               capacity=size, bootable=boot, comments=comments)

        dbmachine.disks.append(dbdisk)

        self._write_plenary_info(dbmachine, logger)
        return
Ejemplo n.º 28
0
class CommandAddAuroraHost(CommandAddHost):

    required_parameters = ["hostname"]

    # Look for node name like <building><rack_id>c<chassis_id>n<node_num>
    nodename_re = re.compile(r'^\s*([a-zA-Z]+)(\d+)c(\d+)n(\d+)\s*$')
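    # A (hypothetical) node name "xy12c3n4" would thus yield building
    # "xy", rack id "12", chassis id "3" and node number "4".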

    def render(self, session, logger, hostname, osname, osversion, **kwargs):
        # Pull relevant info out of dsdb...
        dsdb_runner = DSDBRunner(logger=logger)
        try:
            fields = dsdb_runner.show_host(hostname)
        except ProcessException, e:
            raise ArgumentError("Could not find %s in DSDB: %s" %
                                (hostname, e))

        fqdn = fields["fqdn"]
        dsdb_lookup = fields["dsdb_lookup"]
        if fields["node"]:
            machine = fields["node"]
        elif fields["primary_name"]:
            machine = fields["primary_name"]
        else:
            machine = dsdb_lookup

        # Create a machine
        dbmodel = Model.get_unique(session, name="aurora_model",
                                   vendor="aurora_vendor", compel=True)
        dbmachine = Machine.get_unique(session, machine)
        dbslot = None
        if not dbmachine:
            m = self.nodename_re.search(machine)
            if m:
                (building, rid, cid, nodenum) = m.groups()
                dbbuilding = session.query(Building).filter_by(
                        name=building).first()
                if not dbbuilding:
                    raise ArgumentError("Failed to find building %s for "
                                        "node %s, please add an Aurora "
                                        "machine manually and follow with "
                                        "add_host." % (building, machine))
                rack = building + rid
                dbrack = session.query(Rack).filter_by(name=rack).first()
                if not dbrack:
                    try:
                        rack_fields = dsdb_runner.show_rack(rack)
                        dbrack = Rack(name=rack, fullname=rack,
                                      parent=dbbuilding,
                                      rack_row=rack_fields["rack_row"],
                                      rack_column=rack_fields["rack_col"])
                        session.add(dbrack)
                    except (ProcessException, ValueError), e:
                        logger.client_info("Rack %s not defined in DSDB." % rack)
                dblocation = dbrack or dbbuilding
                chassis = rack + "c" + cid
                dbdns_domain = session.query(DnsDomain).filter_by(
                        name="ms.com").first()
                dbchassis = Chassis.get_unique(session, chassis)
                if not dbchassis:
                    dbchassis_model = Model.get_unique(session,
                                                       name="aurora_chassis_model",
                                                       vendor="aurora_vendor",
                                                       compel=True)
                    dbchassis = Chassis(label=chassis, location=dblocation,
                                        model=dbchassis_model)
                    session.add(dbchassis)
                    dbfqdn = Fqdn.get_or_create(session, name=chassis,
                                                dns_domain=dbdns_domain,
                                                preclude=True)
                    dbdns_rec = ReservedName(fqdn=dbfqdn)
                    session.add(dbdns_rec)
                    dbchassis.primary_name = dbdns_rec
                dbslot = session.query(ChassisSlot).filter_by(
                        chassis=dbchassis, slot_number=nodenum).first()
                # Note: Could be even more persnickity here and check that
                # the slot is currently empty.  Seems like overkill.
                if not dbslot:
                    dbslot = ChassisSlot(chassis=dbchassis,
                                         slot_number=nodenum)
                    session.add(dbslot)
            else:
                dbnet_env = NetworkEnvironment.get_unique_or_default(session)

                try:
                    host_ip = gethostbyname(hostname)
                except gaierror, e:
                    raise ArgumentError("Error when looking up host: %d, %s" %
                                        (e.errno, e.strerror))

                dbnetwork = get_net_id_from_ip(session, IPv4Address(host_ip), dbnet_env)
                dblocation = dbnetwork.location.building
Ejemplo n.º 29
0
    def render(self, session, logger, machine, disk, controller, share,
               filesystem, resourcegroup, address, comments, size, boot,
               snapshot, **kw):
        if controller not in controller_types:
            raise ArgumentError("%s is not a valid controller type, use one "
                                "of: %s." % (controller,
                                             ", ".join(controller_types)))

        dbmachine = Machine.get_unique(session, machine, compel=True)
        for dbdisk in dbmachine.disks:
            if dbdisk.device_name == disk:
                raise ArgumentError("Machine %s already has a disk named %s." %
                                    (machine, disk))
            if dbdisk.bootable:
                if boot is None:
                    boot = False
                elif boot:
                    raise ArgumentError("Machine %s already has a boot disk." %
                                        machine)

        if boot is None:
            # Backward compatibility: "sda"/"c0d0" is bootable, except if there
            # is already a boot disk
            boot = (disk == "sda" or disk == "c0d0")

        if share:
            if not dbmachine.vm_container:
                raise ArgumentError("{0} is not a virtual machine, it is not "
                                    "possible to define a virtual disk."
                                    .format(dbmachine))

            dbshare = find_resource(Share,
                                    dbmachine.vm_container.holder.holder_object,
                                    resourcegroup, share)
            dbdisk = VirtualNasDisk(device_name=disk,
                                    controller_type=controller, bootable=boot,
                                    capacity=size, address=address,
                                    snapshotable=snapshot, comments=comments)

            dbshare.disks.append(dbdisk)
        elif filesystem:
            if not dbmachine.vm_container:
                raise ArgumentError("{0} is not a virtual machine, it is not "
                                    "possible to define a virtual disk."
                                    .format(dbmachine))

            dbfs = Filesystem.get_unique(session, name=filesystem,
                                         holder=dbmachine.vm_container.holder,
                                         compel=True)

            dbdisk = VirtualLocalDisk(device_name=disk,
                                      controller_type=controller, bootable=boot,
                                      capacity=size, address=address,
                                      snapshotable=snapshot,
                                      comments=comments)
            dbfs.disks.append(dbdisk)

        else:
            dbdisk = LocalDisk(device_name=disk, controller_type=controller,
                               capacity=size, bootable=boot, comments=comments)

        dbmachine.disks.append(dbdisk)

        plenary_info = Plenary.get_plenary(dbmachine, logger=logger)
        plenary_info.write()

        return
Ejemplo n.º 30
0
    def render(self, session, logger, machine, model, vendor, serial,
               chassis, slot, clearchassis, multislot,
               vmhost, cluster, allow_metacluster_change,
               cpuname, cpuvendor, cpuspeed, cpucount, memory, ip, uri,
               **arguments):
        dbmachine = Machine.get_unique(session, machine, compel=True)
        oldinfo = DSDBRunner.snapshot_hw(dbmachine)

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbmachine))
        if dbmachine.vm_container:
            plenaries.append(Plenary.get_plenary(dbmachine.vm_container))
        if dbmachine.host:
            # Using PlenaryHostData directly, to avoid warnings if the host has
            # not been configured yet
            plenaries.append(PlenaryHostData.get_plenary(dbmachine.host))

        if clearchassis:
            del dbmachine.chassis_slot[:]

        if chassis:
            dbchassis = Chassis.get_unique(session, chassis, compel=True)
            dbmachine.location = dbchassis.location
            if slot is None:
                raise ArgumentError("Option --chassis requires --slot "
                                    "information.")
            self.adjust_slot(session, logger,
                             dbmachine, dbchassis, slot, multislot)
        elif slot is not None:
            dbchassis = None
            for dbslot in dbmachine.chassis_slot:
                if dbchassis and dbslot.chassis != dbchassis:
                    raise ArgumentError("Machine in multiple chassis, please "
                                        "use --chassis argument.")
                dbchassis = dbslot.chassis
            if not dbchassis:
                raise ArgumentError("Option --slot requires --chassis "
                                    "information.")
            self.adjust_slot(session, logger,
                             dbmachine, dbchassis, slot, multislot)

        dblocation = get_location(session, **arguments)
        if dblocation:
            loc_clear_chassis = False
            for dbslot in dbmachine.chassis_slot:
                dbcl = dbslot.chassis.location
                if dbcl != dblocation:
                    if chassis or slot is not None:
                        raise ArgumentError("{0} conflicts with chassis {1!s} "
                                            "location {2}."
                                            .format(dblocation, dbslot.chassis,
                                                    dbcl))
                    else:
                        loc_clear_chassis = True
            if loc_clear_chassis:
                del dbmachine.chassis_slot[:]
            dbmachine.location = dblocation

            if dbmachine.host:
                for vm in dbmachine.host.virtual_machines:
                    plenaries.append(Plenary.get_plenary(vm))
                    vm.location = dblocation

        if model or vendor:
            # If overriding model, should probably overwrite default
            # machine specs as well.
            if not model:
                model = dbmachine.model.name
            if not vendor:
                vendor = dbmachine.model.vendor.name
            dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                       compel=True)
            if not dbmodel.model_type.isMachineType():
                raise ArgumentError("The update_machine command cannot update "
                                    "machines of type %s." %
                                    dbmodel.model_type)
            # We probably could do this by forcing either cluster or
            # location data to be available as appropriate, but really?
            # Failing seems reasonable.
            if dbmodel.model_type != dbmachine.model.model_type and \
               (dbmodel.model_type.isVirtualMachineType() or
                dbmachine.model.model_type.isVirtualMachineType()):
                raise ArgumentError("Cannot change machine from %s to %s." %
                                    (dbmachine.model.model_type,
                                     dbmodel.model_type))

            old_nic_model = dbmachine.model.nic_model
            new_nic_model = dbmodel.nic_model
            if old_nic_model != new_nic_model:
                for iface in dbmachine.interfaces:
                    if iface.model == old_nic_model:
                        iface.model = new_nic_model

            dbmachine.model = dbmodel

        if cpuname or cpuvendor or cpuspeed is not None:
            dbcpu = Cpu.get_unique(session, name=cpuname, vendor=cpuvendor,
                                   speed=cpuspeed, compel=True)
            dbmachine.cpu = dbcpu

        if cpucount is not None:
            dbmachine.cpu_quantity = cpucount
        if memory is not None:
            dbmachine.memory = memory
        if serial:
            dbmachine.serial_no = serial

        if ip:
            update_primary_ip(session, logger, dbmachine, ip)

        if uri and not dbmachine.model.model_type.isVirtualAppliance():
            raise ArgumentError("URI can be specified only for virtual "
                                "appliances and the model's type is %s" %
                                dbmachine.model.model_type)

        if uri:
            dbmachine.uri = uri

        # FIXME: For now, if a machine has its interface(s) in a portgroup
        # this command will need to be followed by an update_interface to
        # re-evaluate the portgroup for overflow.
        # It would be better to have --pg and --autopg options to let it
        # happen at this point.
        if cluster or vmhost:
            if not dbmachine.vm_container:
                raise ArgumentError("Cannot convert a physical machine to "
                                    "virtual.")

            old_holder = dbmachine.vm_container.holder.holder_object
            resholder = get_resource_holder(session, hostname=vmhost,
                                            cluster=cluster, compel=False)
            new_holder = resholder.holder_object

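            # Moving a VM between metaclusters changes capacity accounting,
            # so it needs the explicit --allow_metacluster_change flag.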
            if self.get_metacluster(new_holder) != self.get_metacluster(old_holder) \
               and not allow_metacluster_change:
                raise ArgumentError("Current {0:l} does not match "
                                    "new {1:l}."
                                    .format(self.get_metacluster(old_holder),
                                            self.get_metacluster(new_holder)))

            plenaries.append(Plenary.get_plenary(old_holder))
            plenaries.append(Plenary.get_plenary(new_holder))

            dbmachine.vm_container.holder = resholder

            for dbdisk in dbmachine.disks:
                if isinstance(dbdisk, VirtualNasDisk):
                    old_share = dbdisk.share
                    if isinstance(old_share.holder, BundleResource):
                        resourcegroup = old_share.holder.resourcegroup.name
                    else:
                        resourcegroup = None

                    new_share = find_resource(Share, new_holder, resourcegroup,
                                              old_share.name,
                                              error=ArgumentError)

                    # If the shares are registered at the metacluster level and both
                    # clusters are in the same metacluster, then there will be no
                    # real change here
                    if new_share != old_share:
                        old_share.disks.remove(dbdisk)
                        new_share.disks.append(dbdisk)

                if isinstance(dbdisk, VirtualLocalDisk):
                    old_filesystem = dbdisk.filesystem

                    new_filesystem = find_resource(Filesystem, new_holder, None,
                                                   old_filesystem.name,
                                                   error=ArgumentError)

                    if new_filesystem != old_filesystem:
                        old_filesystem.disks.remove(dbdisk)
                        new_filesystem.disks.append(dbdisk)

            if isinstance(new_holder, Cluster):
                dbmachine.location = new_holder.location_constraint
            else:
                # vmhost
                dbmachine.location = new_holder.hardware_entity.location

        session.flush()

        # Check if the changed parameters still meet cluster capacity
        # requirements
        if dbmachine.cluster:
            dbmachine.cluster.validate()
            if allow_metacluster_change:
                dbmachine.cluster.metacluster.validate()
        if dbmachine.host and dbmachine.host.cluster:
            dbmachine.host.cluster.validate()

        # The check to make sure a plenary file is not written out for
        # dummy aurora hardware is within the call to write().  This way
        # it is consistent without altering (and forgetting to alter)
        # all the calls to the method.

        with plenaries.get_key():
            plenaries.stash()
            try:
                plenaries.write(locked=True)

                dsdb_runner = DSDBRunner(logger=logger)
                dsdb_runner.update_host(dbmachine, oldinfo)
                dsdb_runner.commit_or_rollback("Could not update machine in DSDB")
            except:
                plenaries.restore_stash()
                raise

        return
Ejemplo n.º 31
0
    def render(self, session, logger, interface, machine, switch, chassis, mac,
               user, **arguments):

        if not (machine or switch or chassis or mac):
            raise ArgumentError("Please specify at least one of --chassis, "
                                "--machine, --switch or --mac.")

        if machine:
            dbhw_ent = Machine.get_unique(session, machine, compel=True)
        elif switch:
            dbhw_ent = Switch.get_unique(session, switch, compel=True)
        elif chassis:
            dbhw_ent = Chassis.get_unique(session, chassis, compel=True)
        else:
            dbhw_ent = None

        dbinterface = Interface.get_unique(session, hardware_entity=dbhw_ent,
                                           name=interface, mac=mac, compel=True)
        if not dbhw_ent:
            dbhw_ent = dbinterface.hardware_entity

        if dbinterface.vlans:
            vlans = ", ".join([iface.name for iface in
                               dbinterface.vlans.values()])
            raise ArgumentError("{0} is the parent of the following VLAN "
                                "interfaces, delete them first: "
                                "{1}.".format(dbinterface, vlans))

        if dbinterface.slaves:
            slaves = ", ".join([iface.name for iface in dbinterface.slaves])
            raise ArgumentError("{0} is the master of the following slave "
                                "interfaces, delete them first: "
                                "{1}.".format(dbinterface, slaves))

        try:
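            # _Goto is presumably a module-level marker exception: raising
            # it below breaks out of the loop early once the primary
            # address has been re-assigned, skipping the error at the end.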
            for addr in dbinterface.assignments:
                if addr.ip != dbhw_ent.primary_ip:
                    continue

                # Special handling: if this interface was created automatically,
                # and there is exactly one other interface with no IP address,
                # then re-assign the primary address to that interface
                if not dbinterface.mac and dbinterface.comments is not None and \
                   dbinterface.comments.startswith("Created automatically") and \
                   len(dbhw_ent.interfaces) == 2:
                    if dbinterface == dbhw_ent.interfaces[0]:
                        other = dbhw_ent.interfaces[1]
                    else:
                        other = dbhw_ent.interfaces[0]

                    if len(other.assignments) == 0:
                        assign_address(other, dbhw_ent.primary_ip,
                                       dbhw_ent.primary_name.network)
                        dbinterface.addresses.remove(dbhw_ent.primary_ip)
                        raise _Goto

                # If this is a machine, it is possible to delete the host to get rid
                # of the primary name
                if dbhw_ent.hardware_type == "machine":
                    msg = "  You should delete the host first."
                else:
                    msg = ""

                raise ArgumentError("{0} holds the primary address of the {1:cl}, "
                                    "therefore it cannot be deleted."
                                    "{2}".format(dbinterface, dbhw_ent, msg))
        except _Goto:
            pass

        addrs = ", ".join(["%s: %s" % (addr.logical_name, addr.ip) for addr in
                           dbinterface.assignments])
        if addrs:
            raise ArgumentError("{0} still has the following addresses "
                                "configured, delete them first: "
                                "{1}.".format(dbinterface, addrs))

        dbhw_ent.interfaces.remove(dbinterface)
        session.flush()

        if dbhw_ent.hardware_type == 'machine':
            plenary_info = PlenaryMachineInfo(dbhw_ent, logger=logger)
            plenary_info.write()
        return
Ejemplo n.º 32
0
    def render(self, session, logger, hostname, machine, archetype,
               buildstatus, personality, osname, osversion, service, instance,
               model, machine_type, vendor, serial, cluster, guest_on_cluster,
               guest_on_share, member_cluster_share, domain, sandbox, branch,
               sandbox_owner, dns_domain, shortname, mac, ip, networkip,
               network_environment, exact_location, server_of_service,
               server_of_instance, grn, eon_id, fullinfo, **arguments):
        dbnet_env = NetworkEnvironment.get_unique_or_default(
            session, network_environment)

        q = session.query(Host)

        if machine:
            dbmachine = Machine.get_unique(session, machine, compel=True)
            q = q.filter_by(machine=dbmachine)

        # Add the machine definition and the primary name. Use aliases to make
        # sure the end result will be ordered by primary name.
        PriDns = aliased(DnsRecord)
        PriFqdn = aliased(Fqdn)
        PriDomain = aliased(DnsDomain)
        q = q.join(Machine, (PriDns, PriDns.id == Machine.primary_name_id),
                   (PriFqdn, PriDns.fqdn_id == PriFqdn.id),
                   (PriDomain, PriFqdn.dns_domain_id == PriDomain.id))
        q = q.order_by(PriFqdn.name, PriDomain.name)
        q = q.options(
            contains_eager('machine'),
            contains_eager('machine.primary_name', alias=PriDns),
            contains_eager('machine.primary_name.fqdn', alias=PriFqdn),
            contains_eager('machine.primary_name.fqdn.dns_domain',
                           alias=PriDomain))
        q = q.reset_joinpoint()

        # Hardware-specific filters
        dblocation = get_location(session, **arguments)
        if dblocation:
            if exact_location:
                q = q.filter(Machine.location == dblocation)
            else:
                childids = dblocation.offspring_ids()
                q = q.filter(Machine.location_id.in_(childids))

        if model or vendor or machine_type:
            subq = Model.get_matching_query(session,
                                            name=model,
                                            vendor=vendor,
                                            machine_type=machine_type,
                                            compel=True)
            q = q.filter(Machine.model_id.in_(subq))

        if serial:
            self.deprecated_option(
                "serial",
                "Please use search machine --serial instead.",
                logger=logger,
                **arguments)
            q = q.filter(Machine.serial_no == serial)

        # DNS IP address related filters
        if mac or ip or networkip or hostname or dns_domain or shortname:
            # Inner joins are cheaper than outer joins, so make some effort to
            # use inner joins when possible
            if mac or ip or networkip:
                q = q.join(Interface)
            else:
                q = q.outerjoin(Interface)
            if ip or networkip:
                q = q.join(AddressAssignment, Network, from_joinpoint=True)
            else:
                q = q.outerjoin(AddressAssignment,
                                Network,
                                from_joinpoint=True)

            if mac:
                self.deprecated_option("mac", "Please use search machine "
                                       "--mac instead.",
                                       logger=logger,
                                       **arguments)
                q = q.filter(Interface.mac == mac)
            if ip:
                q = q.filter(AddressAssignment.ip == ip)
                q = q.filter(Network.network_environment == dbnet_env)
            if networkip:
                dbnetwork = get_network_byip(session, networkip, dbnet_env)
                q = q.filter(AddressAssignment.network == dbnetwork)

            dbdns_domain = None
            if hostname:
                (shortname, dbdns_domain) = parse_fqdn(session, hostname)
            if dns_domain:
                dbdns_domain = DnsDomain.get_unique(session,
                                                    dns_domain,
                                                    compel=True)

            if shortname or dbdns_domain:
                ARecAlias = aliased(ARecord)
                ARecFqdn = aliased(Fqdn)

                q = q.outerjoin(
                    (ARecAlias,
                     and_(ARecAlias.ip == AddressAssignment.ip,
                          ARecAlias.network_id
                          == AddressAssignment.network_id)),
                    (ARecFqdn, ARecAlias.fqdn_id == ARecFqdn.id))
                if shortname:
                    q = q.filter(
                        or_(ARecFqdn.name == shortname,
                            PriFqdn.name == shortname))
                if dbdns_domain:
                    q = q.filter(
                        or_(ARecFqdn.dns_domain == dbdns_domain,
                            PriFqdn.dns_domain == dbdns_domain))
            q = q.reset_joinpoint()

        (dbbranch, dbauthor) = get_branch_and_author(session,
                                                     logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     branch=branch)
        if sandbox_owner:
            dbauthor = get_user_principal(session, sandbox_owner)

        if dbbranch:
            q = q.filter_by(branch=dbbranch)
        if dbauthor:
            q = q.filter_by(sandbox_author=dbauthor)

        if archetype:
            # Added to the searches as appropriate below.
            dbarchetype = Archetype.get_unique(session, archetype, compel=True)
        if personality and archetype:
            dbpersonality = Personality.get_unique(session,
                                                   archetype=dbarchetype,
                                                   name=personality,
                                                   compel=True)
            q = q.filter_by(personality=dbpersonality)
        elif personality:
            PersAlias = aliased(Personality)
            q = q.join(PersAlias).filter_by(name=personality)
            q = q.reset_joinpoint()
        elif archetype:
            PersAlias = aliased(Personality)
            q = q.join(PersAlias).filter_by(archetype=dbarchetype)
            q = q.reset_joinpoint()

        if buildstatus:
            dbbuildstatus = HostLifecycle.get_unique(session,
                                                     buildstatus,
                                                     compel=True)
            q = q.filter_by(status=dbbuildstatus)

        if osname and osversion and archetype:
            # archetype was already resolved above
            dbos = OperatingSystem.get_unique(session,
                                              name=osname,
                                              version=osversion,
                                              archetype=dbarchetype,
                                              compel=True)
            q = q.filter_by(operating_system=dbos)
        elif osname or osversion:
            q = q.join('operating_system')
            if osname:
                q = q.filter_by(name=osname)
            if osversion:
                q = q.filter_by(version=osversion)
            q = q.reset_joinpoint()

        if service:
            dbservice = Service.get_unique(session, service, compel=True)
            if instance:
                dbsi = get_service_instance(session, dbservice, instance)
                q = q.filter(Host.services_used.contains(dbsi))
            else:
                q = q.join('services_used')
                q = q.filter_by(service=dbservice)
                q = q.reset_joinpoint()
        elif instance:
            q = q.join('services_used')
            q = q.filter_by(name=instance)
            q = q.reset_joinpoint()

        if server_of_service:
            dbserver_service = Service.get_unique(session,
                                                  server_of_service,
                                                  compel=True)
            if server_of_instance:
                dbssi = get_service_instance(session, dbserver_service,
                                             server_of_instance)
                q = q.join('_services_provided')
                q = q.filter_by(service_instance=dbssi)
                q = q.reset_joinpoint()
            else:
                q = q.join('_services_provided', 'service_instance')
                q = q.filter_by(service=dbserver_service)
                q = q.reset_joinpoint()
        elif server_of_instance:
            q = q.join('_services_provided', 'service_instance')
            q = q.filter_by(name=server_of_instance)
            q = q.reset_joinpoint()

        if cluster:
            dbcluster = Cluster.get_unique(session, cluster, compel=True)
            if isinstance(dbcluster, MetaCluster):
                q = q.join('_cluster', 'cluster', '_metacluster')
                q = q.filter_by(metacluster=dbcluster)
            else:
                q = q.filter_by(cluster=dbcluster)
            q = q.reset_joinpoint()
        if guest_on_cluster:
            # TODO: this does not handle metaclusters according to Wes
            dbcluster = Cluster.get_unique(session,
                                           guest_on_cluster,
                                           compel=True)
            q = q.join('machine', VirtualMachine, ClusterResource)
            q = q.filter_by(cluster=dbcluster)
            q = q.reset_joinpoint()
        if guest_on_share:
            # v2
            v2shares = session.query(
                Share.id).filter_by(name=guest_on_share).all()
            if not v2shares:
                raise NotFoundException(
                    "No shares found with name {0}.".format(guest_on_share))

            NasAlias = aliased(VirtualDisk)
            q = q.join('machine', 'disks', (NasAlias, NasAlias.id == Disk.id))
            q = q.filter(NasAlias.share_id.in_(map(lambda s: s[0], v2shares)))
            q = q.reset_joinpoint()

        if member_cluster_share:
            # v2
            v2shares = session.query(
                Share.id).filter_by(name=member_cluster_share).all()
            if not v2shares:
                raise NotFoundException(
                    "No shares found with name {0}.".format(guest_on_share))

            NasAlias = aliased(VirtualDisk)

            q = q.join('_cluster', 'cluster', 'resholder', VirtualMachine,
                       'machine', 'disks', (NasAlias, NasAlias.id == Disk.id))
            q = q.filter(NasAlias.share_id.in_(map(lambda s: s[0], v2shares)))
            q = q.reset_joinpoint()

        if grn or eon_id:
            dbgrn = lookup_grn(session, grn, eon_id, autoupdate=False)

            persq = session.query(Personality.id)
            persq = persq.outerjoin(PersonalityGrnMap)
            persq = persq.filter(
                or_(Personality.owner_eon_id == dbgrn.eon_id,
                    PersonalityGrnMap.eon_id == dbgrn.eon_id))
            q = q.outerjoin(HostGrnMap)
            q = q.filter(
                or_(Host.owner_eon_id == dbgrn.eon_id,
                    HostGrnMap.eon_id == dbgrn.eon_id,
                    Host.personality_id.in_(persq.subquery())))
            q = q.reset_joinpoint()

        if fullinfo:
            return q.all()
        return SimpleHostList(q.all())
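The aliased/contains_eager combination at the top of this example makes one JOIN serve two purposes: it orders the result set by primary name and it populates the relationship attributes in the same round trip, so the ORM does not issue an extra SELECT per host. A self-contained sketch of the same pattern with stand-in models, written in the legacy (pre-2.0) SQLAlchemy style these examples use; in 2.0 the alias= argument is replaced by of_type():

from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import aliased, contains_eager, relationship, sessionmaker

Base = declarative_base()


class Name(Base):
    __tablename__ = 'name'
    id = Column(Integer, primary_key=True)
    fqdn = Column(String(64))


class Box(Base):
    __tablename__ = 'box'
    id = Column(Integer, primary_key=True)
    primary_name_id = Column(Integer, ForeignKey('name.id'))
    primary_name = relationship(Name)


engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

PriName = aliased(Name)
q = session.query(Box)
# One explicit JOIN drives both the ORDER BY and the eager load.
q = q.join(PriName, Box.primary_name_id == PriName.id)
q = q.order_by(PriName.fqdn)
q = q.options(contains_eager(Box.primary_name, alias=PriName))
boxes = q.all()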
Example #33
    def render(self, session, logger, machine, model, vendor, serial,
               chassis, slot, clearchassis, multislot,
               vmhost, cluster, allow_metacluster_change,
               cpuname, cpuvendor, cpuspeed, cpucount, memory, ip,
               **arguments):
        dbmachine = Machine.get_unique(session, machine, compel=True)
        plenaries = PlenaryCollection(logger=logger)
        oldinfo = DSDBRunner.snapshot_hw(dbmachine)

        if clearchassis:
            del dbmachine.chassis_slot[:]

        remove_plenaries = PlenaryCollection(logger=logger)
        if chassis:
            dbchassis = Chassis.get_unique(session, chassis, compel=True)
            if machine_plenary_will_move(old=dbmachine.location,
                                         new=dbchassis.location):
                remove_plenaries.append(Plenary.get_plenary(dbmachine))
            dbmachine.location = dbchassis.location
            if slot is None:
                raise ArgumentError("Option --chassis requires --slot "
                                    "information.")
            self.adjust_slot(session, logger,
                             dbmachine, dbchassis, slot, multislot)
        elif slot is not None:
            dbchassis = None
            for dbslot in dbmachine.chassis_slot:
                if dbchassis and dbslot.chassis != dbchassis:
                    raise ArgumentError("Machine in multiple chassis, please "
                                        "use --chassis argument.")
                dbchassis = dbslot.chassis
            if not dbchassis:
                raise ArgumentError("Option --slot requires --chassis "
                                    "information.")
            self.adjust_slot(session, logger,
                             dbmachine, dbchassis, slot, multislot)

        dblocation = get_location(session, **arguments)
        if dblocation:
            loc_clear_chassis = False
            for dbslot in dbmachine.chassis_slot:
                dbcl = dbslot.chassis.location
                if dbcl != dblocation:
                    if chassis or slot is not None:
                        raise ArgumentError("{0} conflicts with chassis {1!s} "
                                            "location {2}.".format(dblocation,
                                                        dbslot.chassis, dbcl))
                    else:
                        loc_clear_chassis = True
            if loc_clear_chassis:
                del dbmachine.chassis_slot[:]
            if machine_plenary_will_move(old=dbmachine.location,
                                         new=dblocation):
                remove_plenaries.append(Plenary.get_plenary(dbmachine))
            dbmachine.location = dblocation

        if model or vendor:
            # If overriding model, should probably overwrite default
            # machine specs as well.
            if not model:
                model = dbmachine.model.name
            if not vendor:
                vendor = dbmachine.model.vendor.name
            dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                       compel=True)
            if dbmodel.machine_type not in ['blade', 'rackmount',
                                            'workstation', 'aurora_node',
                                            'virtual_machine']:
                raise ArgumentError("The update_machine command cannot update "
                                    "machines of type %s." %
                                    dbmodel.machine_type)
            # We probably could do this by forcing either cluster or
            # location data to be available as appropriate, but really?
            # Failing seems reasonable.
            if dbmodel.machine_type != dbmachine.model.machine_type and \
               'virtual_machine' in [dbmodel.machine_type,
                                     dbmachine.model.machine_type]:
                raise ArgumentError("Cannot change machine from %s to %s." %
                                    (dbmachine.model.machine_type,
                                     dbmodel.machine_type))

            old_nic_model = dbmachine.model.nic_model
            new_nic_model = dbmodel.nic_model
            if old_nic_model != new_nic_model:
                for iface in dbmachine.interfaces:
                    if iface.model == old_nic_model:
                        iface.model = new_nic_model

            dbmachine.model = dbmodel

        if cpuname or cpuvendor or cpuspeed is not None:
            dbcpu = Cpu.get_unique(session, name=cpuname, vendor=cpuvendor,
                                   speed=cpuspeed, compel=True)
            dbmachine.cpu = dbcpu

        if cpucount is not None:
            dbmachine.cpu_quantity = cpucount
        if memory is not None:
            dbmachine.memory = memory
        if serial:
            dbmachine.serial_no = serial

        if ip:
            update_primary_ip(session, dbmachine, ip)

        # FIXME: For now, if a machine has its interface(s) in a portgroup
        # this command will need to be followed by an update_interface to
        # re-evaluate the portgroup for overflow.
        # It would be better to have --pg and --autopg options to let it
        # happen at this point.
        if cluster or vmhost:
            if not dbmachine.vm_container:
                raise ArgumentError("Cannot convert a physical machine to "
                                    "virtual.")

            old_holder = dbmachine.vm_container.holder.holder_object
            resholder = get_resource_holder(session, hostname=vmhost,
                                            cluster=cluster, compel=False)
            new_holder = resholder.holder_object

            # TODO: do we want to allow moving machines between the cluster and
            # metacluster level?
            if new_holder.__class__ != old_holder.__class__:
                raise ArgumentError("Cannot move a VM between a cluster and a "
                                    "stand-alone host.")

            if cluster:
                if new_holder.metacluster != old_holder.metacluster \
                   and not allow_metacluster_change:
                    raise ArgumentError("Current {0:l} does not match "
                                        "new {1:l}."
                                        .format(old_holder.metacluster,
                                                new_holder.metacluster))

            remove_plenaries.append(Plenary.get_plenary(dbmachine.vm_container))
            dbmachine.vm_container.holder = resholder

            for dbdisk in dbmachine.disks:
                if not isinstance(dbdisk, VirtualDisk):
                    continue
                old_share = dbdisk.share
                if isinstance(old_share.holder, BundleResource):
                    resourcegroup = old_share.holder.name
                else:
                    resourcegroup = None
                new_share = find_share(new_holder, resourcegroup, old_share.name,
                                       error=ArgumentError)

                # If the shares are registered at the metacluster level and both
                # clusters are in the same metacluster, then there will be no
                # real change here
                if new_share != old_share:
                    old_share.disks.remove(dbdisk)
                    new_share.disks.append(dbdisk)

            if isinstance(new_holder, Cluster):
                dbmachine.location = new_holder.location_constraint
            else:
                dbmachine.location = new_holder.location

            session.flush()
            plenaries.append(Plenary.get_plenary(old_holder))
            plenaries.append(Plenary.get_plenary(new_holder))

        if dbmachine.vm_container:
            plenaries.append(Plenary.get_plenary(dbmachine.vm_container))

        session.flush()

        # Check if the changed parameters still meet cluster capacity
        # requirements
        if dbmachine.cluster:
            dbmachine.cluster.validate()
            if allow_metacluster_change:
                dbmachine.cluster.metacluster.validate()
        if dbmachine.host and dbmachine.host.cluster:
            dbmachine.host.cluster.validate()

        # The check to make sure a plenary file is not written out for
        # dummy aurora hardware is within the call to write().  This way
        # it is consistent without altering (and forgetting to alter)
        # all the calls to the method.
        plenaries.append(Plenary.get_plenary(dbmachine))
        if remove_plenaries.plenaries and dbmachine.host:
            plenaries.append(Plenary.get_plenary(dbmachine.host))

        key = CompileKey.merge([plenaries.get_write_key(),
                                remove_plenaries.get_remove_key()])
        try:
            lock_queue.acquire(key)
            remove_plenaries.stash()
            plenaries.write(locked=True)
            remove_plenaries.remove(locked=True)

            if dbmachine.host:
                # XXX: May need to reconfigure.
                pass

            dsdb_runner = DSDBRunner(logger=logger)
            dsdb_runner.update_host(dbmachine, oldinfo)
            dsdb_runner.commit_or_rollback("Could not update machine in DSDB")
        except:
            plenaries.restore_stash()
            remove_plenaries.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        return
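The try/except/finally around the plenary writes is effectively a filesystem transaction: stash the current templates, write the new ones, run the external DSDB update, and restore the stash if anything raises. The same idiom reduced to plain files, as a sketch rather than the broker's Plenary API:

import os
import shutil


def write_with_rollback(path, new_text, side_effect):
    # Stash the current on-disk state so a failure in the write or in a
    # later side effect (such as a DSDB update) can be rolled back.
    stash = path + '.stash'
    had_old = os.path.exists(path)
    if had_old:
        shutil.copy2(path, stash)
    try:
        with open(path, 'w') as f:
            f.write(new_text)
        side_effect()  # may raise
    except Exception:
        if had_old:
            shutil.copy2(stash, path)  # restore the previous content
        else:
            os.unlink(path)            # nothing existed before; remove
        raise
    finally:
        if had_old and os.path.exists(stash):
            os.unlink(stash)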
Example #34
    def render(self, session, logger, hostname, machine, archetype, domain,
               sandbox, osname, osversion, buildstatus, personality, comments,
               zebra_interfaces, grn, eon_id, skip_dsdb_check=False,
               **arguments):
        dbarchetype = Archetype.get_unique(session, archetype, compel=True)
        section = "archetype_" + dbarchetype.name

        # This is for the various add_*_host commands
        if not domain and not sandbox:
            domain = self.config.get(section, "host_domain")

        (dbbranch, dbauthor) = get_branch_and_author(session, logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     compel=True)

        if hasattr(dbbranch, "allow_manage") and not dbbranch.allow_manage:
            raise ArgumentError("Adding hosts to {0:l} is not allowed."
                                .format(dbbranch))

        if not buildstatus:
            buildstatus = 'build'
        dbstatus = HostLifecycle.get_unique(session, buildstatus, compel=True)
        dbmachine = Machine.get_unique(session, machine, compel=True)
        oldinfo = DSDBRunner.snapshot_hw(dbmachine)

        if not personality:
            if self.config.has_option(section, "default_personality"):
                personality = self.config.get(section, "default_personality")
            else:
                personality = 'generic'
        dbpersonality = Personality.get_unique(session, name=personality,
                                               archetype=dbarchetype, compel=True)

        if not osname:
            if self.config.has_option(section, "default_osname"):
                osname = self.config.get(section, "default_osname")
        if not osversion:
            if self.config.has_option(section, "default_osversion"):
                osversion = self.config.get(section, "default_osversion")

        if not osname or not osversion:
            raise ArgumentError("Can not determine a sensible default OS "
                                "for archetype %s. Please use the "
                                "--osname and --osversion parameters." %
                                (dbarchetype.name))

        dbos = OperatingSystem.get_unique(session, name=osname,
                                          version=osversion,
                                          archetype=dbarchetype, compel=True)

        if (dbmachine.model.machine_type == 'aurora_node' and
                dbpersonality.archetype.name != 'aurora'):
            raise ArgumentError("Machines of type aurora_node can only be "
                                "added with archetype aurora.")

        if dbmachine.host:
            raise ArgumentError("{0:c} {0.label} is already allocated to "
                                "{1:l}.".format(dbmachine, dbmachine.host))

        if grn or eon_id:
            dbgrn = lookup_grn(session, grn, eon_id, logger=logger,
                               config=self.config)
        else:
            dbgrn = dbpersonality.owner_grn

        dbhost = Host(machine=dbmachine, branch=dbbranch, owner_grn=dbgrn,
                      sandbox_author=dbauthor, personality=dbpersonality,
                      status=dbstatus, operating_system=dbos, comments=comments)
        session.add(dbhost)

        if self.config.has_option("archetype_" + archetype, "default_grn_target"):
            dbhost.grns.append((dbhost, dbgrn,
                                self.config.get("archetype_" + archetype,
                                                "default_grn_target")))

        if zebra_interfaces:
            # --autoip does not make sense for Zebra (at least not the way it's
            # implemented currently)
            dbinterface = None
        else:
            dbinterface = get_boot_interface(dbmachine)

        # This method is allowed to return None. This can only happen
        # (currently) using add_aurora_host, add_windows_host, or possibly by
        # bypassing the aq client and posting a request directly.
        audit_results = []
        ip = generate_ip(session, logger, dbinterface,
                         audit_results=audit_results, **arguments)

        dbdns_rec, newly_created = grab_address(session, hostname, ip,
                                                allow_restricted_domain=True,
                                                allow_reserved=True,
                                                preclude=True)
        dbmachine.primary_name = dbdns_rec

        # Fix up auxiliary addresses to point to the primary name by default
        if ip:
            dns_env = dbdns_rec.fqdn.dns_environment

            for addr in dbmachine.all_addresses():
                if addr.interface.interface_type == "management":
                    continue
                if addr.service_address_id:  # pragma: no cover
                    continue
                for rec in addr.dns_records:
                    if rec.fqdn.dns_environment == dns_env:
                        rec.reverse_ptr = dbdns_rec.fqdn

        if zebra_interfaces:
            if not ip:
                raise ArgumentError("Zebra configuration requires an IP address.")
            dbsrv_addr = self.assign_zebra_address(session, dbmachine, dbdns_rec,
                                                   zebra_interfaces)
        else:
            if ip:
                if not dbinterface:
                    raise ArgumentError("You have specified an IP address for the "
                                        "host, but {0:l} does not have a bootable "
                                        "interface.".format(dbmachine))
                assign_address(dbinterface, ip, dbdns_rec.network)
            dbsrv_addr = None

        session.flush()

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbmachine))
        if dbmachine.vm_container:
            plenaries.append(Plenary.get_plenary(dbmachine.vm_container))
        if dbsrv_addr:
            plenaries.append(Plenary.get_plenary(dbsrv_addr))

        key = plenaries.get_write_key()
        try:
            lock_queue.acquire(key)
            plenaries.write(locked=True)

            # XXX: This (and some of the code above) is horrible.  There
            # should be a generic/configurable hook here that could kick
            # in based on archetype and/or domain.
            dsdb_runner = DSDBRunner(logger=logger)
            if dbhost.archetype.name == 'aurora':
                # For aurora, check that DSDB has a record of the host.
                if not skip_dsdb_check:
                    try:
                        dsdb_runner.show_host(hostname)
                    except ProcessException, e:
                        raise ArgumentError("Could not find host in DSDB: %s" % e)
            elif not dbmachine.primary_ip:
                logger.info("No IP for %s, not adding to DSDB." % dbmachine.fqdn)
Example #35
    def render(self, session, logger, interface, machine, mac, model, vendor,
               boot, pg, autopg, comments, master, clear_master, default_route,
               rename_to, **arguments):
        """This command expects to locate an interface based only on name
        and machine - all other fields, if specified, are meant as updates.

        If the machine has a host, dsdb may need to be updated.

        The boot flag can *only* be set to true.  This is mostly technical,
        as at this point in the interface it is difficult to tell if the
        flag was unset or set to false.  However, it also vastly simplifies
        the dsdb logic - we never have to worry about a user trying to
        remove the boot flag from a host in dsdb.

        """

        audit_results = []

        dbhw_ent = Machine.get_unique(session, machine, compel=True)
        dbinterface = Interface.get_unique(session, hardware_entity=dbhw_ent,
                                           name=interface, compel=True)

        oldinfo = DSDBRunner.snapshot_hw(dbhw_ent)

        if arguments.get('hostname', None):
            # Hack to set an initial interface for an aurora host...
            dbhost = dbhw_ent.host
            if dbhost.archetype.name == 'aurora' and \
               dbhw_ent.primary_ip and not dbinterface.addresses:
                assign_address(dbinterface, dbhw_ent.primary_ip,
                               dbhw_ent.primary_name.network, logger=logger)

        # We may need extra IP verification (or an autoip option)...
        # This may also throw spurious errors if attempting to set the
        # port_group to a value it already has.
        if pg is not None and dbinterface.port_group != pg.lower().strip():
            dbinterface.port_group = verify_port_group(
                dbinterface.hardware_entity, pg)
        elif autopg:
            dbinterface.port_group = choose_port_group(
                session, logger, dbinterface.hardware_entity)
            audit_results.append(('pg', dbinterface.port_group))

        if master:
            if dbinterface.addresses:
                # FIXME: as a special case, if the only address is the
                # primary IP, then we could just move it to the master
                # interface. However this can be worked around by bonding
                # the interface before calling "add host", so don't bother
                # for now.
                raise ArgumentError("Can not enslave {0:l} because it has "
                                    "addresses.".format(dbinterface))
            dbmaster = Interface.get_unique(session, hardware_entity=dbhw_ent,
                                            name=master, compel=True)
            if dbmaster in dbinterface.all_slaves():
                raise ArgumentError("Enslaving {0:l} would create a circle, "
                                    "which is not allowed.".format(dbinterface))
            dbinterface.master = dbmaster

        if clear_master:
            if not dbinterface.master:
                raise ArgumentError("{0} is not a slave.".format(dbinterface))
            dbinterface.master = None

        if comments:
            dbinterface.comments = comments

        if boot:
            # Figure out if the current bootable interface also has the
            # default route set; the new bootable interface probably
            # wants to have the same settings.  Note that if
            # old_default_route is None there was no bootable interface.
            old_default_route = None
            for i in dbhw_ent.interfaces:
                if i.bootable == True:
                    old_default_route = i.default_route
                    break

            # Apply the bootable flag to the supplied interface, clearing
            # it on all other interfaces.
            for i in dbhw_ent.interfaces:
                if i == dbinterface:
                    i.bootable = True
                else:
                    i.bootable = False

            # If the user was not explicit about the default route flag
            # (default_route is None); there was an existing bootable
            # interface (old_default_route is not None); the new default
            # route setting differs from the old - then produce a warning.
            if (default_route is None and
                old_default_route is not None and
                dbinterface.default_route != old_default_route):
                if old_default_route:
                    logger.client_info("Warning: New boot interface {0} is no "
                                       "longer provides the default route; it "
                                       "did before!".format(dbinterface))
                else:
                    logger.client_info("Warning: New boot interface {0} now "
                                       "provides the default route; it didn't "
                                       "before!".format(dbinterface))

            # Should we also transfer the primary IP to the new boot interface?
            # That could get tricky if the new interface already has an IP
            # address...

        if default_route is not None:
            dbinterface.default_route = default_route
            if not first_of(dbhw_ent.interfaces, lambda x: x.default_route):
                logger.client_info("Warning: {0:l} has no default route, hope "
                                   "that's ok.".format(dbhw_ent))

        # Set the MAC address last so that an interface can be made bootable
        # *before* a MAC address is added. This way the validation in the
        # interface class does not have to worry about the relative order of
        # the bootable=True and MAC address updates.
        if mac:
            q = session.query(Interface).filter_by(mac=mac)
            other = q.first()
            if other and other != dbinterface:
                raise ArgumentError("MAC address {0} is already in use by "
                                    "{1:l}.".format(mac, other))
            dbinterface.mac = mac

        if model or vendor:
            if not dbinterface.model_allowed:
                raise ArgumentError("Model/vendor can not be set for a {0:lc}."
                                    .format(dbinterface))

            dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                       model_type=NicType.Nic, compel=True)
            dbinterface.model = dbmodel
        if rename_to:
            rename_interface(session, dbinterface, rename_to)

        session.flush()

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbhw_ent))
        # Interface renaming affects the host and service addresses
        if dbhw_ent.host:
            plenaries.append(Plenary.get_plenary(dbhw_ent.host))
        for addr in dbinterface.assignments:
            if addr.service_address:
                plenaries.append(Plenary.get_plenary(addr.service_address))

        with plenaries.get_key():
            try:
                plenaries.write(locked=True)

                if dbhw_ent.host and dbhw_ent.host.archetype.name != "aurora":
                    dsdb_runner = DSDBRunner(logger=logger)
                    dsdb_runner.update_host(dbhw_ent, oldinfo)
                    dsdb_runner.commit_or_rollback()
            except AquilonError, err:
                plenaries.restore_stash()
                raise ArgumentError(err)
            except:
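The default-route check above calls a first_of() helper that is not shown in these excerpts. A minimal equivalent, assuming it simply returns the first element satisfying a predicate (this is an assumption, not the broker's actual implementation):

def first_of(iterable, predicate):
    # Assumed behaviour: first matching element, or None if nothing matches.
    for item in iterable:
        if predicate(item):
            return item
    return None

With that reading, the guard above warns when no interface is left providing a default route:

    if not first_of(dbhw_ent.interfaces, lambda x: x.default_route):
        ...  # warn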
Example #36
    def render(self, session, logger,
               # search_cluster
               archetype, cluster_type, personality,
               domain, sandbox, branch, buildstatus,
               allowed_archetype, allowed_personality,
               down_hosts_threshold, down_maint_threshold, max_members,
               member_archetype, member_hostname, member_personality,
               capacity_override, cluster, esx_guest, instance,
               esx_metacluster, service, share, esx_share,
               esx_switch, esx_virtual_machine,
               fullinfo, style, **arguments):

        if esx_share:
            self.deprecated_option("esx_share", "Please use --share instead.",
                                   logger=logger, **arguments)
            share = esx_share

        if cluster_type == 'esx':
            cls = EsxCluster
        else:
            cls = Cluster

        # Don't load full objects if we only want to show their name
        if fullinfo or style != 'raw':
            q = session.query(cls)
        else:
            q = session.query(cls.name)

        # The ORM automatically de-duplicates the result if we query full
        # objects, but not when we query just the names. Tell the DB to do so.
        q = q.distinct()

        (dbbranch, dbauthor) = get_branch_and_author(session, logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     branch=branch)
        if dbbranch:
            q = q.filter_by(branch=dbbranch)
        if dbauthor:
            q = q.filter_by(sandbox_author=dbauthor)

        if archetype:
            # Added to the searches as appropriate below.
            dbarchetype = Archetype.get_unique(session, archetype, compel=True)
        if personality and archetype:
            dbpersonality = Personality.get_unique(session,
                                                   archetype=dbarchetype,
                                                   name=personality,
                                                   compel=True)
            q = q.filter_by(personality=dbpersonality)
        elif personality:
            PersAlias = aliased(Personality)
            q = q.join(PersAlias).filter_by(name=personality)
            q = q.reset_joinpoint()
        elif archetype:
            PersAlias = aliased(Personality)
            q = q.join(PersAlias).filter_by(archetype=dbarchetype)
            q = q.reset_joinpoint()

        if buildstatus:
            dbbuildstatus = ClusterLifecycle.get_unique(session, buildstatus,
                                                        compel=True)
            q = q.filter_by(status=dbbuildstatus)

        if cluster_type:
            q = q.filter_by(cluster_type=cluster_type)

        # Go through the arguments and make special dicts for each
        # specific set of location arguments that are stripped of the
        # given prefix.
        location_args = {'cluster_': {}, 'member_': {}}
        for prefix in location_args.keys():
            for (k, v) in arguments.items():
                if k.startswith(prefix):
                    # arguments['cluster_building'] = 'dd'
                    # becomes
                    # location_args['cluster_']['building'] = 'dd'
                    location_args[prefix][k.replace(prefix, '')] = v

        dblocation = get_location(session, **location_args['cluster_'])
        if dblocation:
            if location_args['cluster_']['exact_location']:
                q = q.filter_by(location_constraint=dblocation)
            else:
                childids = dblocation.offspring_ids()
                q = q.filter(Cluster.location_constraint_id.in_(childids))
        dblocation = get_location(session, **location_args['member_'])
        if dblocation:
            q = q.join('_hosts', 'host', 'machine')
            if location_args['member_']['exact_location']:
                q = q.filter_by(location=dblocation)
            else:
                childids = dblocation.offspring_ids()
                q = q.filter(Machine.location_id.in_(childids))
            q = q.reset_joinpoint()

        # esx stuff
        if cluster:
            q = q.filter_by(name=cluster)
        if esx_metacluster:
            dbmetacluster = MetaCluster.get_unique(session, esx_metacluster,
                                                   compel=True)
            q = q.join('_metacluster')
            q = q.filter_by(metacluster=dbmetacluster)
            q = q.reset_joinpoint()
        if esx_virtual_machine:
            dbvm = Machine.get_unique(session, esx_virtual_machine, compel=True)
            # TODO: support VMs inside resource groups?
            q = q.join(ClusterResource, VirtualMachine)
            q = q.filter_by(machine=dbvm)
            q = q.reset_joinpoint()
        if esx_guest:
            dbguest = hostname_to_host(session, esx_guest)
            # TODO: support VMs inside resource groups?
            q = q.join(ClusterResource, VirtualMachine, Machine)
            q = q.filter_by(host=dbguest)
            q = q.reset_joinpoint()
        if capacity_override:
            q = q.filter(EsxCluster.memory_capacity != None)
        if esx_switch:
            dbswitch = Switch.get_unique(session, esx_switch, compel=True)
            q = q.filter_by(switch=dbswitch)

        if service:
            dbservice = Service.get_unique(session, name=service, compel=True)
            if instance:
                dbsi = ServiceInstance.get_unique(session, name=instance,
                                                  service=dbservice,
                                                  compel=True)
                q = q.filter(Cluster.service_bindings.contains(dbsi))
            else:
                q = q.join('service_bindings')
                q = q.filter_by(service=dbservice)
                q = q.reset_joinpoint()
        elif instance:
            q = q.join('service_bindings')
            q = q.filter_by(name=instance)
            q = q.reset_joinpoint()

        if share:
            # Perform sanity check on the share name
            q2 = session.query(Share)
            q2 = q2.filter_by(name=share)
            if not q2.first():
                raise NotFoundException("Share %s not found." % share)

            CR = aliased(ClusterResource)
            S1 = aliased(Share)
            S2 = aliased(Share)
            RG = aliased(ResourceGroup)
            BR = aliased(BundleResource)
            q = q.join(CR)
            q = q.outerjoin((S1, S1.holder_id == CR.id))
            q = q.outerjoin((RG, RG.holder_id == CR.id),
                            (BR, BR.resourcegroup_id == RG.id),
                            (S2, S2.holder_id == BR.id))
            q = q.filter(or_(S1.name == share, S2.name == share))
            q = q.reset_joinpoint()

        if max_members:
            q = q.filter_by(max_hosts=max_members)

        if down_hosts_threshold:
            (pct, dht) = Cluster.parse_threshold(down_hosts_threshold)
            q = q.filter_by(down_hosts_percent=pct)
            q = q.filter_by(down_hosts_threshold=dht)

        if down_maint_threshold:
            (pct, dmt) = Cluster.parse_threshold(down_maint_threshold)
            q = q.filter_by(down_maint_percent=pct)
            q = q.filter_by(down_maint_threshold=dmt)

        if allowed_archetype:
            # Added to the searches as appropriate below.
            dbaa = Archetype.get_unique(session, allowed_archetype,
                                        compel=True)
        if allowed_personality and allowed_archetype:
            dbap = Personality.get_unique(session, archetype=dbaa,
                                          name=allowed_personality,
                                          compel=True)
            q = q.filter(Cluster.allowed_personalities.contains(dbap))
        elif allowed_personality:
            q = q.join('allowed_personalities')
            q = q.filter_by(name=allowed_personality)
            q = q.reset_joinpoint()
        elif allowed_archetype:
            q = q.join('allowed_personalities')
            q = q.filter_by(archetype=dbaa)
            q = q.reset_joinpoint()

        if member_hostname:
            dbhost = hostname_to_host(session, member_hostname)
            q = q.join('_hosts')
            q = q.filter_by(host=dbhost)
            q = q.reset_joinpoint()

        if member_archetype:
            # Added to the searches as appropriate below.
            dbma = Archetype.get_unique(session, member_archetype, compel=True)
        if member_personality and member_archetype:
            q = q.join('_hosts', 'host')
            dbmp = Personality.get_unique(session, archetype=dbma,
                                          name=member_personality, compel=True)
            q = q.filter_by(personality=dbmp)
            q = q.reset_joinpoint()
        elif member_personality:
            q = q.join('_hosts', 'host', 'personality')
            q = q.filter_by(name=member_personality)
            q = q.reset_joinpoint()
        elif member_archetype:
            q = q.join('_hosts', 'host', 'personality')
            q = q.filter_by(archetype=dbma)
            q = q.reset_joinpoint()

        if cluster_type == 'esx':
            q = q.order_by(EsxCluster.name)
        else:
            q = q.order_by(Cluster.name)

        if fullinfo:
            return q.all()
        return SimpleClusterList(q.all())
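The location_args loop above demultiplexes one flat keyword namespace into several scoped dicts keyed by prefix. A standalone version; note that slicing off the prefix is slightly safer than the str.replace() used above, which would also rewrite a second occurrence of the prefix inside the key:

arguments = {'cluster_building': 'dd', 'cluster_exact_location': True,
             'member_rack': 'dd5', 'style': 'raw'}

location_args = {'cluster_': {}, 'member_': {}}
for prefix in location_args:
    for key, value in arguments.items():
        if key.startswith(prefix):
            # 'cluster_building' -> location_args['cluster_']['building']
            location_args[prefix][key[len(prefix):]] = value

assert location_args['cluster_'] == {'building': 'dd',
                                     'exact_location': True}
assert location_args['member_'] == {'rack': 'dd5'}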
Example #37
    def render(self, session, logger, machine, model, vendor, serial, chassis,
               slot, cpuname, cpuvendor, cpuspeed, cpucount, memory, cluster,
               comments, **arguments):
        dblocation = get_location(session,
                                  query_options=[subqueryload('parents'),
                                                 joinedload('parents.dns_maps')],
                                  **arguments)
        if chassis:
            dbchassis = Chassis.get_unique(session, chassis, compel=True)
            if slot is None:
                raise ArgumentError("The --chassis option requires a --slot.")
            if dblocation and dblocation != dbchassis.location:
                raise ArgumentError("{0} conflicts with chassis location "
                                    "{1}.".format(dblocation, dbchassis.location))
            dblocation = dbchassis.location
        elif slot is not None:
            raise ArgumentError("The --slot option requires a --chassis.")

        dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                   compel=True)

        if dbmodel.machine_type not in ['blade', 'rackmount', 'workstation',
                                        'aurora_node', 'virtual_machine']:
            raise ArgumentError("The add_machine command cannot add machines "
                                "of type %(type)s.  Try 'add %(type)s'." %
                    {"type": dbmodel.machine_type})

        if cluster:
            if dbmodel.machine_type != 'virtual_machine':
                raise ArgumentError("Only virtual machines can have a cluster "
                                    "attribute.")
            dbcluster = Cluster.get_unique(session, cluster,
                                           compel=ArgumentError)
            # This test could be either archetype or cluster_type
            if dbcluster.personality.archetype.name != 'esx_cluster':
                raise ArgumentError("Can only add virtual machines to "
                                    "clusters with archetype esx_cluster.")
            # TODO implement the same to vmhosts.
            if dbcluster.status.name == 'decommissioned':
                raise ArgumentError("Cannot add virtual machines to "
                                    "decommissioned clusters.")
            if dblocation and dbcluster.location_constraint != dblocation:
                raise ArgumentError("Cannot override cluster location {0} "
                                    "with location {1}.".format(
                                        dbcluster.location_constraint,
                                        dblocation))
            dblocation = dbcluster.location_constraint
        elif dbmodel.machine_type == 'virtual_machine':
            raise ArgumentError("Virtual machines must be assigned to a "
                                "cluster.")

        Machine.get_unique(session, machine, preclude=True)
        dbmachine = create_machine(session, machine, dblocation, dbmodel,
                                   cpuname, cpuvendor, cpuspeed, cpucount,
                                   memory, serial, comments)

        if chassis:
            # FIXME: Are virtual machines allowed to be in a chassis?
            dbslot = session.query(ChassisSlot).filter_by(chassis=dbchassis,
                    slot_number=slot).first()
            if not dbslot:
                dbslot = ChassisSlot(chassis=dbchassis, slot_number=slot)
            dbslot.machine = dbmachine
            session.add(dbslot)

        if cluster:
            if not dbcluster.resholder:
                dbcluster.resholder = ClusterResource(cluster=dbcluster)
            dbvm = VirtualMachine(machine=dbmachine, name=dbmachine.label,
                                  holder=dbcluster.resholder)
            dbcluster.validate()

        session.flush()

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbmachine))
        if cluster:
            plenaries.append(Plenary.get_plenary(dbcluster))
            plenaries.append(Plenary.get_plenary(dbvm))

        # The check to make sure a plenary file is not written out for
        # dummy aurora hardware is within the call to write().  This way
        # it is consistent without altering (and forgetting to alter)
        # all the calls to the method.
        plenaries.write()
        return
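The slot handling above is a query-then-create pattern: look the ChassisSlot up, and build it only if missing. Generalised as a helper (the helper itself is a sketch, not part of the broker; note it is not safe against concurrent inserts without a unique constraint on the filter columns):

def get_or_create(session, model, **filters):
    # Return the existing row matching the filters, or a new instance
    # already added to the session.
    obj = session.query(model).filter_by(**filters).first()
    if obj is None:
        obj = model(**filters)
        session.add(obj)
    return obj

Used for the slot assignment above it would read roughly:

    dbslot = get_or_create(session, ChassisSlot,
                           chassis=dbchassis, slot_number=slot)
    dbslot.machine = dbmachine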
Example #38
    def render(self, session, network, network_environment, ip, type, side,
               machine, fqdn, cluster, pg, has_dynamic_ranges, fullinfo,
               **arguments):
        """Return a network matching the parameters.

        Some of the search terms can only return a unique network.  For
        those (like ip and fqdn) we proceed with the query anyway.  This
        allows for quick scripted tests like "is the network for X.X.X.X
        a tor_net2?".

        """
        dbnet_env = NetworkEnvironment.get_unique_or_default(
            session, network_environment)
        q = session.query(Network)
        q = q.filter_by(network_environment=dbnet_env)
        if network:
            # Note: the network name is not unique (nor is it in QIP)
            q = q.filter_by(name=network)
        if ip:
            dbnetwork = get_net_id_from_ip(session, ip, dbnet_env)
            q = q.filter_by(id=dbnetwork.id)
        if type:
            q = q.filter_by(network_type=type)
        if side:
            q = q.filter_by(side=side)
        if machine:
            dbmachine = Machine.get_unique(session, machine, compel=True)
            vlans = []
            if dbmachine.cluster and dbmachine.cluster.switch:
                # If this is a VM on a cluster, consult the VLANs.  There
                # could be functionality here for real hardware to consult
                # interface port groups... there's no real use case yet.
                vlans = [
                    VlanInfo.get_vlan_id(session, i.port_group)
                    for i in dbmachine.interfaces if i.port_group
                ]
                if vlans:
                    q = q.join('observed_vlans')
                    q = q.filter_by(switch=dbmachine.cluster.switch)
                    q = q.filter(ObservedVlan.vlan_id.in_(vlans))
                    q = q.reset_joinpoint()
            if not vlans:
                networks = [
                    addr.network.id for addr in dbmachine.all_addresses()
                ]
                if not networks:
                    msg = "Machine %s has no interfaces " % dbmachine.label
                    if dbmachine.cluster:
                        msg += "with a portgroup or "
                    msg += "assigned to a network."
                    raise ArgumentError(msg)
                q = q.filter(Network.id.in_(networks))
        if fqdn:
            (short, dbdns_domain) = parse_fqdn(session, fqdn)
            dnsq = session.query(ARecord.ip)
            dnsq = dnsq.join(ARecord.fqdn)
            dnsq = dnsq.filter_by(name=short)
            dnsq = dnsq.filter_by(dns_domain=dbdns_domain)
            networks = [
                get_net_id_from_ip(session, addr.ip, dbnet_env).id
                for addr in dnsq.all()
            ]
            q = q.filter(Network.id.in_(networks))
        if cluster:
            dbcluster = Cluster.get_unique(session, cluster, compel=True)
            if dbcluster.switch:
                q = q.join('observed_vlans')
                q = q.filter_by(switch=dbcluster.switch)
                q = q.reset_joinpoint()
            else:
                net_ids = [
                    h.machine.primary_name.network.id for h in dbcluster.hosts
                    if getattr(h.machine.primary_name, "network")
                ]
                q = q.filter(Network.id.in_(net_ids))
        if pg:
            vlan = VlanInfo.get_vlan_id(session, pg, compel=ArgumentError)
            q = q.join('observed_vlans')
            q = q.filter_by(vlan_id=vlan)
            q = q.reset_joinpoint()
        dblocation = get_location(session, **arguments)
        if dblocation:
            if arguments.get('exact_location'):
                q = q.filter_by(location=dblocation)
            else:
                childids = dblocation.offspring_ids()
                q = q.filter(Network.location_id.in_(childids))
        if has_dynamic_ranges:
            q = q.filter(
                exists([DynamicStub.dns_record_id],
                       from_obj=DynamicStub.__table__.join(
                           ARecord.__table__)).where(
                               Network.id == DynamicStub.network_id))
        q = q.order_by(Network.ip)
        if fullinfo:
            q = q.options(undefer('comments'))
            return q.all()
        return ShortNetworkList(q.all())
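The fullinfo branch undefers the comments column: the common name-only listing never fetches it, and the cost is paid only when full output was requested. A self-contained sketch of deferred column loading in the SQLAlchemy 1.x style of these examples (the Net model is a stand-in, not the broker's schema):

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import deferred, sessionmaker, undefer

Base = declarative_base()


class Net(Base):
    __tablename__ = 'net'
    id = Column(Integer, primary_key=True)
    name = Column(String(64))
    # Deferred: skipped by default, so cheap listings stay cheap.
    comments = deferred(Column(String(255)))


engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

# Full output wants comments too, so undefer them in the same query.
full = session.query(Net).options(undefer('comments')).all()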
Example #39
    def render(self, session, logger, machine, model, vendor, serial, chassis,
               slot, cpuname, cpuvendor, cpuspeed, cpucount, memory, cluster,
               comments, **arguments):
        dblocation = get_location(session,
                                  query_options=[
                                      subqueryload('parents'),
                                      joinedload('parents.dns_maps')
                                  ],
                                  **arguments)
        if chassis:
            dbchassis = Chassis.get_unique(session, chassis, compel=True)
            if slot is None:
                raise ArgumentError("The --chassis option requires a --slot.")
            if dblocation and dblocation != dbchassis.location:
                raise ArgumentError("{0} conflicts with chassis location "
                                    "{1}.".format(dblocation,
                                                  dbchassis.location))
            dblocation = dbchassis.location
        elif slot is not None:
            raise ArgumentError("The --slot option requires a --chassis.")

        dbmodel = Model.get_unique(session,
                                   name=model,
                                   vendor=vendor,
                                   compel=True)

        if dbmodel.machine_type not in [
                'blade', 'rackmount', 'workstation', 'aurora_node',
                'virtual_machine'
        ]:
            raise ArgumentError("The add_machine command cannot add machines "
                                "of type %(type)s.  Try 'add %(type)s'." %
                                {"type": dbmodel.machine_type})

        if cluster:
            if dbmodel.machine_type != 'virtual_machine':
                raise ArgumentError("Only virtual machines can have a cluster "
                                    "attribute.")
            dbcluster = Cluster.get_unique(session,
                                           cluster,
                                           compel=ArgumentError)
            # This test could be either archetype or cluster_type
            if dbcluster.personality.archetype.name != 'esx_cluster':
                raise ArgumentError("Can only add virtual machines to "
                                    "clusters with archetype esx_cluster.")
            # TODO implement the same to vmhosts.
            if dbcluster.status.name == 'decommissioned':
                raise ArgumentError("Cannot add virtual machines to "
                                    "decommissioned clusters.")
            if dblocation and dbcluster.location_constraint != dblocation:
                raise ArgumentError("Cannot override cluster location {0} "
                                    "with location {1}.".format(
                                        dbcluster.location_constraint,
                                        dblocation))
            dblocation = dbcluster.location_constraint
        elif dbmodel.machine_type == 'virtual_machine':
            raise ArgumentError("Virtual machines must be assigned to a "
                                "cluster.")

        # preclude=True raises if a machine with this label already exists.
        Machine.get_unique(session, machine, preclude=True)
        dbmachine = create_machine(session, machine, dblocation, dbmodel,
                                   cpuname, cpuvendor, cpuspeed, cpucount,
                                   memory, serial, comments)

        if chassis:
            # FIXME: Are virtual machines allowed to be in a chassis?
            dbslot = session.query(ChassisSlot).filter_by(
                chassis=dbchassis, slot_number=slot).first()
            if not dbslot:
                dbslot = ChassisSlot(chassis=dbchassis, slot_number=slot)
            dbslot.machine = dbmachine
            session.add(dbslot)

        if cluster:
            if not dbcluster.resholder:
                dbcluster.resholder = ClusterResource(cluster=dbcluster)
            dbvm = VirtualMachine(machine=dbmachine,
                                  name=dbmachine.label,
                                  holder=dbcluster.resholder)
            dbcluster.validate()

        session.flush()

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbmachine))
        if cluster:
            plenaries.append(Plenary.get_plenary(dbcluster))
            plenaries.append(Plenary.get_plenary(dbvm))

        # The check that prevents writing a plenary file for dummy aurora
        # hardware lives inside write() itself.  That way the behaviour
        # stays consistent without having to alter (and risk forgetting
        # to alter) every call to the method.
        plenaries.write()
        return
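The ChassisSlot handling in this example is a query-then-create idiom: reuse the slot row if one exists, otherwise create it, then point it at the new machine. A standalone sketch of that idiom follows, using a hypothetical Slot model rather than the real ChassisSlot mapping.

# Get-or-create sketch of the ChassisSlot block above; Slot is a toy
# model invented for illustration, not the real ChassisSlot mapping.
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()

class Slot(Base):
    __tablename__ = 'slot'
    id = Column(Integer, primary_key=True)
    chassis_id = Column(Integer, nullable=False)
    slot_number = Column(Integer, nullable=False)
    machine_label = Column(String)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

def get_or_create_slot(session, chassis_id, slot_number):
    # Reuse an existing row when one matches; otherwise create it.  The
    # caller then (re)assigns the machine, as render() does above.
    dbslot = session.query(Slot).filter_by(
        chassis_id=chassis_id, slot_number=slot_number).first()
    if not dbslot:
        dbslot = Slot(chassis_id=chassis_id, slot_number=slot_number)
        session.add(dbslot)
    return dbslot

dbslot = get_or_create_slot(session, chassis_id=1, slot_number=4)
dbslot.machine_label = 'evm1'
session.flush()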
Example no. 40
    def render(self, session, logger, machine, disk, controller, share,
               filesystem, resourcegroup, address, comments, size, boot,
               snapshot, rename_to, **kw):
        dbmachine = Machine.get_unique(session, machine, compel=True)
        dbdisk = Disk.get_unique(session, device_name=disk, machine=dbmachine,
                                 compel=True)

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbmachine))

        if rename_to:
            Disk.get_unique(session, device_name=rename_to, machine=dbmachine,
                            preclude=True)
            dbdisk.device_name = rename_to

        if comments is not None:
            dbdisk.comments = comments

        if size is not None:
            dbdisk.capacity = size

        if controller:
            if controller not in controller_types:
                raise ArgumentError("%s is not a valid controller type, use "
                                    "one of: %s." %
                                    (controller, ", ".join(controller_types)))
            dbdisk.controller_type = controller

        if boot is not None:
            dbdisk.bootable = boot
            # There should be just one boot disk. We may need to re-think this
            # if we want to model software RAID in the database.
            for other_disk in dbmachine.disks:
                if other_disk == dbdisk:
                    continue
                if boot and other_disk.bootable:
                    other_disk.bootable = False

        if address:
            # TODO: do we really care? Bus address makes sense for physical
            # disks as well, even if we cannot use that information today.
            if not isinstance(dbdisk, VirtualDisk):
                raise ArgumentError("Bus address can only be set for virtual "
                                    "disks.")
            dbdisk.address = address

        if snapshot is not None:
            if not isinstance(dbdisk, VirtualDisk):
                raise ArgumentError("Snapshot capability can only be set for "
                                    "virtual disks.")
            dbdisk.snapshotable = snapshot

        if share or filesystem:
            if isinstance(dbdisk, VirtualNasDisk):
                old_share = dbdisk.share
                old_share.disks.remove(dbdisk)
            elif isinstance(dbdisk, VirtualLocalDisk):
                old_fs = dbdisk.filesystem
                old_fs.disks.remove(dbdisk)
            else:
                raise ArgumentError("Disk {0!s} of {1:l} is not a virtual "
                                    "disk, changing the backend store is not "
                                    "possible.".format(dbdisk, dbmachine))

            if share:
                if not isinstance(dbdisk, VirtualNasDisk):
                    new_dbdisk = copy_virt_disk(session, VirtualNasDisk, dbdisk)
                    session.delete(dbdisk)
                    session.flush()
                    session.add(new_dbdisk)
                    dbdisk = new_dbdisk

                new_share = find_resource(Share,
                                          dbmachine.vm_container.holder.holder_object,
                                          resourcegroup, share)
                new_share.disks.append(dbdisk)

            if filesystem:
                if not isinstance(dbdisk, VirtualLocalDisk):
                    new_dbdisk = copy_virt_disk(session, VirtualLocalDisk, dbdisk)
                    session.delete(dbdisk)
                    session.flush()
                    session.add(new_dbdisk)
                    dbdisk = new_dbdisk

                new_fs = find_resource(Filesystem,
                                       dbmachine.vm_container.holder.holder_object,
                                       resourcegroup, filesystem)
                new_fs.disks.append(dbdisk)

        session.flush()

        plenaries.write()

        return
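The backend change in this example relies on a delete-flush-add sequence because a mapped row cannot switch polymorphic identity in place: a copy of the disk is built as the target subclass, the old row is removed, and the copy is persisted. Below is a sketch of that retype pattern under hypothetical single-table-inheritance models; retype_disk is an assumed stand-in for the copy_virt_disk helper, not its actual implementation.

# Sketch of the retype pattern used above: a mapped row cannot change
# polymorphic identity in place, so a copy is built as the target
# subclass, the old row is deleted and flushed, and the copy is added.
# The models and the retype_disk helper are hypothetical stand-ins.
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()

class Disk(Base):
    __tablename__ = 'disk'
    id = Column(Integer, primary_key=True)
    device_name = Column(String, nullable=False, unique=True)
    disk_type = Column(String, nullable=False)
    __mapper_args__ = {'polymorphic_on': disk_type,
                       'polymorphic_identity': 'disk'}

class LocalDisk(Disk):
    __mapper_args__ = {'polymorphic_identity': 'local'}

class NasDisk(Disk):
    __mapper_args__ = {'polymorphic_identity': 'nas'}

def retype_disk(session, dbdisk, new_cls):
    # Copy the shared attributes, drop the old row, flush so the unique
    # device_name is released, then persist the retyped copy.
    new_disk = new_cls(device_name=dbdisk.device_name)
    session.delete(dbdisk)
    session.flush()
    session.add(new_disk)
    return new_disk

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
old = LocalDisk(device_name='sda')
session.add(old)
session.flush()
new = retype_disk(session, old, NasDisk)
session.flush()
print(type(new).__name__)  # NasDisk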