def render(self, session, logger, network_device, type, clear, vlan, **arguments):
    NetworkDevice.check_type(type)
    dbnetdev = NetworkDevice.get_unique(session, network_device, compel=True)
    if type is not None and dbnetdev.switch_type != type:
        raise ArgumentError("{0} is not a {1} switch.".format(dbnetdev, type))
    return self.poll(session, logger, [dbnetdev], clear, vlan)
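NetworkDevice.check_type above is assumed to reject unknown switch types up front while letting None through, since the type filter is optional for the poll. A minimal sketch of such a validator, assuming it raises ArgumentError like the rest of the module; the concrete type values are hypothetical placeholders:

VALID_SWITCH_TYPES = frozenset(["tor", "bor"])  # hypothetical placeholder values

def check_type(type):
    # None means no type filter was requested, so it passes through.
    if type is not None and type not in VALID_SWITCH_TYPES:
        raise ArgumentError("Unknown switch type '%s'." % type)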
Example #2
    def render(self, session, logger, network_device, label, model, type, ip,
               interface, iftype, mac, vendor, serial, comments, **arguments):
        dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                   compel=True)

        if not dbmodel.model_type.isNetworkDeviceType():
            raise ArgumentError("This command can only be used to "
                                "add network devices.")

        dblocation = get_location(session, compel=True, **arguments)

        dbdns_rec, newly_created = grab_address(session, network_device, ip,
                                                allow_restricted_domain=True,
                                                allow_reserved=True,
                                                preclude=True)
        if not label:
            label = dbdns_rec.fqdn.name
            try:
                NetworkDevice.check_label(label)
            except ArgumentError:
                raise ArgumentError("Could not deduce a valid hardware label "
                                    "from the network device name.  Please specify "
                                    "--label.")

        # FIXME: What do the error messages for an invalid enum (switch_type)
        # look like?
        dbnetdev = NetworkDevice(label=label, switch_type=type,
                                 location=dblocation, model=dbmodel,
                                 serial_no=serial, comments=comments)
        session.add(dbnetdev)
        dbnetdev.primary_name = dbdns_rec

        check_netdev_iftype(iftype)

        dbinterface = get_or_create_interface(session, dbnetdev,
                                              name=interface, mac=mac,
                                              interface_type=iftype)
        dbnetwork = get_net_id_from_ip(session, ip)
        # TODO: should we call check_ip_restrictions() here?
        assign_address(dbinterface, ip, dbnetwork, logger=logger)

        session.flush()

        plenary = Plenary.get_plenary(dbnetdev, logger=logger)
        with plenary.get_key():
            plenary.stash()
            try:
                plenary.write(locked=True)
                dsdb_runner = DSDBRunner(logger=logger)
                dsdb_runner.update_host(dbnetdev, None)
                dsdb_runner.commit_or_rollback("Could not add network device to DSDB")
            except:
                plenary.restore_stash()
                raise

        return
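The stash/write/restore dance at the end of this example recurs in several later ones: snapshot the on-disk plenary, write under the compile key, push the change to DSDB, and roll the file back if anything raises. A minimal sketch of the same pattern factored into a context manager, assuming only the stash()/restore_stash() interface visible above:

from contextlib import contextmanager

@contextmanager
def restored_on_failure(plenary):
    # Snapshot the current on-disk state; put it back if the body raises.
    plenary.stash()
    try:
        yield plenary
    except:
        plenary.restore_stash()
        raise

With such a helper the with-block above would reduce to the write and the two DSDB calls.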
Example #3
def render(self, session, logger, rack, type, clear, vlan, **arguments):
    dblocation = get_location(session, rack=rack)
    NetworkDevice.check_type(type)
    q = session.query(NetworkDevice)
    q = q.filter_by(location=dblocation)
    if type:
        q = q.filter_by(switch_type=type)
    netdevs = q.all()
    if not netdevs:
        raise NotFoundException("No network device found.")
    return self.poll(session, logger, netdevs, clear, vlan)
Example #4
    def render(self, generate, session, logger, network_device, **kwargs):
        dbnetdev = NetworkDevice.get_unique(session, network_device, compel=True)
        plenary_info = Plenary.get_plenary(dbnetdev, logger=logger)

        if generate:
            return plenary_info._generate_content()
        else:
            return plenary_info.read()
Example #5
    def render(self, session, logger, network_device, **arguments):
        dbnetdev = NetworkDevice.get_unique(session, network_device, compel=True)

        # Check and complain if the network device has any addresses other
        # than its primary address
        addrs = []
        for addr in dbnetdev.all_addresses():
            if addr.ip == dbnetdev.primary_ip:
                continue
            addrs.append(str(addr.ip))
        if addrs:
            raise ArgumentError("{0} still provides the following addresses, "
                                "delete them first: {1}."
                                .format(dbnetdev, ", ".join(addrs)))

        dbdns_rec = dbnetdev.primary_name
        ip = dbnetdev.primary_ip
        old_fqdn = str(dbnetdev.primary_name.fqdn)
        old_comments = dbnetdev.comments
        session.delete(dbnetdev)
        if dbdns_rec:
            delete_dns_record(dbdns_rec)

        session.flush()

        # Any network device ports hanging off this network device should be deleted with
        # the cascade delete of the network device.

        netdev_plenary = Plenary.get_plenary(dbnetdev, logger=logger)

        # clusters connected to this network device
        plenaries = PlenaryCollection(logger=logger)

        for dbcluster in dbnetdev.esx_clusters:
            plenaries.append(Plenary.get_plenary(dbcluster))

        with CompileKey.merge([netdev_plenary.get_key(), plenaries.get_key()]):
            netdev_plenary.stash()
            try:
                plenaries.write(locked=True)
                netdev_plenary.remove(locked=True)

                if ip:
                    dsdb_runner = DSDBRunner(logger=logger)
                    # FIXME: restore interface name/MAC on rollback
                    dsdb_runner.delete_host_details(old_fqdn, ip,
                                                    comments=old_comments)
                    dsdb_runner.commit_or_rollback("Could not remove network device "
                                                   "from DSDB")
            except:
                plenaries.restore_stash()
                netdev_plenary.restore_stash()
                raise
        return
Example #6
def render(self, session, logger, network_device, switch, port, mac,
           **arguments):
    if switch:
        self.deprecated_option("switch", "Please use --network_device "
                               "instead.", logger=logger, **arguments)
        if not network_device:
            network_device = switch
    q = session.query(ObservedMac)
    if network_device:
        dbnetdev = NetworkDevice.get_unique(session, network_device, compel=True)
        q = q.filter_by(network_device=dbnetdev)
    if port is not None:
        q = q.filter_by(port=port)
    if mac:
        q = q.filter_by(mac_address=mac)
    return q.order_by(ObservedMac.mac_address).all()
Example #7
def render(self, session, logger, network_device, discover, **arguments):
    if discover:
        options = []
    else:
        options = [subqueryload('observed_vlans'),
                   undefer('observed_vlans.creation_date'),
                   joinedload('observed_vlans.network'),
                   subqueryload('observed_macs'),
                   undefer('observed_macs.creation_date')]
    dbnetdev = NetworkDevice.get_unique(session, network_device, compel=True,
                                        query_options=options)
    if discover:
        results = discover_network_device(session, logger, self.config,
                                          dbnetdev, True)
        return StringList(results)
    else:
        return dbnetdev
Example #8
    def render(self, session, network_device, type, vlan, fullinfo, style,
               **arguments):
        q = search_hardware_entity_query(session, hardware_type=NetworkDevice,
                                         **arguments)
        if type:
            q = q.filter_by(switch_type=type)
        if network_device:
            dbnetdev = NetworkDevice.get_unique(session, network_device, compel=True)
            q = q.filter_by(id=dbnetdev.id)

        if vlan:
            q = q.join("observed_vlans", "vlan").filter_by(vlan_id=vlan)
            q = q.reset_joinpoint()

        # Prefer the primary name for ordering
        q = q.outerjoin(DnsRecord, (Fqdn, DnsRecord.fqdn_id == Fqdn.id),
                        DnsDomain)
        q = q.options(contains_eager('primary_name'),
                      contains_eager('primary_name.fqdn'),
                      contains_eager('primary_name.fqdn.dns_domain'))
        q = q.reset_joinpoint()
        q = q.order_by(Fqdn.name, DnsDomain.name, NetworkDevice.label)

        if fullinfo or style != 'raw':
            q = q.options(joinedload('location'),
                          subqueryload('interfaces'),
                          joinedload('interfaces.assignments'),
                          joinedload('interfaces.assignments.dns_records'),
                          joinedload('interfaces.assignments.network'),
                          subqueryload('observed_macs'),
                          undefer('observed_macs.creation_date'),
                          subqueryload('observed_vlans'),
                          undefer('observed_vlans.creation_date'),
                          joinedload('observed_vlans.network'),
                          subqueryload('model'),
                          # Switches don't have machine specs, but the formatter
                          # checks for their existence anyway
                          joinedload('model.machine_specs'))
            return q.all()
        return StringAttributeList(q.all(), "printable_name")
Example #9
    def render(self, session, logger, interface, network_device,
               mac, iftype, comments, **arguments):
        for arg in self.invalid_parameters:
            if arguments.get(arg) is not None:
                raise ArgumentError("Cannot use argument --%s when adding an "
                                    "interface to a network device." % arg)

        check_netdev_iftype(iftype)

        dbnetdev = NetworkDevice.get_unique(session, network_device, compel=True)
        oldinfo = DSDBRunner.snapshot_hw(dbnetdev)

        get_or_create_interface(session, dbnetdev, name=interface, mac=mac,
                                interface_type=iftype, comments=comments,
                                preclude=True)

        session.flush()

        dsdb_runner = DSDBRunner(logger=logger)
        dsdb_runner.update_host(dbnetdev, oldinfo)
        dsdb_runner.commit_or_rollback("Could not update network device in DSDB")

        return
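This example shows the before/after contract assumed for DSDBRunner throughout these snippets: snapshot_hw captures the pre-change state, update_host diffs the device against that snapshot after the AQDB changes are flushed, and commit_or_rollback finalizes or undoes the queued DSDB commands. A hypothetical wrapper making that ordering explicit (the helper name is invented for illustration):

def update_netdev_in_dsdb(session, logger, dbnetdev, mutate):
    oldinfo = DSDBRunner.snapshot_hw(dbnetdev)  # capture pre-change state
    mutate(dbnetdev)                            # apply the AQDB changes
    session.flush()
    dsdb_runner = DSDBRunner(logger=logger)
    dsdb_runner.update_host(dbnetdev, oldinfo)  # diff against the snapshot
    dsdb_runner.commit_or_rollback("Could not update network device in DSDB")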
Example #10
    def render(self, session, logger, cluster, archetype, personality, domain,
               sandbox, max_members, down_hosts_threshold, maint_threshold,
               buildstatus, comments, vm_to_host_ratio, switch, metacluster,
               **arguments):

        validate_nlist_key("cluster", cluster)
        dbpersonality = Personality.get_unique(session, name=personality,
                                               archetype=archetype, compel=True)
        if not dbpersonality.is_cluster:
            raise ArgumentError("%s is not a cluster personality." %
                                personality)

        ctype = dbpersonality.archetype.cluster_type
        section = "archetype_" + dbpersonality.archetype.name

        if not buildstatus:
            buildstatus = "build"
        dbstatus = ClusterLifecycle.get_instance(session, buildstatus)

        (dbbranch, dbauthor) = get_branch_and_author(session, logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     compel=True)

        if hasattr(dbbranch, "allow_manage") and not dbbranch.allow_manage:
            raise ArgumentError("Adding clusters to {0:l} is not allowed."
                                .format(dbbranch))

        dbloc = get_location(session, **arguments)
        if not dbloc:
            raise ArgumentError("Adding a cluster requires a location "
                                "constraint.")
        if not dbloc.campus:
            raise ArgumentError("{0} is not within a campus.".format(dbloc))

        if max_members is None:
            if self.config.has_option(section, "max_members_default"):
                max_members = self.config.getint(section, "max_members_default")

        Cluster.get_unique(session, cluster, preclude=True)
        # Not finding the cluster type is an internal consistency issue, so make
        # that show up in the logs by using AquilonError
        clus_type = Cluster.polymorphic_subclass(ctype, "Unknown cluster type",
                                                 error=AquilonError)

        (down_hosts_pct, dht) = Cluster.parse_threshold(down_hosts_threshold)

        kw = {'name': cluster,
              'location_constraint': dbloc,
              'personality': dbpersonality,
              'max_hosts': max_members,
              'branch': dbbranch,
              'sandbox_author': dbauthor,
              'down_hosts_threshold': dht,
              'down_hosts_percent': down_hosts_pct,
              'status': dbstatus,
              'comments': comments}

        if ctype == 'esx':
            if vm_to_host_ratio is None:
                if self.config.has_option(section, "vm_to_host_ratio"):
                    vm_to_host_ratio = self.config.get(section,
                                                       "vm_to_host_ratio")
                else:
                    vm_to_host_ratio = "1:1"
            (vm_count, host_count) = force_ratio("vm_to_host_ratio",
                                                 vm_to_host_ratio)
            kw["vm_count"] = vm_count
            kw["host_count"] = host_count

        if switch and hasattr(clus_type, 'network_device'):
            kw['network_device'] = NetworkDevice.get_unique(session,
                                                            switch,
                                                            compel=True)

        if maint_threshold is not None:
            (down_hosts_pct, dht) = Cluster.parse_threshold(maint_threshold)
            kw['down_maint_threshold'] = dht
            kw['down_maint_percent'] = down_hosts_pct

        dbcluster = clus_type(**kw)

        plenaries = PlenaryCollection(logger=logger)

        if metacluster:
            dbmetacluster = MetaCluster.get_unique(session,
                                                   metacluster,
                                                   compel=True)

            dbmetacluster.members.append(dbcluster)

            plenaries.append(Plenary.get_plenary(dbmetacluster))

        session.add(dbcluster)
        session.flush()

        plenaries.append(Plenary.get_plenary(dbcluster))
        plenaries.write()

        return
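force_ratio above turns the vm_to_host_ratio string into a pair of counts, with "1:1" as the fallback default. A sketch of the N:M parsing it is assumed to perform; the real helper may be more permissive about whitespace or separators:

def force_ratio(label, value):
    # Parse "N:M" into two integers, e.g. "16:1" -> (16, 1).
    try:
        (left, right) = value.split(":")
        return (int(left), int(right))
    except ValueError:
        raise ArgumentError("The --%s value '%s' is not in the N:M format." %
                            (label, value))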
Example #11
    def render(self, session, logger, cluster, personality,
               max_members, fix_location, down_hosts_threshold,
               maint_threshold, comments,
               # ESX specific options
               switch, memory_capacity, clear_overrides, vm_to_host_ratio,
               **arguments):

        dbcluster = Cluster.get_unique(session, cluster, compel=True)

        self.check_cluster_type(dbcluster, forbid=MetaCluster)

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbcluster))

        if vm_to_host_ratio:
            self.check_cluster_type(dbcluster, require=EsxCluster)
            (vm_count, host_count) = force_ratio("vm_to_host_ratio",
                                                 vm_to_host_ratio)
            dbcluster.vm_count = vm_count
            dbcluster.host_count = host_count

        if switch is not None:
            self.check_cluster_type(dbcluster, require=EsxCluster)
            if switch:
                # FIXME: Verify that any hosts are on the same network
                dbnetdev = NetworkDevice.get_unique(session, switch, compel=True)
                plenaries.append(Plenary.get_plenary(dbnetdev))
            else:
                dbnetdev = None
            dbcluster.network_device = dbnetdev

        if memory_capacity is not None:
            self.check_cluster_type(dbcluster, require=EsxCluster)
            dbcluster.memory_capacity = memory_capacity

        if clear_overrides is not None:
            self.check_cluster_type(dbcluster, require=EsxCluster)
            dbcluster.memory_capacity = None

        update_cluster_location(session, logger, dbcluster, fix_location,
                                plenaries, **arguments)

        if personality:
            archetype = dbcluster.personality.archetype.name
            dbpersonality = Personality.get_unique(session, name=personality,
                                                   archetype=archetype,
                                                   compel=True)
            if not dbpersonality.is_cluster:
                raise ArgumentError("Personality {0} is not a cluster " +
                                    "personality".format(dbpersonality))
            dbcluster.personality = dbpersonality

        if max_members is not None:
            # Allow removing the restriction
            if max_members < 0:
                max_members = None
            dbcluster.max_hosts = max_members

        if comments is not None:
            dbcluster.comments = comments

        if down_hosts_threshold is not None:
            (dbcluster.down_hosts_percent,
             dbcluster.down_hosts_threshold) = \
                Cluster.parse_threshold(down_hosts_threshold)

        if maint_threshold is not None:
            (dbcluster.down_maint_percent,
             dbcluster.down_maint_threshold) = \
                Cluster.parse_threshold(maint_threshold)

        session.flush()
        dbcluster.validate()

        plenaries.write(locked=False)

        return
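Both threshold options funnel through Cluster.parse_threshold, which the unpacking in this example shows returning a (percent, threshold) pair. A sketch of the syntax it is assumed to accept, where a bare integer is an absolute host count and a trailing % marks a percentage; treating the first element as a boolean flag is an assumption:

def parse_threshold(threshold):
    # "2"   -> (False, 2): an absolute number of hosts
    # "50%" -> (True, 50): a percentage of the cluster's members
    if threshold.endswith("%"):
        return (True, int(threshold[:-1]))
    return (False, int(threshold))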
Example #12
    def render(self, session, logger, interface, machine, network_device,
               switch, chassis, mac, user, **arguments):
        if switch:
            self.deprecated_option("switch", "Please use --network_device "
                                   "instead.", logger=logger, user=user,
                                   **arguments)
            if not network_device:
                network_device = switch
        self.require_one_of(machine=machine, network_device=network_device,
                            chassis=chassis, mac=mac)

        if machine:
            dbhw_ent = Machine.get_unique(session, machine, compel=True)
        elif network_device:
            dbhw_ent = NetworkDevice.get_unique(session, network_device, compel=True)
        elif chassis:
            dbhw_ent = Chassis.get_unique(session, chassis, compel=True)
        else:
            dbhw_ent = None

        dbinterface = Interface.get_unique(session, hardware_entity=dbhw_ent,
                                           name=interface, mac=mac, compel=True)
        if not dbhw_ent:
            dbhw_ent = dbinterface.hardware_entity

        if dbinterface.vlans:
            vlans = ", ".join([iface.name for iface in
                               dbinterface.vlans.values()])
            raise ArgumentError("{0} is the parent of the following VLAN "
                                "interfaces, delete them first: "
                                "{1}.".format(dbinterface, vlans))

        if dbinterface.slaves:
            slaves = ", ".join([iface.name for iface in dbinterface.slaves])
            raise ArgumentError("{0} is the master of the following slave "
                                "interfaces, delete them first: "
                                "{1}.".format(dbinterface, slaves))

        for addr in dbinterface.assignments:
            if addr.ip != dbhw_ent.primary_ip:
                continue

            # If this is a machine, it is possible to delete the host to get rid
            # of the primary name
            if dbhw_ent.hardware_type == "machine":
                msg = "  You should delete the host first."
            else:
                msg = ""

            raise ArgumentError("{0} holds the primary address of the {1:cl}, "
                                "therefore it cannot be deleted."
                                "{2}".format(dbinterface, dbhw_ent, msg))

        addrs = ", ".join(["%s: %s" % (addr.logical_name, addr.ip) for addr in
                           dbinterface.assignments])
        if addrs:
            raise ArgumentError("{0} still has the following addresses "
                                "configured, delete them first: "
                                "{1}.".format(dbinterface, addrs))

        dbhw_ent.interfaces.remove(dbinterface)
        session.flush()

        if dbhw_ent.hardware_type == 'machine':
            plenary_info = Plenary.get_plenary(dbhw_ent, logger=logger)
            plenary_info.write()
        return
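require_one_of at the top of this example guards against the caller naming no hardware at all; note the code then happily combines mac with a hardware entity in Interface.get_unique, so "one of" here reads as "at least one". A minimal sketch of such a guard on the command class, under that assumption:

def require_one_of(self, **kwargs):
    # At least one of the named options must carry a value.
    if not any(kwargs.values()):
        raise ArgumentError("Please specify at least one of: --%s." %
                            ", --".join(sorted(kwargs)))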
Example #13
    def render(self, session, logger,
               # search_cluster
               archetype, cluster_type, personality,
               domain, sandbox, branch, buildstatus,
               allowed_archetype, allowed_personality,
               down_hosts_threshold, down_maint_threshold, max_members,
               member_archetype, member_hostname, member_personality,
               capacity_override, cluster, esx_guest, instance,
               esx_metacluster, service, share, esx_share,
               esx_switch, esx_virtual_machine,
               fullinfo, style, **arguments):

        if esx_share:
            self.deprecated_option("esx_share", "Please use --share instead.",
                                   logger=logger, **arguments)
            share = esx_share

        if cluster_type:
            cls = Cluster.polymorphic_subclass(cluster_type,
                                               "Unknown cluster type")
        else:
            cls = Cluster

        # Don't load full objects if we only want to show their name
        if fullinfo or style != 'raw':
            q = session.query(cls)
        else:
            q = session.query(cls.name)

        # The ORM automatically de-duplicates the result if we query full
        # objects, but not when we query just the names. Tell the DB to do so.
        q = q.distinct()

        (dbbranch, dbauthor) = get_branch_and_author(session, logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     branch=branch)
        if dbbranch:
            q = q.filter_by(branch=dbbranch)
        if dbauthor:
            q = q.filter_by(sandbox_author=dbauthor)

        if archetype:
            # Added to the searches as appropriate below.
            dbarchetype = Archetype.get_unique(session, archetype, compel=True)
        if personality and archetype:
            dbpersonality = Personality.get_unique(session,
                                                   archetype=dbarchetype,
                                                   name=personality,
                                                   compel=True)
            q = q.filter_by(personality=dbpersonality)
        elif personality:
            PersAlias = aliased(Personality)
            q = q.join(PersAlias).filter_by(name=personality)
            q = q.reset_joinpoint()
        elif archetype:
            PersAlias = aliased(Personality)
            q = q.join(PersAlias).filter_by(archetype=dbarchetype)
            q = q.reset_joinpoint()

        if buildstatus:
            dbbuildstatus = ClusterLifecycle.get_instance(session, buildstatus)
            q = q.filter_by(status=dbbuildstatus)

        # Go through the arguments and make special dicts for each
        # specific set of location arguments that are stripped of the
        # given prefix.
        location_args = {'cluster_': {}, 'member_': {}}
        for prefix in location_args.keys():
            for (k, v) in arguments.items():
                if k.startswith(prefix):
                    # arguments['cluster_building'] = 'dd'
                    # becomes
                    # location_args['cluster_']['building'] = 'dd'
                    location_args[prefix][k.replace(prefix, '')] = v

        dblocation = get_location(session, **location_args['cluster_'])
        if dblocation:
            if location_args['cluster_']['exact_location']:
                q = q.filter_by(location_constraint=dblocation)
            else:
                childids = dblocation.offspring_ids()
                q = q.filter(Cluster.location_constraint_id.in_(childids))
        dblocation = get_location(session, **location_args['member_'])
        if dblocation:
            q = q.join('_hosts', 'host', 'hardware_entity')
            if location_args['member_']['exact_location']:
                q = q.filter_by(location=dblocation)
            else:
                childids = dblocation.offspring_ids()
                q = q.filter(Machine.location_id.in_(childids))
            q = q.reset_joinpoint()

        # esx stuff
        if cluster:
            q = q.filter_by(name=cluster)
        if esx_metacluster:
            dbmetacluster = MetaCluster.get_unique(session, esx_metacluster,
                                                   compel=True)
            q = q.join('_metacluster')
            q = q.filter_by(metacluster=dbmetacluster)
            q = q.reset_joinpoint()
        if esx_virtual_machine:
            dbvm = Machine.get_unique(session, esx_virtual_machine, compel=True)
            # TODO: support VMs inside resource groups?
            q = q.join(ClusterResource, VirtualMachine)
            q = q.filter_by(machine=dbvm)
            q = q.reset_joinpoint()
        if esx_guest:
            dbguest = hostname_to_host(session, esx_guest)
            # TODO: support VMs inside resource groups?
            q = q.join(ClusterResource, VirtualMachine, Machine)
            q = q.filter_by(host=dbguest)
            q = q.reset_joinpoint()
        if capacity_override:
            q = q.filter(EsxCluster.memory_capacity != None)
        if esx_switch:
            dbnetdev = NetworkDevice.get_unique(session, esx_switch, compel=True)
            q = q.filter_by(network_device=dbnetdev)

        if service:
            dbservice = Service.get_unique(session, name=service, compel=True)
            if instance:
                dbsi = ServiceInstance.get_unique(session, name=instance,
                                                  service=dbservice,
                                                  compel=True)
                q = q.filter(Cluster.service_bindings.contains(dbsi))
            else:
                q = q.join('service_bindings')
                q = q.filter_by(service=dbservice)
                q = q.reset_joinpoint()
        elif instance:
            q = q.join('service_bindings')
            q = q.filter_by(name=instance)
            q = q.reset_joinpoint()

        if share:
            # Perform sanity check on the share name
            q2 = session.query(Share)
            q2 = q2.filter_by(name=share)
            if not q2.first():
                raise NotFoundException("Share %s not found." % share)

            CR = aliased(ClusterResource)
            S1 = aliased(Share)
            S2 = aliased(Share)
            RG = aliased(ResourceGroup)
            BR = aliased(BundleResource)
            q = q.join(CR)
            q = q.outerjoin((S1, S1.holder_id == CR.id))
            q = q.outerjoin((RG, RG.holder_id == CR.id),
                            (BR, BR.resourcegroup_id == RG.id),
                            (S2, S2.holder_id == BR.id))
            q = q.filter(or_(S1.name == share, S2.name == share))
            q = q.reset_joinpoint()

        if max_members:
            q = q.filter_by(max_hosts=max_members)

        if down_hosts_threshold:
            (pct, dht) = Cluster.parse_threshold(down_hosts_threshold)
            q = q.filter_by(down_hosts_percent=pct)
            q = q.filter_by(down_hosts_threshold=dht)

        if down_maint_threshold:
            (pct, dmt) = Cluster.parse_threshold(down_maint_threshold)
            q = q.filter_by(down_maint_percent=pct)
            q = q.filter_by(down_maint_threshold=dmt)

        if allowed_archetype:
            # Added to the searches as appropriate below.
            dbaa = Archetype.get_unique(session, allowed_archetype,
                                        compel=True)
        if allowed_personality and allowed_archetype:
            dbap = Personality.get_unique(session, archetype=dbaa,
                                          name=allowed_personality,
                                          compel=True)
            q = q.filter(Cluster.allowed_personalities.contains(dbap))
        elif allowed_personality:
            q = q.join('allowed_personalities')
            q = q.filter_by(name=allowed_personality)
            q = q.reset_joinpoint()
        elif allowed_archetype:
            q = q.join('allowed_personalities')
            q = q.filter_by(archetype=dbaa)
            q = q.reset_joinpoint()

        if member_hostname:
            dbhost = hostname_to_host(session, member_hostname)
            q = q.join('_hosts')
            q = q.filter_by(host=dbhost)
            q = q.reset_joinpoint()

        if member_archetype:
            # Added to the searches as appropriate below.
            dbma = Archetype.get_unique(session, member_archetype, compel=True)
        if member_personality and member_archetype:
            q = q.join('_hosts', 'host')
            dbmp = Personality.get_unique(session, archetype=dbma,
                                          name=member_personality, compel=True)
            q = q.filter_by(personality=dbmp)
            q = q.reset_joinpoint()
        elif member_personality:
            q = q.join('_hosts', 'host', 'personality')
            q = q.filter_by(name=member_personality)
            q = q.reset_joinpoint()
        elif member_archetype:
            q = q.join('_hosts', 'host', 'personality')
            q = q.filter_by(archetype=dbma)
            q = q.reset_joinpoint()

        q = q.order_by(cls.name)

        if fullinfo or style != "raw":
            return q.all()
        return StringAttributeList(q.all(), "name")
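A side note on the prefix-stripping loop earlier in this example: k.replace(prefix, '') removes every occurrence of the prefix rather than just the leading one, so slicing with k[len(prefix):] is the safer spelling. An equivalent sketch of the whole loop as a dict comprehension:

location_args = {prefix: {k[len(prefix):]: v
                          for (k, v) in arguments.items()
                          if k.startswith(prefix)}
                 for prefix in ('cluster_', 'member_')}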
Example #14
    def render(self, session, logger, network_device, model, type, ip, vendor,
               serial, rename_to, discovered_macs, clear, discover, comments,
               **arguments):
        dbnetdev = NetworkDevice.get_unique(session, network_device, compel=True)

        oldinfo = DSDBRunner.snapshot_hw(dbnetdev)
        plenary = Plenary.get_plenary(dbnetdev, logger=logger)

        if discover:
            discover_network_device(session, logger, self.config,
                                    dbnetdev, False)

        if vendor and not model:
            model = dbnetdev.model.name
        if model:
            dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                       model_type=NetworkDeviceType.Switch,
                                       compel=True)
            dbnetdev.model = dbmodel

        dblocation = get_location(session, **arguments)
        if dblocation:
            dbnetdev.location = dblocation

        if serial is not None:
            dbnetdev.serial_no = serial

        # FIXME: What do the error messages for an invalid enum (switch_type)
        # look like?
        if type:
            NetworkDevice.check_type(type)
            dbnetdev.switch_type = type

        if ip:
            update_primary_ip(session, logger, dbnetdev, ip)

        if comments is not None:
            dbnetdev.comments = comments

        if rename_to:
            # Handling alias renaming would not be difficult in AQDB, but the
            # DSDB synchronization would be painful, so don't do that for now.
            # In theory we should check all configured IP addresses for aliases,
            # but this is the most common case
            if dbnetdev.primary_name and dbnetdev.primary_name.fqdn.aliases:
                raise ArgumentError("The network device has aliases and it cannot be "
                                    "renamed. Please remove all aliases first.")
            rename_hardware(session, dbnetdev, rename_to)

        if clear:
            session.query(ObservedMac).filter_by(network_device=dbnetdev).delete()

        if discovered_macs:
            now = datetime.now()
            for (macaddr, port) in discovered_macs:
                update_or_create_observed_mac(session, dbnetdev, port,
                                              macaddr, now)

        session.flush()

        with plenary.get_key():
            plenary.stash()
            try:
                plenary.write(locked=True)

                dsdb_runner = DSDBRunner(logger=logger)
                dsdb_runner.update_host(dbnetdev, oldinfo)
                dsdb_runner.commit_or_rollback("Could not update network device in DSDB")
            except:
                plenary.restore_stash()
                raise

        return
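update_or_create_observed_mac in the discovered_macs branch is assumed to be a plain upsert keyed on (device, port, MAC), against the same ObservedMac model the earlier examples query; the last_seen column name is an assumption:

def update_or_create_observed_mac(session, dbnetdev, port, macaddr, now):
    dbom = session.query(ObservedMac).filter_by(network_device=dbnetdev,
                                                port=port,
                                                mac_address=macaddr).first()
    if dbom:
        dbom.last_seen = now  # refresh the sighting timestamp
    else:
        session.add(ObservedMac(network_device=dbnetdev, port=port,
                                mac_address=macaddr, last_seen=now))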