Example #1
 def render(self, session, logger, hostname, cluster, application,
            **arguments):
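     # Resolve the owning host or cluster, then fetch the named
     # application; compel=True makes a missing application an error.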
     holder = get_resource_holder(session, hostname, cluster)
     dbapp = Application.get_unique(session, name=application, holder=holder,
                                    compel=True)
     del_resource(session, logger, dbapp)
     return
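
This and the snippets below share one idiom: resolve the owning holder (a
host, cluster, or resourcegroup) with get_resource_holder(), look the
resource up with get_unique(), then hand it to del_resource() or
add_resource(). The stand-ins below are a minimal, hypothetical sketch of
the compel/preclude contract as inferred from these call sites; they are
not the real aquilon API:

class NotFoundException(Exception):
    pass

class ArgumentError(Exception):
    pass

def get_unique(store, name, compel=False, preclude=False):
    # 'store' is a plain dict standing in for the database session.
    # compel=True:   a missing object is an error (the del_* pattern).
    # preclude=True: an existing object is an error (the add_* pattern).
    obj = store.get(name)
    if compel and obj is None:
        raise NotFoundException("%s not found" % name)
    if preclude and obj is not None:
        raise ArgumentError("%s already exists" % name)
    return obj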
Example #2
 def render(self, session, logger, share, hostname, resourcegroup, cluster,
            **arguments):
     holder = get_resource_holder(session, hostname, cluster, resourcegroup)
     dbshare = Share.get_unique(session, name=share, holder=holder,
                                compel=True)
     del_resource(session, logger, dbshare)
     return
Example #3
    def render(self, session, logger, filesystem, type, mountpoint,
               blockdevice, bootmount, dumpfreq, fsckpass, options, hostname,
               cluster, resourcegroup, comments, **arguments):

        validate_basic("filesystem", filesystem)
        holder = get_resource_holder(session,
                                     hostname,
                                     cluster,
                                     resourcegroup,
                                     compel=False)

        Filesystem.get_unique(session,
                              name=filesystem,
                              holder=holder,
                              preclude=True)

        if dumpfreq is None:
            dumpfreq = 0
        if fsckpass is None:
            # This is already set by defaults in input.xml, but
            # we're being extra paranoid...
            fsckpass = 2  # pragma: no cover

        dbfs = Filesystem(name=filesystem,
                          mountpoint=mountpoint,
                          mountoptions=options,
                          mount=bool(bootmount),
                          blockdev=blockdevice,
                          fstype=type,
                          passno=fsckpass,
                          dumpfreq=dumpfreq,
                          comments=comments)

        return add_resource(session, logger, holder, dbfs)
Example #4
    def render(self, session, logger, filesystem, type, mountpoint,
               blockdevice, bootmount,
               dumpfreq, fsckpass, options,
               hostname, cluster, resourcegroup,
               comments, **arguments):

        validate_basic("filesystem", filesystem)
        holder = get_resource_holder(session, hostname, cluster, resourcegroup,
                                     compel=False)

        Filesystem.get_unique(session, name=filesystem, holder=holder,
                              preclude=True)

        if dumpfreq is None:
            dumpfreq = 0
        if fsckpass is None:
            # This is already set by defaults in input.xml, but
            # we're being extra paranoid...
            fsckpass = 2  # pragma: no cover

        dbfs = Filesystem(name=filesystem,
                          mountpoint=mountpoint,
                          mountoptions=options,
                          mount=bool(bootmount),
                          blockdev=blockdevice,
                          fstype=type,
                          passno=fsckpass,
                          dumpfreq=dumpfreq,
                          comments=comments
                          )

        return add_resource(session, logger, holder, dbfs)
Example #5
 def render(self, session, logger, filesystem, hostname, cluster,
            resourcegroup, **arguments):
     holder = get_resource_holder(session, hostname, cluster, resourcegroup)
     dbfs = Filesystem.get_unique(session, name=filesystem, holder=holder,
                                  compel=True)
     del_resource(session, logger, dbfs)
     return
Example #6
 def render(self, session, logger, hostname, cluster, resourcegroup,
            hostlink, **arguments):
     holder = get_resource_holder(session, hostname, cluster, resourcegroup)
     dbhl = Hostlink.get_unique(session, name=hostlink, holder=holder,
                                compel=True)
     del_resource(session, logger, dbhl)
     return
Example #7
    def render(self, session, logger, hostname, cluster, **arguments):

        intervention = "reboot_intervention"

        holder = get_resource_holder(session, hostname, cluster)
        res = RebootIntervention.get_unique(session, name=intervention,
                                            holder=holder, compel=True)
        return del_resource(session, logger, res)
Example #8
    def render(self, session, logger, hostname, cluster,
               intervention, **arguments):

        validate_basic("intervention", intervention)
        holder = get_resource_holder(session, hostname, cluster)
        dbapp = Intervention.get_unique(session, name=intervention,
                                        holder=holder, compel=True)
        del_resource(session, logger, dbapp)
        return
Example #9
def show_resource(session, hostname, cluster, resourcegroup, all, name,
                  resource_class):
    q = session.query(resource_class)
    if name:
        q = q.filter_by(name=name)
    if hostname or cluster or resourcegroup:
        who = get_resource_holder(session, hostname, cluster, resourcegroup)
        q = q.filter_by(holder=who)

    return q.all()
Example #10
    def render(self, session, logger, hostname, cluster, **arguments):

        reboot_schedule = "reboot_schedule"

        holder = get_resource_holder(session, hostname, cluster)
        res = RebootSchedule.get_unique(session, name=reboot_schedule,
                                        holder=holder, compel=True)

        return del_resource(session, logger, res)
Example #11
    def render(self, session, logger, filesystem,
               hostname, cluster, resourcegroup, **arguments):

        validate_basic("filesystem", filesystem)
        holder = get_resource_holder(session, hostname, cluster, resourcegroup)
        dbfs = Filesystem.get_unique(session, name=filesystem, holder=holder,
                                     compel=True)
        del_resource(session, logger, dbfs)
        return
Example #12
    def render(self, session, logger, hostname, cluster,
               application, **arguments):

        validate_basic("application", application)
        holder = get_resource_holder(session, hostname, cluster)
        dbapp = Application.get_unique(session, name=application, holder=holder,
                                       compel=True)
        del_resource(session, logger, dbapp)
        return
Example #13
    def render(self, session, logger, hostlink, target, owner, group,
               hostname, cluster, resourcegroup, comments, **arguments):

        validate_nlist_key("hostlink", hostlink)
        holder = get_resource_holder(session, hostname, cluster, resourcegroup,
                                     compel=False)

        Hostlink.get_unique(session, name=hostlink, holder=holder,
                            preclude=True)

        dbhl = Hostlink(name=hostlink, comments=comments, target=target,
                        owner_user=owner, owner_group=group)
        return add_resource(session, logger, holder, dbhl)
Example #14
def lookup_target(session, plenaries, hostname, ip, cluster, resourcegroup,
                  service_address, alias):
    """
    Check the parameters of the server providing a given service

    Look for potential conflicts, and return a dict that is suitable to be
    passed to either the constructor of ServiceInstanceServer, or to the
    find_server() function.
    """

    params = {}

    if cluster and hostname:
        raise ArgumentError("Only one of --cluster and --hostname may be "
                            "specified.")

    if alias:
        dbdns_env = DnsEnvironment.get_unique_or_default(session)
        dbdns_rec = Alias.get_unique(session, fqdn=alias,
                                     dns_environment=dbdns_env, compel=True)
        params["alias"] = dbdns_rec

    if hostname:
        params["host"] = hostname_to_host(session, hostname)
        plenaries.append(Plenary.get_plenary(params["host"]))
    if cluster:
        params["cluster"] = Cluster.get_unique(session, cluster, compel=True)
        plenaries.append(Plenary.get_plenary(params["cluster"]))

    if service_address:
        # TODO: calling get_resource_holder() means doing redundant DB lookups
        # TODO: it would be nice to also accept an FQDN for the service address,
        # to be consistent with the usage of the --service_address option in
        # add_service_address/del_service_address
        holder = get_resource_holder(session, hostname=hostname,
                                     cluster=cluster,
                                     resgroup=resourcegroup, compel=True)

        dbsrv_addr = ServiceAddress.get_unique(session,
                                               name=service_address,
                                               holder=holder, compel=True)
        params["service_address"] = dbsrv_addr
    elif ip:
        for addr in params["host"].hardware_entity.all_addresses():
            if ip != addr.ip:
                continue

            if addr.service_address:
                params["service_address"] = addr.service_address
            else:
                params["address_assignment"] = addr
            break

    return params
Example #15
    def render(self, session, logger, hostname, cluster, resourcegroup,
               hostlink, **arguments):

        validate_basic("hostlink", hostlink)
        holder = get_resource_holder(session, hostname, cluster, resourcegroup)
        dbhl = Hostlink.get_unique(session,
                                   name=hostlink,
                                   holder=holder,
                                   compel=True)
        del_resource(session, logger, dbhl)
        return
Example #16
    def render(self, session, logger, share, hostname, resourcegroup, cluster,
               **arguments):

        validate_basic("share", share)
        holder = get_resource_holder(session, hostname, cluster, resourcegroup)
        dbshare = Share.get_unique(session,
                                   name=share,
                                   holder=holder,
                                   compel=True)
        del_resource(session, logger, dbshare)
        return
Example #17
def show_resource(session, hostname, cluster, resourcegroup, all, name,
                  resource_class):
    q = session.query(resource_class)
    if all:
        return ResourceList(q.all())
    if name:
        q = q.filter_by(name=name)

    if hostname or cluster or resourcegroup:
        who = get_resource_holder(session, hostname, cluster, resourcegroup)
        q = q.filter_by(holder=who)

    return ResourceList(q.all())
Example #18
    def render(self, session, logger, application, eonid,
               hostname, cluster, resourcegroup,
               comments, **arguments):

        validate_basic("application", application)
        holder = get_resource_holder(session, hostname, cluster,
                                     resourcegroup, compel=False)

        Application.get_unique(session, name=application, holder=holder,
                               preclude=True)

        dbapp = Application(name=application, comments=comments, eonid=eonid)
        return add_resource(session, logger, holder, dbapp)
Example #19
    def render(self, session, logger, name, hostname, cluster, resourcegroup,
               keep_dns, **arguments):

        validate_basic("name", name)

        if name == "hostname":
            raise ArgumentError("The primary address of the host cannot "
                                "be deleted.")

        holder = get_resource_holder(session,
                                     hostname,
                                     cluster,
                                     resourcegroup,
                                     compel=False)

        dbsrv = ServiceAddress.get_unique(session,
                                          name=name,
                                          holder=holder,
                                          compel=True)

        if isinstance(holder.holder_object, Host):
            oldinfo = DSDBRunner.snapshot_hw(holder.holder_object.machine)
        else:
            oldinfo = None

        dbdns_rec = dbsrv.dns_record

        for addr in dbsrv.assignments:
            addr.interface.assignments.remove(addr)
        session.expire(dbsrv, ['assignments'])

        session.flush()

        # Check if the address was assigned to multiple interfaces, and remove
        # the DNS entries if this was the last use
        q = session.query(AddressAssignment)
        q = q.filter_by(network=dbdns_rec.network)
        q = q.filter_by(ip=dbdns_rec.ip)
        other_uses = q.all()

        del_resource(session,
                     logger,
                     dbsrv,
                     dsdb_callback=del_srv_dsdb_callback,
                     oldinfo=oldinfo,
                     keep_dns=other_uses or keep_dns)

        if not other_uses and not keep_dns:
            delete_dns_record(dbdns_rec)

        return
Example #20
File: add_share.py  Project: jrha/aquilon
    def render(self, session, logger, share,
               comments, hostname, resourcegroup, cluster, **arguments):

        validate_basic("share", share)
        holder = get_resource_holder(session,
                                     hostname, cluster, resourcegroup,
                                     compel=False)

        Share.get_unique(session, name=share, holder=holder, preclude=True)

        dbshare = Share(name=share, comments=comments)
        add_resource(session, logger, holder, dbshare)

        return
Example #21
    def render(self, session, logger, resourcegroup, hostname, cluster,
               **arguments):
        holder = get_resource_holder(session, hostname, cluster, compel=True)
        dbrg = ResourceGroup.get_unique(session, name=resourcegroup,
                                        holder=holder, compel=True)

        # Deleting service addresses can't be done with just cascading
        if dbrg.resholder:
            for res in dbrg.resholder.resources:
                if isinstance(res, ServiceAddress):
                    raise ArgumentError("{0} contains {1:l}, please delete "
                                        "it first.".format(dbrg, res))

        del_resource(session, logger, dbrg)
        return
Example #22
    def render(self, session, logger, share, comments, hostname, resourcegroup,
               cluster, **arguments):

        validate_basic("share", share)
        holder = get_resource_holder(session,
                                     hostname,
                                     cluster,
                                     resourcegroup,
                                     compel=False)

        Share.get_unique(session, name=share, holder=holder, preclude=True)

        dbshare = Share(name=share, comments=comments)
        add_resource(session, logger, holder, dbshare)

        return
Example #23
    def render(self, session, logger, resourcegroup, required_type,
               hostname, cluster, **arguments):

        validate_basic("resourcegroup", resourcegroup)

        if required_type is not None:
            Resource.polymorphic_subclass(required_type,
                                          "Unknown resource type")
            if required_type == "resourcegroup":
                raise ArgumentError("A resourcegroup can't hold other "
                                    "resourcegroups.")

        holder = get_resource_holder(session, hostname, cluster, compel=False)

        ResourceGroup.get_unique(session, name=resourcegroup, holder=holder,
                                 preclude=True)

        dbrg = ResourceGroup(name=resourcegroup, required_type=required_type)
        return add_resource(session, logger, holder, dbrg)
Example #24
    def render(self, session, logger, name, hostname, cluster, resourcegroup,
               keep_dns, **arguments):

        validate_basic("name", name)

        if name == "hostname":
            raise ArgumentError("The primary address of the host cannot "
                                "be deleted.")

        holder = get_resource_holder(session, hostname, cluster,
                                     resourcegroup, compel=False)

        dbsrv = ServiceAddress.get_unique(session, name=name, holder=holder,
                                          compel=True)

        if isinstance(holder.holder_object, Host):
            oldinfo = DSDBRunner.snapshot_hw(holder.holder_object.machine)
        else:
            oldinfo = None

        dbdns_rec = dbsrv.dns_record

        for addr in dbsrv.assignments:
            addr.interface.assignments.remove(addr)
        session.expire(dbsrv, ['assignments'])

        session.flush()

        # Check if the address was assigned to multiple interfaces, and remove
        # the DNS entries if this was the last use
        q = session.query(AddressAssignment)
        q = q.filter_by(network=dbdns_rec.network)
        q = q.filter_by(ip=dbdns_rec.ip)
        other_uses = q.all()

        del_resource(session, logger, dbsrv,
                     dsdb_callback=del_srv_dsdb_callback, oldinfo=oldinfo,
                     keep_dns=other_uses or keep_dns)

        if not other_uses and not keep_dns:
            delete_dns_record(dbdns_rec)

        return
Example #25
    def render(self, session, logger, resourcegroup, required_type,
               hostname, cluster, **arguments):

        validate_nlist_key("resourcegroup", resourcegroup)

        if required_type is not None:
            Resource.polymorphic_subclass(required_type,
                                          "Unknown resource type")
            if required_type == "resourcegroup":
                raise ArgumentError("A resourcegroup can't hold other "
                                    "resourcegroups.")

        holder = get_resource_holder(session, hostname, cluster, compel=False)

        ResourceGroup.get_unique(session, name=resourcegroup, holder=holder,
                                 preclude=True)

        dbrg = ResourceGroup(name=resourcegroup, required_type=required_type)
        return add_resource(session, logger, holder, dbrg)
Example #26
    def render(self, session, share, hostname, resourcegroup, cluster, all,
               **arguments):
        q = session.query(Share)
        if share:
            q = q.filter_by(name=share)

        q = q.options(undefer(Share.disk_count), undefer(Share.machine_count))
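        # (undefer() loads the otherwise-deferred disk_count/machine_count
        # columns with the main query instead of lazily per share)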

        if hostname or cluster or resourcegroup:
            who = get_resource_holder(session, hostname, cluster,
                                      resourcegroup)
            q = q.filter_by(holder=who)

        shares = q.all()

        share_info = cache_storage_data()
        for dbshare in shares:
            dbshare.populate_share_info(share_info)

        return ResourceList(shares)
Example #27
    def render(self, session, share, hostname, resourcegroup, cluster, all,
               **arguments):
        q = session.query(Share)
        if share:
            q = q.filter_by(name=share)

        q = q.options(undefer(Share.disk_count),
                      undefer(Share.machine_count))

        if hostname or cluster or resourcegroup:
            who = get_resource_holder(session, hostname, cluster, resourcegroup)
            q = q.filter_by(holder=who)

        shares = q.all()

        share_info = cache_storage_data()
        for dbshare in shares:
            dbshare.populate_share_info(share_info)

        return ResourceList(shares)
Example #28
    def render(self, session, logger, hostlink, target, owner, group, hostname,
               cluster, resourcegroup, comments, **arguments):

        validate_basic("hostlink", hostlink)
        holder = get_resource_holder(session,
                                     hostname,
                                     cluster,
                                     resourcegroup,
                                     compel=False)

        Hostlink.get_unique(session,
                            name=hostlink,
                            holder=holder,
                            preclude=True)

        dbhl = Hostlink(name=hostlink,
                        comments=comments,
                        target=target,
                        owner_user=owner,
                        owner_group=group)
        return add_resource(session, logger, holder, dbhl)
Example #29
    def render(self, session, logger, service_address, ip, name, interfaces,
               hostname, cluster, resourcegroup,
               network_environment, map_to_primary, comments, **arguments):

        validate_nlist_key("name", name)

        # TODO: generalize the error message - Layer-3 failover may be
        # implemented by other software, not just Zebra.
        if name == "hostname":
            raise ArgumentError("The hostname service address is reserved for "
                                "Zebra.  Please specify the --zebra_interfaces "
                                "option when calling add_host if you want the "
                                "primary name of the host to be managed by "
                                "Zebra.")

        ifnames = [ifname.strip().lower()
                   for ifname in interfaces.split(",") if ifname.strip()]
        if not ifnames:
            raise ArgumentError("Please specify at least one interface name.")

        holder = get_resource_holder(session, hostname, cluster,
                                     resourcegroup, compel=False)
        toplevel_holder = holder.toplevel_holder_object

        ServiceAddress.get_unique(session, name=name, holder=holder,
                                  preclude=True)

        # TODO: add allow_multi=True
        dbdns_rec, newly_created = grab_address(session, service_address, ip,
                                                network_environment)
        ip = dbdns_rec.ip

        if map_to_primary:
            if not isinstance(toplevel_holder, Host):
                raise ArgumentError("The --map_to_primary option works only "
                                    "for host-based service addresses.")
            dbdns_rec.reverse_ptr = toplevel_holder.hardware_entity.primary_name.fqdn

        # Disable autoflush, since the ServiceAddress object won't be complete
        # until add_resource() is called
        with session.no_autoflush:
            dbsrv = ServiceAddress(name=name, dns_record=dbdns_rec,
                                   comments=comments)
            holder.resources.append(dbsrv)

            oldinfo = None
            if isinstance(toplevel_holder, Cluster):
                if not toplevel_holder.hosts:
                    # The interface names are only stored in the
                    # AddressAssignment objects, so we can't handle a cluster
                    # with no hosts and thus no interfaces
                    raise ArgumentError("Cannot assign a service address to a "
                                        "cluster that has no members.")
                for host in toplevel_holder.hosts:
                    apply_service_address(host, ifnames, dbsrv, logger)
            elif isinstance(toplevel_holder, Host):
                oldinfo = DSDBRunner.snapshot_hw(toplevel_holder.hardware_entity)
                apply_service_address(toplevel_holder, ifnames, dbsrv, logger)
            else:  # pragma: no cover
                raise UnimplementedError("{0} as a resource holder is not "
                                         "implemented.".format(toplevel_holder))

        add_resource(session, logger, holder, dbsrv,
                     dsdb_callback=add_srv_dsdb_callback,
                     toplevel_holder=toplevel_holder, oldinfo=oldinfo,
                     newly_created=newly_created, comments=comments)

        return
Example #30
            raise ArgumentError("the expiry value '%s' could not be "
                                "interpreted: %s" % (expiry, e))

        if start_time is None:
            start_when = datetime.utcnow().replace(microsecond=0)
        else:
            try:
                start_when = parse(start_time)
            except ValueError, e:
                raise ArgumentError("the start time '%s' could not be "
                                    "interpreted: %s" % (start_time, e))

        if start_when > expire_when:
            raise ArgumentError("the start time is later than the expiry time")

        holder = get_resource_holder(session, hostname, cluster, compel=False)

        Intervention.get_unique(session,
                                name=intervention,
                                holder=holder,
                                preclude=True)

        dbiv = Intervention(name=intervention,
                            expiry_date=expire_when,
                            start_date=start_when,
                            users=allowusers,
                            groups=allowgroups,
                            disabled=disabled_actions,
                            comments=comments,
                            justification=justification)
Example #31
    def render(self, session, logger, machine, model, vendor, serial, chassis,
               slot, cpuname, cpuvendor, cpuspeed, cpucount, memory, cluster,
               vmhost, uri, comments, **arguments):
        dblocation = get_location(session,
                                  query_options=[subqueryload('parents'),
                                                 joinedload('parents.dns_maps')],
                                  **arguments)
        if chassis:
            dbchassis = Chassis.get_unique(session, chassis, compel=True)
            if slot is None:
                raise ArgumentError("The --chassis option requires a --slot.")
            if dblocation and dblocation != dbchassis.location:
                raise ArgumentError("{0} conflicts with chassis location "
                                    "{1}.".format(dblocation, dbchassis.location))
            dblocation = dbchassis.location
        elif slot is not None:
            raise ArgumentError("The --slot option requires a --chassis.")

        dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                   compel=True)

        if not dbmodel.model_type.isMachineType():
            raise ArgumentError("The add_machine command cannot add machines "
                                "of type %s." % str(dbmodel.model_type))

        vmholder = None

        if cluster or vmhost:
            if cluster and vmhost:
                raise ArgumentError("Cluster and vmhost cannot be specified "
                                    "together.")
            if not dbmodel.model_type.isVirtualMachineType():
                raise ArgumentError("{0} is not a virtual machine."
                                    .format(dbmodel))

            # TODO: do we need VMs inside resource groups?
            vmholder = get_resource_holder(session, hostname=vmhost,
                                           cluster=cluster, resgroup=None,
                                           compel=False)

            if vmholder.holder_object.status.name == 'decommissioned':
                raise ArgumentError("Cannot add virtual machines to "
                                    "decommissioned clusters.")

            if cluster:
                container_loc = vmholder.holder_object.location_constraint
            else:
                container_loc = vmholder.holder_object.hardware_entity.location

            if dblocation and dblocation != container_loc:
                raise ArgumentError("Cannot override container location {0} "
                                    "with location {1}.".format(container_loc,
                                                                dblocation))
            dblocation = container_loc
        elif dbmodel.model_type.isVirtualMachineType():
            raise ArgumentError("Virtual machines must be assigned to a "
                                "cluster or a host.")

        Machine.get_unique(session, machine, preclude=True)
        dbmachine = create_machine(session, machine, dblocation, dbmodel,
                                   cpuname, cpuvendor, cpuspeed, cpucount,
                                   memory, serial, comments)

        if uri and not dbmodel.model_type.isVirtualAppliance():
            raise ArgumentError("URI can be specified only for virtual "
                                "appliances and the model's type is %s." %
                                dbmodel.model_type)

        dbmachine.uri = uri

        if chassis:
            # FIXME: Are virtual machines allowed to be in a chassis?
            dbslot = session.query(ChassisSlot).filter_by(chassis=dbchassis,
                                                          slot_number=slot).first()
            if not dbslot:
                dbslot = ChassisSlot(chassis=dbchassis, slot_number=slot)
            dbslot.machine = dbmachine
            session.add(dbslot)

        if vmholder:
            dbvm = VirtualMachine(machine=dbmachine, name=dbmachine.label,
                                  holder=vmholder)
            if hasattr(vmholder.holder_object, "validate") and \
               callable(vmholder.holder_object.validate):
                vmholder.holder_object.validate()

        session.flush()

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbmachine))
        if vmholder:
            plenaries.append(Plenary.get_plenary(vmholder.holder_object))
            plenaries.append(Plenary.get_plenary(dbvm))

        # The check to make sure a plenary file is not written out for
        # dummy aurora hardware is within the call to write().  This way
        # it is consistent without altering (and forgetting to alter)
        # all the calls to the method.
        plenaries.write()
        return
Example #32
    def render(self, session, logger, machine, model, vendor, serial,
               chassis, slot, clearchassis, multislot,
               vmhost, cluster, allow_metacluster_change,
               cpuname, cpuvendor, cpuspeed, cpucount, memory, ip,
               **arguments):
        dbmachine = Machine.get_unique(session, machine, compel=True)
        plenaries = PlenaryCollection(logger=logger)
        oldinfo = DSDBRunner.snapshot_hw(dbmachine)

        if clearchassis:
            del dbmachine.chassis_slot[:]

        remove_plenaries = PlenaryCollection(logger=logger)
        if chassis:
            dbchassis = Chassis.get_unique(session, chassis, compel=True)
            if machine_plenary_will_move(old=dbmachine.location,
                                         new=dbchassis.location):
                remove_plenaries.append(Plenary.get_plenary(dbmachine))
            dbmachine.location = dbchassis.location
            if slot is None:
                raise ArgumentError("Option --chassis requires --slot "
                                    "information.")
            self.adjust_slot(session, logger,
                             dbmachine, dbchassis, slot, multislot)
        elif slot is not None:
            dbchassis = None
            for dbslot in dbmachine.chassis_slot:
                if dbchassis and dbslot.chassis != dbchassis:
                    raise ArgumentError("Machine in multiple chassis, please "
                                        "use --chassis argument.")
                dbchassis = dbslot.chassis
            if not dbchassis:
                raise ArgumentError("Option --slot requires --chassis "
                                    "information.")
            self.adjust_slot(session, logger,
                             dbmachine, dbchassis, slot, multislot)

        dblocation = get_location(session, **arguments)
        if dblocation:
            loc_clear_chassis = False
            for dbslot in dbmachine.chassis_slot:
                dbcl = dbslot.chassis.location
                if dbcl != dblocation:
                    if chassis or slot is not None:
                        raise ArgumentError("{0} conflicts with chassis {1!s} "
                                            "location {2}.".format(dblocation,
                                                        dbslot.chassis, dbcl))
                    else:
                        loc_clear_chassis = True
            if loc_clear_chassis:
                del dbmachine.chassis_slot[:]
            if machine_plenary_will_move(old=dbmachine.location,
                                         new=dblocation):
                remove_plenaries.append(Plenary.get_plenary(dbmachine))
            dbmachine.location = dblocation

        if model or vendor:
            # If overriding model, should probably overwrite default
            # machine specs as well.
            if not model:
                model = dbmachine.model.name
            if not vendor:
                vendor = dbmachine.model.vendor.name
            dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                       compel=True)
            if dbmodel.machine_type not in ['blade', 'rackmount',
                                            'workstation', 'aurora_node',
                                            'virtual_machine']:
                raise ArgumentError("The update_machine command cannot update "
                                    "machines of type %s." %
                                    dbmodel.machine_type)
            # We probably could do this by forcing either cluster or
            # location data to be available as appropriate, but really?
            # Failing seems reasonable.
            if dbmodel.machine_type != dbmachine.model.machine_type and \
               'virtual_machine' in [dbmodel.machine_type,
                                     dbmachine.model.machine_type]:
                raise ArgumentError("Cannot change machine from %s to %s." %
                                    (dbmachine.model.machine_type,
                                     dbmodel.machine_type))

            old_nic_model = dbmachine.model.nic_model
            new_nic_model = dbmodel.nic_model
            if old_nic_model != new_nic_model:
                for iface in dbmachine.interfaces:
                    if iface.model == old_nic_model:
                        iface.model = new_nic_model

            dbmachine.model = dbmodel

        if cpuname or cpuvendor or cpuspeed is not None:
            dbcpu = Cpu.get_unique(session, name=cpuname, vendor=cpuvendor,
                                   speed=cpuspeed, compel=True)
            dbmachine.cpu = dbcpu

        if cpucount is not None:
            dbmachine.cpu_quantity = cpucount
        if memory is not None:
            dbmachine.memory = memory
        if serial:
            dbmachine.serial_no = serial

        if ip:
            update_primary_ip(session, dbmachine, ip)

        # FIXME: For now, if a machine has its interface(s) in a portgroup
        # this command will need to be followed by an update_interface to
        # re-evaluate the portgroup for overflow.
        # It would be better to have --pg and --autopg options to let it
        # happen at this point.
        if cluster or vmhost:
            if not dbmachine.vm_container:
                raise ArgumentError("Cannot convert a physical machine to "
                                    "virtual.")

            old_holder = dbmachine.vm_container.holder.holder_object
            resholder = get_resource_holder(session, hostname=vmhost,
                                            cluster=cluster, compel=False)
            new_holder = resholder.holder_object

            # TODO: do we want to allow moving machines between the cluster and
            # metacluster level?
            if new_holder.__class__ != old_holder.__class__:
                raise ArgumentError("Cannot move a VM between a cluster and a "
                                    "stand-alone host.")

            if cluster:
                if new_holder.metacluster != old_holder.metacluster \
                   and not allow_metacluster_change:
                    raise ArgumentError("Current {0:l} does not match "
                                        "new {1:l}."
                                        .format(old_holder.metacluster,
                                                new_holder.metacluster))

            remove_plenaries.append(Plenary.get_plenary(dbmachine.vm_container))
            dbmachine.vm_container.holder = resholder

            for dbdisk in dbmachine.disks:
                if not isinstance(dbdisk, VirtualDisk):
                    continue
                old_share = dbdisk.share
                if isinstance(old_share.holder, BundleResource):
                    resourcegroup = old_share.holder.name
                else:
                    resourcegroup = None
                new_share = find_share(new_holder, resourcegroup, old_share.name,
                                       error=ArgumentError)

                # If the shares are registered at the metacluster level and both
                # clusters are in the same metacluster, then there will be no
                # real change here
                if new_share != old_share:
                    old_share.disks.remove(dbdisk)
                    new_share.disks.append(dbdisk)

            if isinstance(new_holder, Cluster):
                dbmachine.location = new_holder.location_constraint
            else:
                dbmachine.location = new_holder.location

            session.flush()
            plenaries.append(Plenary.get_plenary(old_holder))
            plenaries.append(Plenary.get_plenary(new_holder))

        if dbmachine.vm_container:
            plenaries.append(Plenary.get_plenary(dbmachine.vm_container))

        session.flush()

        # Check if the changed parameters still meet cluster capacity
        # requirements
        if dbmachine.cluster:
            dbmachine.cluster.validate()
            if allow_metacluster_change:
                dbmachine.cluster.metacluster.validate()
        if dbmachine.host and dbmachine.host.cluster:
            dbmachine.host.cluster.validate()

        # The check to make sure a plenary file is not written out for
        # dummy aurora hardware is within the call to write().  This way
        # it is consistent without altering (and forgetting to alter)
        # all the calls to the method.
        plenaries.append(Plenary.get_plenary(dbmachine))
        if remove_plenaries.plenaries and dbmachine.host:
            plenaries.append(Plenary.get_plenary(dbmachine.host))

        key = CompileKey.merge([plenaries.get_write_key(),
                                remove_plenaries.get_remove_key()])
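        # Merge into a single compile key so plenary writes and removals
        # happen under one lock; the stashes allow rollback on failure.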
        try:
            lock_queue.acquire(key)
            remove_plenaries.stash()
            plenaries.write(locked=True)
            remove_plenaries.remove(locked=True)

            if dbmachine.host:
                # XXX: May need to reconfigure.
                pass

            dsdb_runner = DSDBRunner(logger=logger)
            dsdb_runner.update_host(dbmachine, oldinfo)
            dsdb_runner.commit_or_rollback("Could not update machine in DSDB")
        except:
            plenaries.restore_stash()
            remove_plenaries.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        return
Example #33
class CommandAddRebootSchedule(BrokerCommand):

    required_parameters = ["week", "day"]

    COMPONENTS = {
        "week": ["1", "2", "3", "4", "5"],
        "day": ["Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"],
    }
    REGEXP_VALIDATION = {
        "time": re.compile(r"^(?:[0-9:]+|None)$"),
        "week": re.compile(r'^(?:(?:'
                           + '|'.join(COMPONENTS["week"]) + ')(?:,(?:'
                           + '|'.join(COMPONENTS["week"]) + '))*|all)$'),
        #"day": re.compile(r'^(?:'
        #                   + '|'.join(COMPONENTS["day"]) + ')(?:,(?:'
        #                   + '|'.join(COMPONENTS["day"]) + '))*$')
        "day": re.compile(r'^(?:'
                          + '|'.join(COMPONENTS["day"])
                          + ')$')
    }

    def _fix_parameter_order(self, key, value):
        items = value.split(",")
        new = []
        for item in self.COMPONENTS[key]:
            if item in items:
                new.append(item)

        return ",".join(new)

    def _validate_args(self, logger, **arguments):
        """ Validate arguments used for adding a new record"""
        regexps = CommandAddRebootSchedule.REGEXP_VALIDATION
        for key, validator in regexps.iteritems():
            if key in arguments:
                data = str(arguments.get(key))
                if not validator.match(data):
                    raise ArgumentError("key %s contains a bad value" % key)

                if re.search(',', data):
                    dups = dict()

                    for sub in data.split(','):
                        if sub not in self.COMPONENTS[key]:
                            raise ArgumentError("parameter %s is not valid %s"
                                                % (sub, key))

                        if sub in dups:
                            raise ArgumentError("parameter %s duplicated in %s"
                                                % (sub, key))

                        dups[sub] = 1

        # enforce order to comma separated values
        if "day" in arguments:
            arguments["day"] = self._fix_parameter_order("day",
                                                         arguments["day"])

        if "week" in arguments and arguments["week"] != "all":
            arguments["week"] = self._fix_parameter_order("week",
                                                          arguments["week"])

        if "week" in arguments and arguments["week"] == "1,2,3,4,5":
            arguments["week"] = "all"

        return arguments

    def render(self, session, logger, **arguments):

        reboot_schedule = "reboot_schedule"
        validate_basic("reboot_schedule", reboot_schedule)
        arguments = self._validate_args(logger, **arguments)

        time = arguments["time"]
        week = arguments["week"].capitalize()
        day = arguments["day"].capitalize()
        hostname = arguments["hostname"]
        cluster = arguments["cluster"]
        comments = arguments["comments"]
        if time is not None:
            try:
                parse(time)
            except ValueError, e:
                raise ArgumentError("the preferred time '%s' could not be "
                                    "interpreted: %s" % (time, e))
        holder = get_resource_holder(session, hostname, cluster, compel=False)

        RebootSchedule.get_unique(session, name=reboot_schedule, holder=holder,
                                  preclude=True)

        res = RebootSchedule(name=reboot_schedule,
                             time=time,
                             week=week,
                             day=day,
                             comments=comments)

        return add_resource(session, logger, holder, res)
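
A standalone sketch of the canonicalization _fix_parameter_order() performs
(COMPONENTS copied from the class above): unrecognized items are dropped and
the rest are emitted in canonical order, so duplicates collapse and
"Wed,Mon" normalizes to "Mon,Wed".

COMPONENTS = {
    "week": ["1", "2", "3", "4", "5"],
    "day": ["Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"],
}

def fix_parameter_order(key, value):
    # Emit only the recognized components, in the order COMPONENTS defines.
    items = value.split(",")
    return ",".join(item for item in COMPONENTS[key] if item in items)

assert fix_parameter_order("day", "Wed,Mon") == "Mon,Wed"
assert fix_parameter_order("week", "5,1,5") == "1,5"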
Example #34
            start_when = datetime.utcnow().replace(microsecond=0)
        else:
            try:
                start_when = parse(start_time)

            except ValueError, e:
                raise ArgumentError("the start time '%s' could not be "
                                    "interpreted: %s" % (start_time, e))

        if start_when > expire_when:
            raise ArgumentError("the start time is later than the expiry time")

        # Check there is a reboot_schedule
        q = session.query(RebootSchedule)
        try:
            who = get_resource_holder(session, hostname, cluster)
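            # .one() raises NoResultFound when the host/cluster has no
            # reboot_schedule; the returned object itself is not used.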
            q.filter_by(holder=who).one()
        except NoResultFound, e:
            raise ArgumentError("there is no reboot_schedule defined")

        # More thorough check reboot_schedule and intervention
        # XXX TODO
        # i) detect week of month of start of intervention
        # ii) detect time
        # iii) compute week of application of reboot_schedule
        # iv) ... and time
        # v) test all the above doesn't conflict within 1hr of each other.

        # Setup intervention
        holder = get_resource_holder(session, hostname, cluster, compel=False)
Example #35
    def render(self, session, logger, machine, model, vendor, serial,
               chassis, slot, clearchassis, multislot,
               vmhost, cluster, allow_metacluster_change,
               cpuname, cpuvendor, cpuspeed, cpucount, memory, ip, uri,
               **arguments):
        dbmachine = Machine.get_unique(session, machine, compel=True)
        oldinfo = DSDBRunner.snapshot_hw(dbmachine)

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbmachine))
        if dbmachine.vm_container:
            plenaries.append(Plenary.get_plenary(dbmachine.vm_container))
        if dbmachine.host:
            # Using PlenaryHostData directly, to avoid warnings if the host has
            # not been configured yet
            plenaries.append(PlenaryHostData.get_plenary(dbmachine.host))

        if clearchassis:
            del dbmachine.chassis_slot[:]

        if chassis:
            dbchassis = Chassis.get_unique(session, chassis, compel=True)
            dbmachine.location = dbchassis.location
            if slot is None:
                raise ArgumentError("Option --chassis requires --slot "
                                    "information.")
            self.adjust_slot(session, logger,
                             dbmachine, dbchassis, slot, multislot)
        elif slot is not None:
            dbchassis = None
            for dbslot in dbmachine.chassis_slot:
                if dbchassis and dbslot.chassis != dbchassis:
                    raise ArgumentError("Machine in multiple chassis, please "
                                        "use --chassis argument.")
                dbchassis = dbslot.chassis
            if not dbchassis:
                raise ArgumentError("Option --slot requires --chassis "
                                    "information.")
            self.adjust_slot(session, logger,
                             dbmachine, dbchassis, slot, multislot)

        dblocation = get_location(session, **arguments)
        if dblocation:
            loc_clear_chassis = False
            for dbslot in dbmachine.chassis_slot:
                dbcl = dbslot.chassis.location
                if dbcl != dblocation:
                    if chassis or slot is not None:
                        raise ArgumentError("{0} conflicts with chassis {1!s} "
                                            "location {2}."
                                            .format(dblocation, dbslot.chassis,
                                                    dbcl))
                    else:
                        loc_clear_chassis = True
            if loc_clear_chassis:
                del dbmachine.chassis_slot[:]
            dbmachine.location = dblocation

            if dbmachine.host:
                for vm in dbmachine.host.virtual_machines:
                    plenaries.append(Plenary.get_plenary(vm))
                    vm.location = dblocation

        if model or vendor:
            # If overriding model, should probably overwrite default
            # machine specs as well.
            if not model:
                model = dbmachine.model.name
            if not vendor:
                vendor = dbmachine.model.vendor.name
            dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                       compel=True)
            if not dbmodel.model_type.isMachineType():
                raise ArgumentError("The update_machine command cannot update "
                                    "machines of type %s." %
                                    dbmodel.model_type)
            # We probably could do this by forcing either cluster or
            # location data to be available as appropriate, but really?
            # Failing seems reasonable.
            if dbmodel.model_type != dbmachine.model.model_type and \
               (dbmodel.model_type.isVirtualMachineType() or
                dbmachine.model.model_type.isVirtualMachineType()):
                raise ArgumentError("Cannot change machine from %s to %s." %
                                    (dbmachine.model.model_type,
                                     dbmodel.model_type))

            old_nic_model = dbmachine.model.nic_model
            new_nic_model = dbmodel.nic_model
            if old_nic_model != new_nic_model:
                for iface in dbmachine.interfaces:
                    if iface.model == old_nic_model:
                        iface.model = new_nic_model

            dbmachine.model = dbmodel

        if cpuname or cpuvendor or cpuspeed is not None:
            dbcpu = Cpu.get_unique(session, name=cpuname, vendor=cpuvendor,
                                   speed=cpuspeed, compel=True)
            dbmachine.cpu = dbcpu

        if cpucount is not None:
            dbmachine.cpu_quantity = cpucount
        if memory is not None:
            dbmachine.memory = memory
        if serial:
            dbmachine.serial_no = serial

        if ip:
            update_primary_ip(session, logger, dbmachine, ip)

        if uri and not dbmachine.model.model_type.isVirtualAppliance():
            raise ArgumentError("URI can be specified only for virtual "
                                "appliances and the model's type is %s" %
                                dbmachine.model.model_type)

        if uri:
            dbmachine.uri = uri

        # FIXME: For now, if a machine has its interface(s) in a portgroup
        # this command will need to be followed by an update_interface to
        # re-evaluate the portgroup for overflow.
        # It would be better to have --pg and --autopg options to let it
        # happen at this point.
        if cluster or vmhost:
            if not dbmachine.vm_container:
                raise ArgumentError("Cannot convert a physical machine to "
                                    "virtual.")

            old_holder = dbmachine.vm_container.holder.holder_object
            resholder = get_resource_holder(session, hostname=vmhost,
                                            cluster=cluster, compel=False)
            new_holder = resholder.holder_object

            if self.get_metacluster(new_holder) != self.get_metacluster(old_holder) \
               and not allow_metacluster_change:
                raise ArgumentError("Current {0:l} does not match "
                                    "new {1:l}."
                                    .format(self.get_metacluster(old_holder),
                                            self.get_metacluster(new_holder)))

            plenaries.append(Plenary.get_plenary(old_holder))
            plenaries.append(Plenary.get_plenary(new_holder))

            dbmachine.vm_container.holder = resholder

            for dbdisk in dbmachine.disks:
                if isinstance(dbdisk, VirtualNasDisk):
                    old_share = dbdisk.share
                    if isinstance(old_share.holder, BundleResource):
                        resourcegroup = old_share.holder.resourcegroup.name
                    else:
                        resourcegroup = None

                    new_share = find_resource(Share, new_holder, resourcegroup,
                                              old_share.name,
                                              error=ArgumentError)

                    # If the shares are registered at the metacluster level and both
                    # clusters are in the same metacluster, then there will be no
                    # real change here
                    if new_share != old_share:
                        old_share.disks.remove(dbdisk)
                        new_share.disks.append(dbdisk)

                if isinstance(dbdisk, VirtualLocalDisk):
                    old_filesystem = dbdisk.filesystem

                    new_filesystem = find_resource(Filesystem, new_holder, None,
                                                   old_filesystem.name,
                                                   error=ArgumentError)

                    if new_filesystem != old_filesystem:
                        old_filesystem.disks.remove(dbdisk)
                        new_filesystem.disks.append(dbdisk)

            if isinstance(new_holder, Cluster):
                dbmachine.location = new_holder.location_constraint
            else:
                # vmhost
                dbmachine.location = new_holder.hardware_entity.location

        session.flush()

        # Check if the changed parameters still meet cluster capacity
        # requirements
        if dbmachine.cluster:
            dbmachine.cluster.validate()
            if allow_metacluster_change:
                dbmachine.cluster.metacluster.validate()
        if dbmachine.host and dbmachine.host.cluster:
            dbmachine.host.cluster.validate()

        # The check to make sure a plenary file is not written out for
        # dummy aurora hardware is within the call to write().  This way
        # it is consistent without altering (and forgetting to alter)
        # all the calls to the method.

        with plenaries.get_key():
            plenaries.stash()
            try:
                plenaries.write(locked=True)

                dsdb_runner = DSDBRunner(logger=logger)
                dsdb_runner.update_host(dbmachine, oldinfo)
                dsdb_runner.commit_or_rollback("Could not update machine in DSDB")
            except:
                plenaries.restore_stash()
                raise

        return
Example #36
    def render(self, session, logger, service_address, ip, name, interfaces,
               hostname, cluster, resourcegroup, network_environment,
               map_to_primary, comments, **arguments):

        validate_basic("name", name)

        # TODO: generalize the error message - Layer-3 failover may be
        # implemented by other software, not just Zebra.
        if name == "hostname":
            raise ArgumentError(
                "The hostname service address is reserved for "
                "Zebra.  Please specify the --zebra_interfaces "
                "option when calling add_host if you want the "
                "primary name of the host to be managed by "
                "Zebra.")

        ifnames = [ifname.strip().lower()
                   for ifname in interfaces.split(",") if ifname.strip()]
        if not ifnames:
            raise ArgumentError("Please specify at least one interface name.")

        holder = get_resource_holder(session,
                                     hostname,
                                     cluster,
                                     resourcegroup,
                                     compel=False)

        # Address assignments should be added based on the host/cluster, so we
        # have to resolve resource groups first
        if isinstance(holder.holder_object, ResourceGroup):
            real_holder = holder.holder_object.holder.holder_object
        else:
            real_holder = holder.holder_object

        ServiceAddress.get_unique(session,
                                  name=name,
                                  holder=holder,
                                  preclude=True)

        # TODO: add allow_multi=True
        dbdns_rec, newly_created = grab_address(session, service_address, ip,
                                                network_environment)
        ip = dbdns_rec.ip
        dbnetwork = dbdns_rec.network

        if map_to_primary:
            if not isinstance(real_holder, Host):
                raise ArgumentError("The --map_to_primary option works only "
                                    "for host-based service addresses.")
            dbdns_rec.reverse_ptr = real_holder.machine.primary_name.fqdn

        # Disable autoflush, since the ServiceAddress object won't be complete
        # until add_resource() is called
        with session.no_autoflush:
            dbsrv = ServiceAddress(name=name,
                                   dns_record=dbdns_rec,
                                   comments=comments)
            holder.resources.append(dbsrv)

            oldinfo = None
            if isinstance(real_holder, Cluster):
                if not real_holder.hosts:
                    # The interface names are only stored in the
                    # AddressAssignment objects, so we can't handle a cluster
                    # with no hosts and thus no interfaces
                    raise ArgumentError("Cannot assign a service address to a "
                                        "cluster that has no members.")
                for host in real_holder.hosts:
                    apply_service_address(host, ifnames, dbsrv)
            elif isinstance(real_holder, Host):
                oldinfo = DSDBRunner.snapshot_hw(real_holder.machine)
                apply_service_address(real_holder, ifnames, dbsrv)
            else:  # pragma: no cover
                raise UnimplementedError("{0} as a resource holder is not "
                                         "implemented.".format(real_holder))

        add_resource(session,
                     logger,
                     holder,
                     dbsrv,
                     dsdb_callback=add_srv_dsdb_callback,
                     real_holder=real_holder,
                     oldinfo=oldinfo,
                     newly_created=newly_created,
                     comments=comments)

        return