def render(self, session, logger, share, comments, hostname, resourcegroup,
           cluster, **arguments):
    validate_basic("share", share)

    holder = get_resource_holder(session, hostname, cluster, resourcegroup,
                                 compel=False)

    Share.get_unique(session, name=share, holder=holder, preclude=True)

    dbshare = Share(name=share, comments=comments)
    add_resource(session, logger, holder, dbshare)

    return
def render(self, session, logger, filesystem, type, mountpoint, blockdevice,
           bootmount, dumpfreq, fsckpass, options, hostname, cluster,
           resourcegroup, comments, **arguments):
    validate_basic("filesystem", filesystem)

    holder = get_resource_holder(session, hostname, cluster, resourcegroup,
                                 compel=False)

    Filesystem.get_unique(session, name=filesystem, holder=holder,
                          preclude=True)

    if dumpfreq is None:
        dumpfreq = 0
    if fsckpass is None:
        # This is already set by defaults in input.xml, but
        # we're being extra paranoid...
        fsckpass = 2  # pragma: no cover

    dbfs = Filesystem(name=filesystem, mountpoint=mountpoint,
                      mountoptions=options, mount=bool(bootmount),
                      blockdev=blockdevice, fstype=type, passno=fsckpass,
                      dumpfreq=dumpfreq, comments=comments)

    return add_resource(session, logger, holder, dbfs)
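# --- Illustrative sketch, not part of the broker code ---
# The Filesystem fields above mirror the columns of an /etc/fstab entry,
# which is why dumpfreq falls back to 0 and fsckpass to 2 (root filesystems
# conventionally use passno 1, everything else 2). The helper name and the
# example values below are hypothetical.
def render_fstab_line(blockdev, mountpoint, fstype, options, dumpfreq=0,
                      passno=2):
    return "%s %s %s %s %d %d" % (blockdev, mountpoint, fstype,
                                  options or "defaults", dumpfreq, passno)

print render_fstab_line("/dev/vx/dsk/dg0/vol0", "/d/d1", "ext3", "rw")
# -> /dev/vx/dsk/dg0/vol0 /d/d1 ext3 rw 0 2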
def render(self, session, logger, hostlink, target, owner, group, hostname,
           cluster, resourcegroup, comments, **arguments):
    validate_nlist_key("hostlink", hostlink)

    holder = get_resource_holder(session, hostname, cluster, resourcegroup,
                                 compel=False)

    Hostlink.get_unique(session, name=hostlink, holder=holder, preclude=True)

    dbhl = Hostlink(name=hostlink, comments=comments, target=target,
                    owner_user=owner, owner_group=group)

    return add_resource(session, logger, holder, dbhl)
def render(self, session, logger, application, eonid, hostname, cluster,
           resourcegroup, comments, **arguments):
    validate_basic("application", application)

    holder = get_resource_holder(session, hostname, cluster, resourcegroup,
                                 compel=False)

    Application.get_unique(session, name=application, holder=holder,
                           preclude=True)

    dbapp = Application(name=application, comments=comments, eonid=eonid)

    return add_resource(session, logger, holder, dbapp)
def render(self, session, logger, resourcegroup, required_type, hostname,
           cluster, **arguments):
    validate_nlist_key("resourcegroup", resourcegroup)

    if required_type is not None:
        Resource.polymorphic_subclass(required_type, "Unknown resource type")
        if required_type == "resourcegroup":
            raise ArgumentError("A resourcegroup can't hold other "
                                "resourcegroups.")

    holder = get_resource_holder(session, hostname, cluster, compel=False)

    ResourceGroup.get_unique(session, name=resourcegroup, holder=holder,
                             preclude=True)

    dbrg = ResourceGroup(name=resourcegroup, required_type=required_type)

    return add_resource(session, logger, holder, dbrg)
try:
    # 'q' is the RebootSchedule query built earlier in this method
    who = get_resource_holder(session, hostname, cluster)
    q.filter_by(holder=who).one()
except NoResultFound:
    raise ArgumentError("there is no reboot_schedule defined")

# More thorough checks of reboot_schedule and intervention
# XXX TODO
# i) detect week of month of start of intervention
# ii) detect time
# iii) compute week of application of reboot_schedule
# iv) ... and time
# v) test that none of the above conflict within 1 hr of each other

# Set up the intervention
holder = get_resource_holder(session, hostname, cluster, compel=False)

RebootIntervention.get_unique(session, name=intervention, holder=holder,
                              preclude=True)

dbiv = RebootIntervention(name=intervention,
                          expiry_date=expire_when,
                          start_date=start_when,
                          users=allowusers,
                          groups=allowgroups,
                          disabled=disabled_actions,
                          comments=comments,
                          justification=justification)

return add_resource(session, logger, holder, dbiv)
def render(self, session, logger, service_address, ip, name, interfaces,
           hostname, cluster, resourcegroup, network_environment,
           map_to_primary, comments, **arguments):
    validate_nlist_key("name", name)

    # TODO: generalize the error message - Layer-3 failover may be
    # implemented by other software, not just Zebra.
    if name == "hostname":
        raise ArgumentError("The hostname service address is reserved for "
                            "Zebra. Please specify the --zebra_interfaces "
                            "option when calling add_host if you want the "
                            "primary name of the host to be managed by "
                            "Zebra.")

    # Normalize the interface names; drop empty entries so that a stray
    # comma or blank value is caught by the check below
    ifnames = [ifname.strip().lower() for ifname in interfaces.split(",")
               if ifname.strip()]
    if not ifnames:
        raise ArgumentError("Please specify at least one interface name.")

    holder = get_resource_holder(session, hostname, cluster, resourcegroup,
                                 compel=False)

    # Address assignments should be added based on the host/cluster, so we
    # have to resolve resource groups first
    toplevel_holder = holder.toplevel_holder_object

    ServiceAddress.get_unique(session, name=name, holder=holder,
                              preclude=True)

    # TODO: add allow_multi=True
    dbdns_rec, newly_created = grab_address(session, service_address, ip,
                                            network_environment)
    ip = dbdns_rec.ip

    if map_to_primary:
        if not isinstance(toplevel_holder, Host):
            raise ArgumentError("The --map_to_primary option works only "
                                "for host-based service addresses.")
        dbdns_rec.reverse_ptr = toplevel_holder.hardware_entity.primary_name.fqdn

    # Disable autoflush, since the ServiceAddress object won't be complete
    # until add_resource() is called
    with session.no_autoflush:
        dbsrv = ServiceAddress(name=name, dns_record=dbdns_rec,
                               comments=comments)
        holder.resources.append(dbsrv)

        oldinfo = None
        if isinstance(toplevel_holder, Cluster):
            if not toplevel_holder.hosts:
                # The interface names are only stored in the
                # AddressAssignment objects, so we can't handle a cluster
                # with no hosts and thus no interfaces
                raise ArgumentError("Cannot assign a service address to a "
                                    "cluster that has no members.")
            for host in toplevel_holder.hosts:
                apply_service_address(host, ifnames, dbsrv, logger)
        elif isinstance(toplevel_holder, Host):
            oldinfo = DSDBRunner.snapshot_hw(toplevel_holder.hardware_entity)
            apply_service_address(toplevel_holder, ifnames, dbsrv, logger)
        else:  # pragma: no cover
            raise UnimplementedError("{0} as a resource holder is not "
                                     "implemented.".format(toplevel_holder))

    add_resource(session, logger, holder, dbsrv,
                 dsdb_callback=add_srv_dsdb_callback,
                 toplevel_holder=toplevel_holder, oldinfo=oldinfo,
                 newly_created=newly_created, comments=comments)

    return
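# --- Illustrative sketch, not part of the broker code ---
# A standalone look at the interface-name normalization used above: the
# --interfaces value is split on commas, stripped, lowercased, and empty
# entries are dropped so the "at least one interface" check can trigger.
interfaces = "eth0, ETH1,"
ifnames = [ifname.strip().lower() for ifname in interfaces.split(",")
           if ifname.strip()]
print ifnames  # -> ['eth0', 'eth1']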
if start_time is None:
    start_when = datetime.utcnow().replace(microsecond=0)
else:
    try:
        start_when = parse(start_time)
    except ValueError, e:
        raise ArgumentError("the start time '%s' could not be "
                            "interpreted: %s" % (start_time, e))

if start_when > expire_when:
    raise ArgumentError("the start time is later than the expiry time")

holder = get_resource_holder(session, hostname, cluster, compel=False)

Intervention.get_unique(session, name=intervention, holder=holder,
                        preclude=True)

dbiv = Intervention(name=intervention,
                    expiry_date=expire_when,
                    start_date=start_when,
                    users=allowusers,
                    groups=allowgroups,
                    disabled=disabled_actions,
                    comments=comments,
                    justification=justification)

return add_resource(session, logger, holder, dbiv)
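# --- Illustrative sketch, not part of the broker code ---
# The parse() call above is assumed to be dateutil.parser.parse; under that
# assumption, the start/expiry handling boils down to the following (the
# date strings are made-up examples).
from datetime import datetime
from dateutil.parser import parse

start_time = "2013-06-01 08:00"        # None when --start_time is omitted
expire_when = parse("2013-06-01 18:00")

if start_time is None:
    start_when = datetime.utcnow().replace(microsecond=0)
else:
    start_when = parse(start_time)

if start_when > expire_when:
    raise ValueError("the start time is later than the expiry time")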
class CommandAddRebootSchedule(BrokerCommand):

    required_parameters = ["week", "day"]

    COMPONENTS = {
        "week": ["1", "2", "3", "4", "5"],
        "day": ["Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"],
    }

    REGEXP_VALIDATION = {
        "time": re.compile(r"^(?:[0-9:]+|None)$"),
        "week": re.compile(r'^(?:(?:' + '|'.join(COMPONENTS["week"]) +
                           r')(?:,(?:' + '|'.join(COMPONENTS["week"]) +
                           r'))*|all)$'),
        #"day": re.compile(r'^(?:' + '|'.join(COMPONENTS["day"]) +
        #                  r')(?:,(?:' + '|'.join(COMPONENTS["day"]) +
        #                  r'))*$')
        "day": re.compile(r'^(?:' + '|'.join(COMPONENTS["day"]) + r')$')
    }

    def _fix_parameter_order(self, key, value):
        items = value.split(",")
        new = []
        for item in self.COMPONENTS[key]:
            if item in items:
                new.append(item)
        return ",".join(new)

    def _validate_args(self, logger, **arguments):
        """Validate arguments used for adding a new record."""
        regexps = CommandAddRebootSchedule.REGEXP_VALIDATION
        for key, validator in regexps.iteritems():
            if key in arguments:
                data = str(arguments.get(key))
                if not validator.match(data):
                    raise ArgumentError("key %s contains a bad value" % key)
                if re.search(',', data):
                    dups = dict()
                    for sub in data.split(','):
                        if sub not in self.COMPONENTS[key]:
                            raise ArgumentError("parameter %s is not a valid "
                                                "%s" % (sub, key))
                        if sub in dups:
                            raise ArgumentError("parameter %s is duplicated "
                                                "in %s" % (sub, key))
                        dups[sub] = 1

        # Enforce a canonical order on comma-separated values
        if "day" in arguments:
            arguments["day"] = self._fix_parameter_order("day",
                                                         arguments["day"])
        if "week" in arguments and arguments["week"] != "all":
            arguments["week"] = self._fix_parameter_order("week",
                                                          arguments["week"])
        if "week" in arguments and arguments["week"] == "1,2,3,4,5":
            arguments["week"] = "all"

        return arguments

    def render(self, session, logger, **arguments):
        reboot_schedule = "reboot_schedule"
        validate_basic("reboot_schedule", reboot_schedule)

        arguments = self._validate_args(logger, **arguments)

        time = arguments["time"]
        week = arguments["week"].capitalize()
        day = arguments["day"].capitalize()
        hostname = arguments["hostname"]
        cluster = arguments["cluster"]
        comments = arguments["comments"]

        if time is not None:
            try:
                parse(time)
            except ValueError, e:
                raise ArgumentError("the preferred time '%s' could not be "
                                    "interpreted: %s" % (time, e))

        holder = get_resource_holder(session, hostname, cluster, compel=False)

        RebootSchedule.get_unique(session, name=reboot_schedule, holder=holder,
                                  preclude=True)

        res = RebootSchedule(name=reboot_schedule, time=time, week=week,
                             day=day, comments=comments)

        return add_resource(session, logger, holder, res)
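# --- Illustrative sketch, not part of the broker code ---
# A standalone re-implementation of the ordering logic in
# _fix_parameter_order, to show how _validate_args normalizes the --week
# value (the active "day" regexp only accepts a single day, so ordering
# mainly matters for weeks).
COMPONENTS = {
    "week": ["1", "2", "3", "4", "5"],
    "day": ["Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"],
}

def fix_parameter_order(key, value):
    items = value.split(",")
    return ",".join(item for item in COMPONENTS[key] if item in items)

print fix_parameter_order("week", "3,1")        # -> "1,3"
print fix_parameter_order("week", "5,4,3,2,1")  # -> "1,2,3,4,5", which
                                                #    _validate_args then
                                                #    collapses to "all"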