def connect_to_wallaby(broker_addr='127.0.0.1', port=5672, username='', passwd='', mechanisms='ANONYMOUS PLAIN GSSAPI'):
    global supported_api_versions

    # Set up a QMF session for wallaby
    session = Session()
    if username != '' and passwd != '':
        broker_str = '%s/%s@%s:%d' % (username, passwd, broker_addr, port)
    elif username != '':
        broker_str = '%s@%s:%d' % (username, broker_addr, port)
    else:
        broker_str = '%s:%d' % (broker_addr, port)

    sys.stdout.write("Connecting to broker %s:\n" % broker_str)
    try:
        broker = session.addBroker('amqp://%s' % broker_str, mechanisms=mechanisms)
    except:
        sys.stderr.write('Unable to connect to broker "%s"\n' % broker_str)
        raise

    # Retrieve the config store object
    sys.stdout.write("Connecting to wallaby store:\n")
    try:
        (store_agent, config_store) = WallabyHelpers.get_store_objs(session)
    except WallabyStoreError, error:
        sys.stderr.write('Error: %s\n' % error.error_str)
        session.delBroker(broker)
        raise
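# A hedged usage sketch for connect_to_wallaby() above: when credentials are
# supplied, the mechanism list is typically narrowed to PLAIN (as the
# PLAIN-only submit script elsewhere in this collection does). The host name
# and credentials here are hypothetical.
connect_to_wallaby(broker_addr='broker.example.com', port=5672,
                   username='wallaby_user', passwd='secret',
                   mechanisms='PLAIN')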
def get_store():
    # We'll start by setting up a Wallaby client library session against
    # the broker on localhost
    from qmf.console import Session
    console = Session()
    console.addBroker()
    raw_store, = console.getObjects(_class="Store")
    store = wallaby.Store(raw_store, console)

    # Call this method after the store client is initialized so that
    # the tagging library knows how to create missing groups
    tagging.setup(store)
    return store
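# A short usage sketch for get_store() above, reusing only calls that appear
# in the wallaby walkthrough later in this collection; the hostname is
# hypothetical.
store = get_store()
node = store.addNode("node01.example.com")
print node.name
print node.getConfig()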
def __init__(self, test=False):
    self.cts = None
    if test:
        self.tester = TriggerTester()
        Session.__init__(self, console=self.tester, rcvObjects=False,
                         rcvEvents=True, rcvHeartbeats=False,
                         manageConnections=False)
    else:
        Session.__init__(self, rcvObjects=False, rcvEvents=False,
                         rcvHeartbeats=False, manageConnections=False)
def monitor_qpid(self):
    self.logger.debug("Starting monitoring...")

    # Create an instance of the QMF session manager. Set userBindings to True
    # to allow this program to choose which object classes it is interested in.
    sess = Session(self, manageConnections=True, rcvEvents=False, userBindings=True)

    # Register to receive updates for broker queue and exchange objects.
    sess.bindClass("org.apache.qpid.broker", "queue")
    sess.bindClass("org.apache.qpid.broker", "exchange")
    broker = sess.addBroker()

    self.monitor = True
    while self.monitor:
        sleep(1)
    self.logger.debug("monitor set to false, therefore stopping monitor")

    # Disconnect the broker before exiting
    sess.delBroker(broker)
    sess = None
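# A minimal sketch, assuming qmf.console's Console callback interface, of the
# handlers that the Session above would invoke on `self` once "queue" and
# "exchange" are bound. msgDepth is a real queue statistic; the printing and
# class name are illustrative assumptions.
from qmf.console import Console

class BindingWatcher(Console):
    def objectProps(self, broker, record):
        # Property updates for the bound classes
        print "props:", record.getClassKey().getClassName(), record.name

    def objectStats(self, broker, record):
        # Statistic updates for the bound classes
        if record.getClassKey().getClassName() == "queue":
            print "queue depth:", record.msgDepth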
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from qmf.console import Session
from sys import exit, argv

url = len(argv) > 2 and argv[2] or "amqp://localhost:5672"

session = Session()
try:
    broker = session.addBroker(url)
except:
    print 'Unable to connect to broker'
    exit(1)

try:
    agentName = "com.redhat.grid:%s" % argv[1]
    print "Connecting to agent at ", agentName
    agent = broker.getAgent(broker.getBrokerBank(), agentName)
    submissions = agent.getObjects(_class="submission", _package='com.redhat.grid')
except:
    print 'Unable to access agent or submissions'
    session.delBroker(broker)
    exit(1)
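# A follow-on sketch for the script above: dump each submission the grid agent
# returned. getObjectId() and getProperties() are the generic qmf.console
# object accessors used elsewhere in this collection; no schema-specific
# attribute names are assumed.
for submission in submissions:
    print submission.getObjectId()
    for prop, value in submission.getProperties():
        print "\t%s = %s" % (prop.name, value)

session.delBroker(broker)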
class WallabyOperations(object): ''' Wrapper around the Wallaby client library. ''' def __init__(self, broker_uri, refresh_interval=None, sasl_mech_list=None): ''' Constructor. broker_uri -- the URI used to connect to a QMF message broker where a Wallaby agent is connected. The simplest URI is just a hostname but a full URI can specify scheme://user/password@host:port or a subset of those components as long as the host is included. Examples: localhost localhost:5672 amqp://fred/[email protected]:1234 refresh_interval -- default refresh interval in seconds for all items maintained by WallabyOperations' internal caching thread. A value of None causes the caching thread to wait forever before refreshing an item after a successful call unless the refresh() method is used. The refresh interval may be set for items individually with the set_interval() method. sasl_mech_list -- restricts the list of sasl mechanisms that will be allowed when connecting to a QMF message broker. If the broker URL contains no credentials, default is ANONYMOUS. If the broker URL does contain credentials, default is 'PLAIN DIGEST-MD5' ''' self.broker_uri = broker_uri self.sasl_mech_list = get_sasl_mechanisms(broker_uri, sasl_mech_list) # A wallaby Store object self._store = None # A QMF broker self._broker = None # The cache maintenance thread self._maintain_cache = None # Stop the maintenance thread self._stop = False # Cached data. Each of the keys in this dictionary is the name of # an attribute on the Wallaby Store object, with the exception of # WBTypes.TAGS. The TAGS data is a subset of the GROUPS produced # in this module. self._cache = {WBTypes.NODES: self.CacheData(refresh_interval), WBTypes.GROUPS: self.CacheData(refresh_interval), WBTypes.FEATURES: self.CacheData(refresh_interval), WBTypes.TAGS: self.CacheData(refresh_interval, synthetic=self._generate_tag_data)} # Cache a list of nodes that are members of a tag self._nodes_by_tag = dict() # Store the name of the partition group so we can filter it out # of tags/groups that we return self._partition_group = None # Lock is used for synchronization with the caching thread and # for thread safety of any and all data that could be accessed # by multiple threads. self._lock = Lock() self._condition = Condition(self._lock) def start(self, retry_secs=5): ''' Start the caching thread. This thread will attempt to connect to the broker and retrieve a Store object from the Wallaby agent. If successful, it will periodically retrieve and cache data from Wallaby. Only one caching thread may run at a time. The thread may be restarted if it has previously been stopped. Note, for the moment start() and stop() are not thread safe. They should only be called from a single thread. retry_secs -- how often the caching thread will retry failed operations. This includes attempts to connect to the broker and retrieve a Store object as well as calls to Wallaby that return no data. ''' # The connection to the broker can actually take a long # time to complete. We don't want to hang a calling function, # so we handle the connection and retrieval of the # initial Store object from Wallaby in a thread. # (There may need to be more work here if the broker or wallaby # going away and coming back causes a problem, but with # manageConnections=True and well-known agent/object ids for # Wallaby it appears to recover on its own...) # Similarly, getting node lists etc may take a long time # especially over a slow network. 
So we use the same thread # to retrieve things like node lists at defined intervals. # 'self' here is really a term of art since this is a local # function, but it refers to the WallabyOperations object # so the code reads nicely def maintain_cache(self): # Get initinal connection and Store obect self.session = Session(manageConnections=True) self.broker = self.session.addBroker(self.broker_uri, mechanisms=self.sasl_mech_list) while not self._stop: self._store = self._get_store() if self._store is not None: setup(self._store) self._partition_group = self._store.getPartitionGroup().name log.debug("WallabyOperations: found wallaby store object") break # Check stop inside the lock to make sure that we don't miss # a signal or a "stop" that was set while we were iterating. self._condition.acquire() if not self._stop: self._condition.wait(retry_secs) self._condition.release() # Init remaining time til next update to 0 for each # cached item in case the thread was restarted for attr, val in self._cache.iteritems(): val.remaining = 0 # Okay, now we're ready to retrieve data while not self._stop: start_processing = time.time() for attr, val in self._cache.iteritems(): if self._stop: break # val.remaining is the number of seconds left before # the next update of this data item. None is "forever". # Synthetic items are not retreived from the store. if not val.synthetic and \ val.remaining is not None and val.remaining <= 0: d = get_values(attr, getattr, self._store, attr, []) # If the data is empty, _set_cache will leave the # remaining field set to 0 for the attribute so we # will try to get it again on our next retry. # Otherwise, remaining will be reset to the full # interval for this attribute. self._set_cache(attr, d) # Now handle the synthetics. val.synthetic generates # and stores it's own results. for attr, val in self._cache.iteritems(): if self._stop: break if val.synthetic and \ val.remaining is not None and val.remaining <= 0: get_values(attr, val.synthetic, *val.args) log.debug("WallabyOperations: total refresh processing time %s" \ % (time.time() - start_processing)) # Find out how long we should sleep for. # Based on min remaining times for all items # If minimum is 0 because we have items waiting # for a retry, we fall back on retry_secs as a minimum. sleep_time = self._find_min_remaining(min=retry_secs) self._condition.acquire() if not self._stop: # Could be signaled, so track the actual sleep time log.debug("WallabyOperations: cache thread sleeping for"\ " %s seconds" % sleep_time) bed_time = time.time() self._condition.wait(sleep_time) slept = time.time() - bed_time log.debug("WallabyOperations: cache thread slept for"\ " %s seconds" % slept) # When we wake up from sleep here, we already # have the lock so we might as well check refresh # and adjust the "remaining" values for attr, val in self._cache.iteritems(): if val.refresh: # Force an update val.remaining = 0 val.refresh = False elif val.remaining is not None: val.remaining -= slept self._condition.release() # Clear cache if we have been stopped.... for attr in self._cache: self._set_cache(attr, []) self._store = None # Have to clean up the broker try: self.session.delBroker(self.broker) except: pass #end maintain_cache def get_values(attr, call, *args): log.debug("WallabyOperations: refreshing %s" % attr) try: # Wallaby API uses extensions to __getattr__ on # the Store to retrieve objects from the Broker # and return a list of proxy objects. 
start = time.time() d = call(*args) except: d = [] delta = time.time() - start log.debug("WallabyOperations: %s seconds to refresh %s" % (delta, attr)) return d # Wrap the entire cache thread with an exception handler def wrap_maintain_cache(): try: maintain_cache(self) log.debug("WallabyOperations: cache maintenance thread exited") except: pass if self._maintain_cache is not None and \ self._maintain_cache.isAlive(): # No, you can't start another one. return False self._stop = False if self.broker_uri is not None: # self._maintain_cache = CallThread(cProfile.runctx('maintain_cache(self)', globals(), locals(), filename='sage.stats'), None) self._maintain_cache = CallThread(wrap_maintain_cache, None) self._maintain_cache.daemon = True self._maintain_cache.start() log.debug("WallabyOperations: start cache maintenance thread") return True return False def stop(self, wait=False, timeout=None): ''' Stop the caching thread. Wake the caching thread if asleep and cause it to exit. The thread may be restarted again with a call to start() once it has successfully exited. On exit, the thread will null out cached data. wait -- if True the call will block until the thread exits or "timeout" seconds has passed if "timeout" is not None. timeout -- how long to wait for the thread to exit if "wait" is True. A value of None means wait forever. Note, for the moment start() and stop() are not thread safe. They should only be called from a single thread. ''' if self._maintain_cache is not None: self._condition.acquire() self._stop = True self._condition.notify() self._condition.release() if wait and self._maintain_cache.isAlive(): log.debug("WallabyOperations: waiting for cache maintenance thread to exit") self._maintain_cache.join(timeout) log.debug("WallabyOperations: stopped cache maintenance thread") def refresh(self, *items): ''' Wake the caching thread if asleep and cause it to iterate. items -- what data to refresh. If "items" is an empty tuple, refresh all data otherwise refresh only the data specified. Attributes of WBTypes define valid values for elements of "items" ''' self._condition.acquire() try: if len(items) == 0: do_notify = True for attr, val in self._cache.iteritems(): val.refresh = True else: do_notify = False for attr in items: if attr in self._cache: do_notify = True self._cache[attr].refresh = True if do_notify: self._condition.notify() finally: self._condition.release() def get_data(self, which, valuefilter=None): ''' Return a list of cached values for the specified category. The values returned will be proxy objects constructed by the Wallaby client library. which -- specifies the category. Attributes of WBTypes define valid values for "which" ''' d = [] self._lock.acquire() try: if which in self._cache: d = self._cache[which].data.values() # Here we handle the possible filtering of node names if which == WBTypes.NODES: if valuefilter is not None and valuefilter["nodeName"] != "%%%": filter = valuefilter["nodeName"].replace("%", "") if filter != "": d = [value for value in d if value.name.find(filter) > -1] finally: self._lock.release() return d def get_names(self, which): ''' Return a list of cached names for the specified category. The values returned will be the names of objects constructed by the Wallaby client library. which -- specifies the category. 
Attributes of WBTypes define valid values for "which" ''' d = [] self._lock.acquire() try: if which in self._cache: d = self._cache[which].data.keys() finally: self._lock.release() return d def get_node_by_name(self, name): ''' Return a cached wallaby.Node object by name. If name does not designate a currently cached object, None is returned. ''' return self._lookup_by_name(WBTypes.NODES, name) def get_group_by_name(self, name): ''' Return a cached wallaby.Group object by name. If name does not designate a currently cached object, None is returned. ''' return self._lookup_by_name(WBTypes.GROUPS, name) def get_tag_by_name(self, name): ''' Return a cached wallaby.Tag object by name. If name does not designate a currently cached object, None is returned. ''' return self._lookup_by_name(WBTypes.TAGS, name) def get_feature_by_name(self, name): ''' Return a cached wallaby.Feature object by name. If name does not designate a currently cached object, None is returned. ''' return self._lookup_by_name(WBTypes.FEATURES, name) def get_node_names(self, tag): ''' Return a list of node names associated with the tag. The return result is a list containing the names of nodes in the tag group. ''' names = [] if type(tag) in (str, unicode): n = tag else: n = tag.name self._lock.acquire() try: if n in self._nodes_by_tag: names = self._nodes_by_tag[n] finally: self._lock.release() return names def get_tag_names(self, node): ''' Return a list of tag names associated with the node. The return result is a list containing the names of tags on the specified node. ''' names = [] n = None if type(node) in (str, unicode): n = node elif hasattr(node, "name"): n = node.name if n is None: log.debug("WallabyOperations: get_tag_names(), parameter 'node' yields no name, returning []") else: self._lock.acquire() try: if n in self._cache[WBTypes.NODES].data: names = self._cache[WBTypes.NODES].data[n].getTags() finally: self._lock.release() return names def create_tags(self, names): ''' Create new tags in the Wallaby store. Refresh the cached lists of groups and tags. ''' if self._store is None: log.debug("WallabyOperations: create_tag, store object not yet created") return False try: self._lock.acquire() try: for name in names: self._store.addTag(name) except Exception, e: log.debug("WallabyOperations: create_tag, exception suppressed, %s" % str(e)) return False finally: self._lock.release() return True def remove_tags(self, names): ''' Remove a set of tags from the Wallaby store. Check the cached list of tags for the tag name first. Refresh cached lists of groups, tags, and nodes. ''' if self._store is None: log.debug("WallabyOperations: remove_tag, store object not yet created") return False for name in names: if self.get_tag_by_name(name) is not None: try: self._store.removeGroup(name) except Exception, e: log.debug("WallabyOperations: remove_tag, exception suppressed, %s" % str(e)) return False return True
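# A brief usage sketch for the WallabyOperations wrapper defined above, using
# only methods shown in the class. The broker URI, the 30-second refresh
# interval, and the sleep are illustrative values.
import time

ops = WallabyOperations("localhost:5672", refresh_interval=30)
if ops.start(retry_secs=5):
    time.sleep(10)                         # give the cache thread time to populate
    print ops.get_names(WBTypes.NODES)     # cached node names
    print ops.get_names(WBTypes.TAGS)      # cached tag names (synthetic data)
    ops.refresh(WBTypes.GROUPS)            # force a refresh of one category
    ops.stop(wait=True, timeout=5)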
def methodResponse(self, broker, seq, response):
    condition.acquire()
    self.count += 1
    condition.notify()
    condition.release()
    # print broker, seq, response


url = len(argv) > 1 and argv[1] or "amqp://localhost:5672"
count = len(argv) > 2 and int(argv[2]) or 10000
submission = len(argv) > 3 and argv[3] or "submission"

klass = "JobServer"
package = "com.redhat.grid"

console = EmptyConsole()
session = Session(console)
broker = session.addBroker(url)

servers = session.getObjects(_class=klass, _package=package)
server = servers[0]

print "Found server:", server
print "Iterations:", count
print "Submission:", submission

start = time.time()
print "Start:", start

for i in xrange(count):
    r = session._sendMethodRequest(server.getBroker(), server.getClassKey(),
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License."""
#
# Author: William Benton ([email protected])

from qmf.console import Session
from sys import argv
import re
import os

s = Session()
s.addBroker()

if os.getenv("QMF2PY_OUTPUT_COPYRIGHT") is not None:
    print """# This file is automatically generated.
# Copyright (c) 2010 Red Hat, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
class Qmf: """ QMF functions not yet available in the new QMF API. Remove this and replace with new API when it becomes available. """ def __init__(self, broker): self.__session = Session() self.__broker = self.__session.addBroker("amqp://localhost:%d"%broker.port()) def add_exchange(self, exchange_name, exchange_type, alt_exchange_name=None, passive=False, durable=False, arguments = None): """Add a new exchange""" amqp_session = self.__broker.getAmqpSession() if arguments == None: arguments = {} if alt_exchange_name: amqp_session.exchange_declare(exchange=exchange_name, type=exchange_type, alternate_exchange=alt_exchange_name, passive=passive, durable=durable, arguments=arguments) else: amqp_session.exchange_declare(exchange=exchange_name, type=exchange_type, passive=passive, durable=durable, arguments=arguments) def add_queue(self, queue_name, alt_exchange_name=None, passive=False, durable=False, arguments = None): """Add a new queue""" amqp_session = self.__broker.getAmqpSession() if arguments == None: arguments = {} if alt_exchange_name: amqp_session.queue_declare(queue_name, alternate_exchange=alt_exchange_name, passive=passive, durable=durable, arguments=arguments) else: amqp_session.queue_declare(queue_name, passive=passive, durable=durable, arguments=arguments) def delete_queue(self, queue_name): """Delete an existing queue""" amqp_session = self.__broker.getAmqpSession() amqp_session.queue_delete(queue_name) def _query(self, name, _class, package, alt_exchange_name=None): """Qmf query function which can optionally look for the presence of an alternate exchange name""" try: obj_list = self.__session.getObjects(_class=_class, _package=package) found = False for obj in obj_list: if obj.name == name: found = True if alt_exchange_name != None: alt_exch_list = self.__session.getObjects(_objectId=obj.altExchange) if len(alt_exch_list) == 0 or alt_exch_list[0].name != alt_exchange_name: return False break return found except Exception: return False def query_exchange(self, exchange_name, alt_exchange_name=None): """Test for the presence of an exchange, and optionally whether it has an alternate exchange set to a known value.""" return self._query(exchange_name, "exchange", "org.apache.qpid.broker", alt_exchange_name) def query_queue(self, queue_name, alt_exchange_name=None): """Test for the presence of an exchange, and optionally whether it has an alternate exchange set to a known value.""" return self._query(queue_name, "queue", "org.apache.qpid.broker", alt_exchange_name) def queue_message_count(self, queue_name): """Query the number of messages on a queue""" queue_list = self.__session.getObjects(_class="queue", _name=queue_name) if len(queue_list): return queue_list[0].msgDepth def queue_empty(self, queue_name): """Check if a queue is empty (has no messages waiting)""" return self.queue_message_count(queue_name) == 0 def get_objects(self, target_class, target_package="org.apache.qpid.broker"): return self.__session.getObjects(_class=target_class, _package=target_package) def close(self): self.__session.delBroker(self.__broker) self.__session = None
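# A short usage sketch for the Qmf test helper above, using only its own
# methods. `broker` is assumed to be the test-harness broker object whose
# port() the constructor expects; the exchange and queue names are hypothetical.
qmf = Qmf(broker)
qmf.add_exchange("test.fanout", "fanout")
qmf.add_queue("test.queue")
assert qmf.query_exchange("test.fanout")
assert qmf.query_queue("test.queue")
print "messages on test.queue:", qmf.queue_message_count("test.queue")
qmf.delete_queue("test.queue")
qmf.close()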
class CuminSession(object): def __init__(self, app, broker_uris): self.app = app self.broker_uris = broker_uris self.qmf_session = None self.qmf_brokers = list() self.qmf_agents = dict() # int seq => callable self.outstanding_method_calls = dict() self.lock = Lock() def add_broker(self, uri): mechs = get_sasl_mechanisms(uri, self.app.sasl_mech_list) uri_without_password = uri[uri.rfind("@") + 1:] log.info("Adding QMF broker at %s with mech_list %s", uri_without_password, mechs) assert self.qmf_session qmf_broker = self.qmf_session.addBroker(uri,mechanisms=mechs) self.qmf_brokers.append(qmf_broker) def check(self): log.info("Checking %s", self) def init(self): uris_without_password = [x[x.rfind("@")+1:] for x in self.broker_uris] log.info("Initializing %s", uris_without_password) def start(self): log.info("Starting %s", self) assert self.qmf_session is None self.qmf_session = Session(CuminConsole(self), manageConnections=True, rcvObjects=False, rcvEvents=False, rcvHeartbeats=False) for uri in self.broker_uris: self.add_broker(uri) def stop(self): log.info("Stopping %s", self) for qmf_broker in self.qmf_brokers: self.qmf_session.delBroker(qmf_broker) def get_agent(self, agent_id): self.lock.acquire() try: return self.qmf_agents.get(agent_id) finally: self.lock.release() def call_method(self, callback, obj, name, args): assert isinstance(obj, RosemaryObject) for i in range(10): agent = self.get_agent(obj._qmf_agent_id) if agent: break sleep(1) if not agent: raise Exception("Agent '%s' is unknown" % obj._qmf_agent_id) if obj._qmf_object_id.isdigit(): # Translate v1 object ids if obj._class is self.app.model.org_apache_qpid_broker.Queue: # A very special workaround for queue keys key = obj.name else: key_args = [str(getattr(obj, x.name)) for x in obj._class._attributes if x.index and not x.references] key = ",".join(key_args) id_args = (obj._class._package._name, obj._class._name.lower(), key) object_id = ":".join(id_args) else: object_id = obj._qmf_object_id oid = ObjectId({"_agent_name": obj._qmf_agent_id, "_object_name": object_id}) qmf_objs = agent.getObjects(_objectId=oid) try: qmf_obj = qmf_objs[0] except IndexError: raise Exception("Object '%s' is unknown" % object_id) self.lock.acquire() try: seq = qmf_obj._invoke(name, args, {"_async": True}) assert seq is not None self.outstanding_method_calls[seq] = callback return seq finally: self.lock.release() def __repr__(self): uris_without_password = [x[x.rfind("@")+1:] for x in self.broker_uris] return "%s(%s)" % (self.__class__.__name__, uris_without_password)
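# A hedged sketch of driving the asynchronous call_method() above. `session`
# is a started CuminSession and `queue_obj` a RosemaryObject for a broker
# queue; the method name, its positional argument list, and the callback
# signature are illustrative assumptions.
def on_purge_done(*args):
    # Invoked from the console's method-response handling once the broker replies
    print "purge finished:", args

seq = session.call_method(on_purge_done, queue_obj, "purge", [0])
print "waiting on method call with sequence", seq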
}

quiet = False
url = "amqp://localhost:5672"

for arg in argv[1:]:
    if arg == '-q':
        quiet = True
    if "amqp://" in arg:
        url = arg

if not quiet:
    print ad

try:
    session = Session()
    broker = session.addBroker(url)
    schedulers = session.getObjects(_class="scheduler", _package="com.redhat.grid")
except Exception, e:
    print "unable to access broker or scheduler"
    exit(1)

result = schedulers[0].SubmitJob(ad)

if result.status:
    print "Error submitting:", result.text
    session.delBroker(broker)
    exit(1)

if not quiet:
    print "Submitted:", result.Id
help="Qpidd port [default: 5672]") parser.set_defaults(host = "localhost") parser.add_option("-x", "--host", dest="host", metavar="HOST", help="Qpidd host [default: localhost]") (opts, args) = parser.parse_args() queueNames = [] if (args): for q in args: queueNames.append(q); else: parser.error("At least one QueueName is required.") sys.exit(1) sess = Session() try: brokerUrl = "amqp://%s:%s" % (opts.host, opts.port) broker = sess.addBroker(brokerUrl) except socket.error: sys.stderr.write("Failed to connect to broker at '%s'." % brokerUrl) sys.exit(2) qs = [q for q in sess.getObjects(_class="queue", _package="org.apache.qpid.broker") if q.name in queueNames] if not qs: sys.stderr.write("Didn't found queues matching provided QueueNames.") sys.exit() if (len(qs) > 0): for q in qs:
#!/usr/bin/env python

# qmf2markdown.py
# documents the QMF classes from the packages listed in argv;
# runs against a broker on localhost.
# Public domain.
# Author: William Benton ([email protected])

from qmf.console import Session
from sys import argv

s = Session()
s.addBroker()

qmftypes = {12: "float",
            6: "sstr",
            15: "map",
            3: "uint32",
            17: "int16",
            8: "abstime",
            19: "int64",
            10: "ref",
            2: "uint16",
            16: "int8",
            13: "double",
            21: "list",
            11: "bool",
            4: "uint64",
            22: "array",
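# A small illustrative helper for the script above: map a schema property's
# numeric type code through qmftypes and emit a markdown table row. The
# property object is hypothetical; it only needs `name` and `type` attributes,
# which is how the qmf.console schema objects expose them.
def markdown_row(prop):
    typename = qmftypes.get(prop.type, "unknown(%d)" % prop.type)
    return "| %s | %s |" % (prop.name, typename)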
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from qmf.console import Session
from sys import exit, argv

url = len(argv) > 1 and argv[1] or "amqp://localhost:5672"

session = Session()
try:
    broker = session.addBroker(url)
except:
    print 'Unable to connect to broker'
    exit(1)

submitters = session.getObjects(_class="submitter", _package='com.redhat.grid')

print "Current Submitters:"
for submitter in submitters:
    print submitter.Name
    print "\tRunning jobs = ", submitter.RunningJobs
    print "\tIdle jobs = ", submitter.IdleJobs
    print "\tHeld jobs = ", submitter.HeldJobs, "\n"

# get the scheduler ref
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from qmf.console import Session
from sys import exit, argv

gridclasses = ["master", "grid", "collector", "negotiator", "slot",
               "scheduler", "jobserver", "submitter", "submission"]

url = len(argv) > 1 and argv[1] or "amqp://localhost:5672"

session = Session()
try:
    broker = session.addBroker(url)
except:
    print "Unable to connect to broker"
    exit(1)

print session.getPackages()

for gridclass in gridclasses:
    qmfobjects = session.getObjects(_class=gridclass, _package="com.redhat.grid")
    for qmfobject in qmfobjects:
        print "\n"
        print gridclass, "=", qmfobject.getObjectId()
        print "*****Properties*****"
        for prop in qmfobject.getProperties():
#!/usr/bin/python

from qpid.messaging import *
from qmf.console import Session as QmfSession
import socket

broker = "127.0.0.1:5672"
address = "mrg.grid.config.notifications"

connection = Connection(broker)

try:
    qmfsession = QmfSession()
    broker = qmfsession.addBroker(broker, 30, "ANONYMOUS")

    # Declare the exchange
    broker.getAmqpSession().exchange_declare(exchange=address, type="fanout")
    qmfsession.delBroker(broker)
    del qmfsession

    connection.open()
    session = connection.session()

    # content = {"nodes": [socket.gethostname()], "version": "a1b2c3d4e5f6g7h8"}
    content = {"nodes": [socket.gethostname()]}
    sender = session.sender(address)
    sender.send(Message(content=content))
except MessagingError, m:
    print m
except Exception, e:
    print e
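# A hedged companion sketch: listen for the fanout notifications that the
# script above publishes, using only the qpid.messaging API. The 60-second
# fetch timeout is arbitrary.
from qpid.messaging import Connection, Empty

conn = Connection("127.0.0.1:5672")
try:
    conn.open()
    session = conn.session()
    receiver = session.receiver("mrg.grid.config.notifications")
    try:
        msg = receiver.fetch(timeout=60)
        print "notified nodes:", msg.content.get("nodes")
        session.acknowledge()
    except Empty:
        print "no notification received"
finally:
    conn.close()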
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Need ENABLE_RUNTIME_CONFIG = True
# and appropriate config params as
# shown for this test

from qmf.console import Session
from sys import exit, argv

url = len(argv) > 1 and argv[1] or "amqp://localhost:5672"

session = Session()
try:
    broker = session.addBroker(url)
except:
    print 'Unable to connect to broker'
    exit(1)

quota = 0.01

negotiators = session.getObjects(_class="negotiator", _package='com.redhat.grid')

print "Current Negotiator:"
for negotiator in negotiators:
    print negotiator.Name
    print '\t', negotiator.GetLimits()
    print '\t', negotiator.GetRawConfig('GROUP_NAMES')
    old_quota = negotiator.GetRawConfig('GROUP_QUOTA_DYNAMIC_MGMT.CUMIN')
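    # A hedged continuation sketch: the test reads the current dynamic quota
    # above, so presumably it goes on to set a new one. SetRawConfig is assumed
    # to sit alongside GetRawConfig in the com.redhat.grid negotiator schema;
    # treat the call and its result handling as illustrative.
    print '\told quota =', old_quota
    result = negotiator.SetRawConfig('GROUP_QUOTA_DYNAMIC_MGMT.CUMIN', str(quota))
    if result.status:
        print "SetRawConfig failed:", result.text
    else:
        print '\tnew quota =', negotiator.GetRawConfig('GROUP_QUOTA_DYNAMIC_MGMT.CUMIN')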
}

quiet = False
url = "amqp://localhost:5672"

for arg in argv[1:]:
    if arg == '-q':
        quiet = True
    if "amqp://" in arg:
        url = arg

if not quiet:
    print ad

try:
    session = Session()
    # ensure that the PLAIN SASL mechanism is installed and
    # configured in /etc/sasl2/qpidd.conf
    broker = session.addBroker(url, mechanisms='PLAIN')
except Exception, e:
    print "unable to access broker or scheduler"
    print e
    exit(1)

schedulers = session.getObjects(_class="scheduler", _package="com.redhat.grid")
result = schedulers[0].SubmitJob(ad)

if result.status:
    print "Error submitting:", result.text
    session.delBroker(broker)
    exit(1)
time.sleep(20)

# Check if the configd process is still there
status = os.waitpid(configd_pid, os.WNOHANG)
if status != (0, 0):
    if os.WIFEXITED(status[1]) and os.WEXITSTATUS(status[1]) != 0 and status[0] == configd_pid:
        print 'PASS'
    else:
        print 'FAILED (Process exit not correct type "%d")' % os.WEXITSTATUS(status[1])
else:
    print 'FAILED (Configd still running)'

os.kill(bad_store_pid, 9)

# Connect to the broker
session = Session()
session.addBroker('amqp://127.0.0.1:5672')

# Wait until there is only 1 store running
while True:
    cnt = 0
    o = session.getAgents()
    for a in o:
        if a.label == 'com.redhat.grid.config:Store':
            cnt += 1
    if cnt == 1:
        break
    else:
        time.sleep(1)

# Start the configd
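# A small refactoring sketch of the polling loop above: count the Wallaby
# store agents visible to the session and wait until exactly one remains.
# Illustrative only; the agent label string is the one used in the loop above.
def count_store_agents(session):
    return len([a for a in session.getAgents()
                if a.label == 'com.redhat.grid.config:Store'])

while count_store_agents(session) != 1:
    time.sleep(1)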
host = "127.0.0.1" port = 12345 #self.send_to_splunk(host, port, "queue=%s,enqueues=%d,dequeues=%d,queue_size_count=%d,queue_size_bytes=%d,consumers=%d,consumers_high=%d,consumers_low=%d, bindings=%d, bindings_high=%d, bindings_low=%d" % (queue_name,total_enqueues,total_dequeues,queue_size_count,queue_size_bytes,consumer_count,consumer_high,consumer_low,binding_count,binding_high, binding_low)) # if the delete-time is non-zero, this object has been deleted. Remove it from the map. if record.getTimestamps()[2] > 0: queueMap.pop(oid) def send_to_splunk(self, host, port, message): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((host, port)) s.sendall(message) s.close() # Create an instance of the QMF session manager. Set userBindings to True to allow # this program to choose which objects classes it is interested in. sess = Session(MyConsole(), manageConnections=True, rcvEvents=False, userBindings=True) # Register to receive updates for broker:queue objects. sess.bindClass("org.apache.qpid.broker", "queue") broker = sess.addBroker() # Suspend processing while the asynchronous operations proceed. try: while True: sleep(1) except: pass # Disconnect the broker before exiting. sess.delBroker(broker)
from qmf.console import Session
import wallaby

# create a new console object
console = Session()

# connect to the broker (on localhost:5672, by default)
console.addBroker()

# find the QMF object for the wallaby service
raw_store, = console.getObjects(_class="Store")

# wrap it up in a client object
store = wallaby.Store(raw_store, console)

# now, interact with it!
node = store.addNode("barney.local.")
feature = store.addFeature("Example feature")
param = store.addParam("EXAMPLE_PARAM")

# most "options" arguments are indeed optional
feature.modifyParams("ADD", {"EXAMPLE_PARAM": "example value"})
store.getDefaultGroup().modifyFeatures("ADD", ["Example feature"])
node.getConfig()

# get documentation on a method
help(store.activateConfiguration)
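# A hedged continuation of the walkthrough above: after wiring the feature
# into the default group, activate the configuration so nodes pick up the
# change. The method exists (it is the target of the help() call above); how
# its return value is reported here is an assumption.
result = store.activateConfiguration()
print "activation result:", result
print "updated node config:", node.getConfig()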
class MintSession(object): def __init__(self, app, broker_uris): self.app = app self.broker_uris = broker_uris self.qmf_session = None self.qmf_brokers = list() def add_broker(self, uri): mechs = get_sasl_mechanisms(uri, self.app.sasl_mech_list) uri_without_password = uri[uri.rfind("@") + 1:] log.info("Adding QMF broker at %s with mech_list %s", uri_without_password, mechs) assert self.qmf_session qmf_broker = self.qmf_session.addBroker(uri, mechanisms=mechs) self.qmf_brokers.append(qmf_broker) def check(self): log.info("Checking %s", self) def init(self): log.info("Initializing %s", self) def init_qmf_classes(self): # Apply the package filter to the class list if len(self.app.qmf_classes): black_list = set() for cls in self.app.qmf_classes: if cls._package._name in self.app.qmf_package_filter: black_list.add(cls) self.app.qmf_classes.difference_update(black_list) else: # Generate the package list from the model, minus # the package filter for pkg in self.app.model._packages: if pkg._name not in self.app.qmf_package_filter: self.app.qmf_packages.add(pkg) def start(self): log.info("Starting %s", self) assert self.qmf_session is None self.qmf_session = Session(MintConsole(self.app.model), manageConnections=True, rcvObjects=True, rcvEvents=False, rcvHeartbeats=False, userBindings=True) if len(self.app.qmf_agents): for agent in self.app.qmf_agents: if len(agent) == 1: self.qmf_session.bindAgent(label=agent[0]) log.info("Binding agent, label is %s" % agent[0]) else: self.qmf_session.bindAgent(vendor=agent[0], product=agent[1], instance=agent[2]) log.info("Binding agent %s:%s:%s" % (agent[0],agent[1],agent[2])) else: self.qmf_session.bindAgent("*") log.info("Binding all agents") # Handle bind by class if len(self.app.qmf_classes): for cls in self.app.qmf_classes: pname = cls._package._name cname = cls._name self.qmf_session.bindClass(pname.lower(), cname.lower()) log.info("Binding QMF class %s.%s" % (pname, cname)) else: # Handle bind by package for pkg in self.app.qmf_packages: self.qmf_session.bindPackage(pkg._name.lower()) log.info("Binding QMF package %s" % pkg._name) for uri in self.broker_uris: self.add_broker(uri) def stop(self): log.info("Stopping %s", self) for qmf_broker in self.qmf_brokers: self.qmf_session.delBroker(qmf_broker) def __repr__(self): uris_without_password = [x[x.rfind("@")+1:] for x in self.broker_uris] return "%s(%s)" % (self.__class__.__name__, uris_without_password)
from qmf.console import Session
from qmf.console import ClassKey
from qpid.log import enable, DEBUG
import sys

s = Session()
br = s.addBroker()

service = s.getObjects(_class="ExampleService", _package="qmf.example.ejb")[0]

print "Service Methods"
print service.getMethods()

base = service.getBase("Nathan", "A Cool Dude").result
print "Base: ", base
print "Base.name: ", base.name
print "Base.description: ", base.description
print "Base.stuff: ", base.stuff

derived = service.getDerived("Gillian", "A Cool Dudette", 99).result
print "Derived: ", derived
print "Derived.name: ", derived.name
print "Derived.description: ", derived.description
print "Derived.stuff: ", derived.stuff
print "Derived.count: ", derived.count

many = service.findMany().result
print "Many: ", many
print "Many[0].stuff", many[0].stuff
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from qmf.console import Session
from sys import exit, argv, stdin
import time

jobid = len(argv) > 1 and argv[1]
attr_name = len(argv) > 2 and argv[2]
attr_value = len(argv) > 3 and argv[3]
url = len(argv) > 4 and argv[4] or "amqp://localhost:5672"

try:
    session = Session()
    broker = session.addBroker(url)
    schedulers = session.getObjects(_class="scheduler", _package="com.redhat.grid")
except Exception, e:
    print "unable to access broker or scheduler"
    print e
    exit(1)

result = schedulers[0].SetJobAttribute(jobid, attr_name, attr_value)

if result.status:
    print result.text
    session.delBroker(broker)
    exit(1)