Code Example #1
File: ha_tests.py Project: kgiusti/qpid-cpp-debian
        def verify(b, prefix, p):
            """Verify setup was replicated to backup b"""
            # Wait for configuration to replicate.
            wait_address(b, prefix + "x")
            self.assert_browse_retry(b, prefix + "q1", ["b", "1", "4"])

            self.assertEqual(
                p.receiver(prefix + "q1").fetch(timeout=0).content, "b")
            p.acknowledge()
            self.assert_browse_retry(b, prefix + "q1", ["1", "4"])

            self.assert_browse_retry(b, prefix + "q2",
                                     [])  # configuration only
            assert not valid_address(b, prefix + "q3")
            b.sender(prefix + "e1").send(
                Message(prefix + "e1"))  # Verify binds with replicate=all
            self.assert_browse_retry(b, prefix + "q1",
                                     ["1", "4", prefix + "e1"])
            b.sender(prefix + "e2").send(
                Message(prefix +
                        "e2"))  # Verify binds with replicate=configuration
            self.assert_browse_retry(b, prefix + "q2", [prefix + "e2"])

            b.sender(prefix + "e4").send(Message("drop2"))  # Verify unbind.
            self.assert_browse_retry(b, prefix + "q4", ["6", "7"])
Code Example #2
File: amqp.py Project: gonicus/clacks
 def send(self, data):
     self.log.debug("sending event: %s" % data)
     msg = Message(data)
     msg.user_id = self.__user
     try:
         return self.__sender.send(msg)
     except NotFound as e:
         self.log.critical("cannot send event: %s" % str(e))
         self.env.requestRestart()
         return False
Code Example #3
File: amqp_proxy.py Project: gonicus/clacks
    def __call__(self, *args, **kwargs):
        if len(kwargs) > 0 and len(args) > 0:
            raise JSONRPCException("JSON-RPC does not support positional and keyword arguments at the same time")

        # Default to 'core' queue
        queue = "core" #@UnusedVariable

        if self.__methods:
            if not self.__serviceName in self.__methods:
                raise NameError("name '%s' not defined" % self.__serviceName)

            if self.__domain:
                queue = self.__methods[self.__serviceName]['target'] #@UnusedVariable
            else:
                queue = self.__serviceAddress #@UnusedVariable

        # Find free session for requested queue
        found = False
        for sess, dsc in self.__worker[self.__serviceAddress].iteritems():
            if not dsc['locked']:
                self.__ssn = dsc['ssn']
                self.__sender = dsc['sender']
                self.__receiver = dsc['receiver']
                self.__sess = sess
                dsc['locked'] = True
                found = True
                break

        # No free session?
        if not found:
            raise AMQPException('no free session - increase workers')

        # Send
        if len(kwargs):
            postdata = dumps({"method": self.__serviceName, 'params': kwargs, 'id': 'jsonrpc'})
        else:
            postdata = dumps({"method": self.__serviceName, 'params': args, 'id': 'jsonrpc'})

        message = Message(postdata)
        message.user_id = self.__URL['user']
        message.reply_to = 'reply-%s' % self.__ssn.name

        self.__sender.send(message)

        # Get response
        respdata = self.__receiver.fetch()
        resp = loads(respdata.content)
        self.__ssn.acknowledge(respdata)

        self.__worker[self.__serviceAddress][self.__sess]['locked'] = False

        if resp['error'] != None:
            raise JSONRPCException(resp['error'])

        return resp['result']
Code Example #4
File: ha_store_tests.py Project: binarycod3r/qpid
    def test_catchup_store(self):
        """Verify that a backup erases queue data from store recovery before
        doing catch-up from the primary."""
        cluster = HaCluster(self, 2)
        sn = cluster[0].connect().session()
        s1 = sn.sender("q1;{create:always,node:{durable:true}}")
        for m in ["foo", "bar"]:
            s1.send(Message(m, durable=True))
        s2 = sn.sender("q2;{create:always,node:{durable:true}}")
        sk2 = sn.sender(
            "ex/k2;{create:always,node:{type:topic, durable:true, x-declare:{type:'direct'}, x-bindings:[{exchange:ex,key:k2,queue:q2}]}}"
        )
        sk2.send(Message("hello", durable=True))
        # Wait for backup to catch up.
        cluster[1].assert_browse_backup("q1", ["foo", "bar"])
        cluster[1].assert_browse_backup("q2", ["hello"])

        # Make changes that the backup doesn't see
        cluster.kill(1, promote_next=False)
        time.sleep(1)  # FIXME aconway 2012-09-25:
        r1 = cluster[0].connect().session().receiver("q1")
        for m in ["foo", "bar"]:
            self.assertEqual(r1.fetch().content, m)
        r1.session.acknowledge()
        for m in ["x", "y", "z"]:
            s1.send(Message(m, durable=True))
        # Use old connection to unbind
        us = cluster[0].connect_old().session(str(uuid4()))
        us.exchange_unbind(exchange="ex", binding_key="k2", queue="q2")
        us.exchange_bind(exchange="ex", binding_key="k1", queue="q1")
        # Restart both brokers from store to get inconsistent sequence numbering.
        cluster.bounce(0, promote_next=False)
        cluster[0].promote()
        cluster[0].wait_status("active")
        cluster.restart(1)
        cluster[1].wait_status("ready")

        # Verify state
        cluster[0].assert_browse("q1", ["x", "y", "z"])
        cluster[1].assert_browse_backup("q1", ["x", "y", "z"])
        sn = cluster[0].connect().session(
        )  # FIXME aconway 2012-09-25: should fail over!
        sn.sender("ex/k1").send("boo")
        cluster[0].assert_browse_backup("q1", ["x", "y", "z", "boo"])
        cluster[1].assert_browse_backup("q1", ["x", "y", "z", "boo"])
        sn.sender("ex/k2").send(
            "hoo")  # q2 was unbound so this should be dropped.
        sn.sender("q2").send(
            "end")  # mark the end of the queue for assert_browse
        cluster[0].assert_browse("q2", ["hello", "end"])
        cluster[1].assert_browse_backup("q2", ["hello", "end"])
Code Example #5
File: ha_tests.py Project: kgiusti/qpid-cpp-debian
 def setup(p, prefix, primary):
     """Create config, send messages on the primary p"""
     s = p.sender(queue(prefix + "q1", "all"))
     for m in ["a", "b", "1"]:
         s.send(Message(m))
     # Test replication of dequeue
     self.assertEqual(
         p.receiver(prefix + "q1").fetch(timeout=0).content, "a")
     p.acknowledge()
     p.sender(queue(prefix + "q2", "configuration")).send(Message("2"))
     p.sender(queue(prefix + "q3", "none")).send(Message("3"))
     p.sender(exchange(prefix + "e1", "all",
                       prefix + "q1")).send(Message("4"))
     p.sender(exchange(prefix + "e2", "all",
                       prefix + "q2")).send(Message("5"))
     # Test  unbind
     p.sender(queue(prefix + "q4", "all")).send(Message("6"))
     s3 = p.sender(exchange(prefix + "e4", "all", prefix + "q4"))
     s3.send(Message("7"))
     # Use old connection to unbind
     us = primary.connect_old().session(str(uuid4()))
     us.exchange_unbind(exchange=prefix + "e4",
                        binding_key="",
                        queue=prefix + "q4")
     p.sender(prefix + "e4").send(Message("drop1"))  # Should be dropped
     # Need a marker so we can wait till sync is done.
     p.sender(queue(prefix + "x", "configuration"))
Code Example #6
    def _resize_test(self, queue_name, num_msgs, msg_size, resize_num_files, resize_file_size, init_num_files = 8,
                    init_file_size = 24, exp_fail = False, wait_time = None):
        # Using a sender will force the creation of an empty persistent queue which is needed for some tests
        broker = self.broker(store_args(), name="broker", expect=EXPECT_EXIT_OK, wait=wait_time)
        ssn = broker.connect().session()
        snd = ssn.sender("%s; {create:always, node:{durable:True}}" % queue_name)

        msgs = []
        for index in range(0, num_msgs):
            msg = Message(self.make_message(index, msg_size), durable=True, id=uuid4(), correlation_id="msg-%04d"%index)
            msgs.append(msg)
            snd.send(msg)
        broker.terminate()

        res = self._resize_store(os.path.join(self.dir, "broker", "rhm", "jrnl"), queue_name, resize_num_files,
                             resize_file_size, exp_fail)
        if res != 0:
            if exp_fail:
                return
            self.fail("ERROR: Resize operation failed with return code %d" % res)
        elif exp_fail:
            self.fail("ERROR: Resize operation succeeded, but a failure was expected")

        broker = self.broker(store_args(), name="broker")
        self.check_messages(broker, queue_name, msgs, True)
Code Example #7
    def _method(self,
                method,
                arguments,
                addr="org.apache.qpid.broker:broker:amqp-broker",
                timeout=10):
        props = {
            'method': 'request',
            'qmf.opcode': '_method_request',
            'x-amqp-0-10.app-id': 'qmf2'
        }
        correlator = str(self.next_correlator)
        self.next_correlator += 1

        content = {
            '_object_id': {
                '_object_name': addr
            },
            '_method_name': method,
            '_arguments': arguments
        }

        message = Message(content,
                          reply_to=self.reply_to,
                          correlation_id=correlator,
                          properties=props,
                          subject="broker")
        self.tx.send(message)
        response = self.reply_rx.fetch(timeout)
        self.sess.acknowledge()
        if response.properties['qmf.opcode'] == '_exception':
            raise Exception("Exception from Agent: %r" %
                            response.content['_values'])
        if response.properties['qmf.opcode'] != '_method_response':
            raise Exception("bad response: %r" % response.properties)
        return response.content['_arguments']
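A hedged usage sketch of the _method() helper above. The caller name qmf_client, the "create" method name, and its argument layout follow the QMFv2 broker management API and are assumptions for illustration, not part of the original example.

# Hypothetical: ask the broker to create a durable queue through the QMFv2 "create" method.
# qmf_client stands in for whatever object defines _method() above.
result = qmf_client._method("create",
                            {"type": "queue",
                             "name": "test-queue",
                             "properties": {"durable": True},
                             "strict": True})
print(result)  # the '_arguments' map returned in the method response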
Code Example #8
File: ha_tests.py Project: kgiusti/qpid-cpp-debian
 def test_priority_fairshare(self):
     """Verify priority queues replicate correctly"""
     primary = HaBroker(self, name="primary")
     primary.promote()
     backup = HaBroker(self, name="backup", brokers_url=primary.host_port())
     session = primary.connect().session()
     levels = 8
     priorities = [
         4, 5, 3, 7, 8, 8, 2, 8, 2, 8, 8, 16, 6, 6, 6, 6, 6, 6, 8, 3, 5, 8,
         3, 5, 5, 3, 3, 8, 8, 3, 7, 3, 7, 7, 7, 8, 8, 8, 2, 3
     ]
     limits = {7: 0, 6: 4, 5: 3, 4: 2, 3: 2, 2: 2, 1: 2}
     limit_policy = ",".join(["'qpid.fairshare':5"] + [
         "'qpid.fairshare-%s':%s" % (i[0], i[1])
         for i in limits.iteritems()
     ])
     s = session.sender(
         "priority-queue; {create:always, node:{x-declare:{arguments:{'qpid.priorities':%s, %s}}}}"
         % (levels, limit_policy))
     messages = [
         Message(content=str(uuid4()), priority=p) for p in priorities
     ]
     for m in messages:
         s.send(m)
     backup.wait_backup(s.target)
     r = backup.connect_admin().session().receiver("priority-queue")
     received = [r.fetch().content for i in priorities]
     sort = sorted(messages,
                   key=lambda m: priority_level(m.priority, levels),
                   reverse=True)
     fair = [
         m.content
         for m in fairshare(sort, lambda l: limits.get(l, 0), levels)
     ]
     self.assertEqual(received, fair)
Code Example #9
    def send_obj(self, datex_obj):
        """
        Use data from the 'datex2' object to construct a proper
        AMQP object with all the required properties set.
        """
        tz = pytz.timezone("Europe/Oslo")
        now_iso_timestamp = datetime.datetime.now(tz).isoformat()
        centroid = datex_obj.centroid
        prop = {
            "who": "Norwegian Public Roads Administration",
            "how": "Datex2",
            "what": "PredefinedLocation",
            "lat": centroid[0],
            "lon": centroid[1],
            "where1": "no",
            "when": now_iso_timestamp
        }

        m = Message(user_id=self._credentials.get("username"),
                    properties=prop,
                    content=str(datex_obj))

        self.log.debug(u"Sending message: version={}, name={}".format(
            datex_obj.version, datex_obj.name))
        self.send_messsage(m)
Code Example #10
File: ha_store_tests.py Project: binarycod3r/qpid
 def verify(broker, x_count):
     sn = broker.connect().session()
     assert_browse(sn, "qq",
                   ["bar", "baz", "flush"] + (x_count) * ["x"])
     sn.sender("xx/k").send(Message("x", durable=True))
     assert_browse(sn, "qq",
                   ["bar", "baz", "flush"] + (x_count + 1) * ["x"])
Code Example #11
 def send(self, message):
     '''shortcut for self.sender.send(Message(content=json.dumps(message)))'''
     self.last_sent = Message(content=json.dumps(message))
     ret = self.sender.send(self.last_sent)
     if self._asserting:
         assert self.is_ok, 'QpidHandle was not OK:\n%s' % self
     return ret
Code Example #12
    def send(self, routing_key, msg_str):
        msg = Message(subject=routing_key,
                      content_type=DEF_CONTENTTYPE,
                      content=msg_str)

        if __debug__: print 'Sending', msg

        self.__sender.send(msg)
Code Example #13
File: ha_tests.py Project: kgiusti/qpid-cpp-debian
 def test_ring(self):
     """Test replication with the ring queue policy"""
     primary = HaBroker(self, name="primary")
     primary.promote()
     backup = HaBroker(self, name="backup", brokers_url=primary.host_port())
     s = primary.connect().session().sender(
         "q; {create:always, node:{x-declare:{arguments:{'qpid.policy_type':ring, 'qpid.max_count':5}}}}"
     )
     for i in range(10):
         s.send(Message(str(i)))
     backup.assert_browse_backup("q", [str(i) for i in range(5, 10)])
Code Example #14
        def setNameIfNecessary(deviceUUID, name):
            dev = self.inventory['devices'].get(deviceUUID)
            if (dev is None or dev['name'] == '') and name != '':
                content = {"command": "setdevicename",
                           "uuid"   : self.agoController,
                           "device" : deviceUUID,
                           "name"   : name}

                message = Message(content=content)
                self.connection.send_message(None, content)
                self.log.debug("'setdevicename' message sent for %s, name=%s", deviceUUID, name)
Code Example #15
File: ha_store_tests.py Project: binarycod3r/qpid
    def test_store_recovery(self):
        """Verify basic store and recover functionality"""
        cluster = HaCluster(self, 2)
        sn = cluster[0].connect().session()
        s = sn.sender("qq;{create:always,node:{durable:true}}")
        sk = sn.sender(
            "xx/k;{create:always,node:{type:topic, durable:true, x-declare:{type:'direct'}, x-bindings:[{exchange:xx,key:k,queue:qq}]}}"
        )
        s.send(Message("foo", durable=True))
        s.send(Message("bar", durable=True))
        sk.send(Message("baz", durable=True))
        r = cluster[0].connect().session().receiver("qq")
        self.assertEqual(r.fetch().content, "foo")
        r.session.acknowledge()
        # FIXME aconway 2012-09-21: sending this message is an ugly hack to flush
        # the dequeue operation on qq.
        s.send(Message("flush", durable=True))

        def verify(broker, x_count):
            sn = broker.connect().session()
            assert_browse(sn, "qq",
                          ["bar", "baz", "flush"] + (x_count) * ["x"])
            sn.sender("xx/k").send(Message("x", durable=True))
            assert_browse(sn, "qq",
                          ["bar", "baz", "flush"] + (x_count + 1) * ["x"])

        verify(cluster[0], 0)
        cluster.bounce(0, promote_next=False)
        cluster[0].promote()
        cluster[0].wait_status("active")
        verify(cluster[0], 1)
        cluster.kill(0, promote_next=False)
        cluster[1].promote()
        cluster[1].wait_status("active")
        verify(cluster[1], 2)
        cluster.bounce(1, promote_next=False)
        cluster[1].promote()
        cluster[1].wait_status("active")
        verify(cluster[1], 3)
Code Example #16
File: messaging.py Project: KeithLatteri/awips2
 def testProperties(self):
   msg = Message()
   msg.to = "to-address"
   msg.subject = "subject"
   msg.correlation_id = str(self.test_id)
   msg.properties = MessageEchoTests.TEST_MAP
   msg.reply_to = "reply-address"
   self.check(msg)
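The same fields can also be supplied as Message constructor keyword arguments rather than assigned one by one; a minimal sketch under that assumption (the keyword names mirror the attributes used elsewhere on this page, and 'to' stays an attribute assignment because constructor support for it is not shown here):

from qpid.messaging import Message

# Build an equivalent message in a single call; all values are placeholders.
msg = Message(content="payload",
              subject="subject",
              reply_to="reply-address",
              correlation_id="1234",
              properties={"key1": "value1"})
msg.to = "to-address"  # assigned as an attribute, as in the test above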
Code Example #17
 def _sendRequest(self, opcode, content):
     props = {
         'method': 'request',
         'qmf.opcode': opcode,
         'x-amqp-0-10.app-id': 'qmf2'
     }
     correlator = str(self.next_correlator)
     self.next_correlator += 1
     message = Message(content,
                       reply_to=self.reply_to,
                       correlation_id=correlator,
                       properties=props,
                       subject="broker")
     self.tx.send(message)
     return correlator
Code Example #18
File: ha_tests.py Project: kgiusti/qpid-cpp-debian
 def test_reject(self):
     """Test replication with the reject queue policy"""
     primary = HaBroker(self, name="primary")
     primary.promote()
     backup = HaBroker(self, name="backup", brokers_url=primary.host_port())
     s = primary.connect().session().sender(
         "q; {create:always, node:{x-declare:{arguments:{'qpid.policy_type':reject, 'qpid.max_count':5}}}}"
     )
     try:
         for i in range(10):
             s.send(Message(str(i)), sync=False)
     except qpid.messaging.exceptions.TargetCapacityExceeded:
         pass
     backup.assert_browse_backup("q", [str(i) for i in range(0, 5)])
     # Detach, don't close as there is a broken session
     s.session.connection.detach()
Code Example #19
    def keepAliveWrite(self):
        """
		 当mq为写入类型时,防止由于长时间没有消息进入,导致后继读取mq堵塞的情况,需要定时发送保活消息
		这个问题目前没有应对方案,只能通过在写mq通道上频繁写入小数据,强迫mq接收者响应消息的读取。
		:return:
		"""
        from qpid.messaging import Message
        while True:
            gevent.sleep(5)
            print 'keepalive write, target: ', self.ep.addr
            try:
                if self.exitflag:
                    return True
                m = Message("keepalive")
                self.snd.send(m, False)
            except:
                log_error(traceback.format_exc())
Code Example #20
File: ha_tests.py Project: kgiusti/qpid-cpp-debian
 def test_priority(self):
     """Verify priority queues replicate correctly"""
     primary = HaBroker(self, name="primary")
     primary.promote()
     backup = HaBroker(self, name="backup", brokers_url=primary.host_port())
     session = primary.connect().session()
     s = session.sender(
         "priority-queue; {create:always, node:{x-declare:{arguments:{'qpid.priorities':10}}}}"
     )
     priorities = [8, 9, 5, 1, 2, 2, 3, 4, 9, 7, 8, 9, 9, 2]
     for p in priorities:
         s.send(Message(priority=p))
     # Can't use browse_backup as browser sees messages in delivery order not priority.
     backup.wait_backup("priority-queue")
     r = backup.connect_admin().session().receiver("priority-queue")
     received = [r.fetch().priority for i in priorities]
     self.assertEqual(sorted(priorities, reverse=True), received)
Code Example #21
 def send(self, address, content, ttl=None):
     """
     Send a message.
     :param address: An AMQP address.
     :type address: str
     :param content: The message content
     :type content: buf
     :param ttl: Time to Live (seconds)
     :type ttl: float
     """
     sender = self.session.sender(address)
     try:
         message = Message(content=content, durable=self.durable, ttl=ttl)
         sender.send(message)
         log.debug('sent (%s)', address)
     finally:
         sender.close()
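For orientation, a hedged usage sketch of the send() helper above. The Producer wrapper name and the broker address are assumptions made purely for illustration.

from qpid.messaging import Connection

connection = Connection("localhost:5672")  # assumed broker address
connection.open()
try:
    producer = Producer(connection.session())  # hypothetical object exposing send() above
    producer.send("my-queue", "hello", ttl=30)  # message expires after 30 seconds
finally:
    connection.close()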
Code Example #22
File: model.py Project: pombreda/gofer
 def __call__(self):
     """
     Invoke the method.
     :raise: Error on failure.
     """
     self.open()
     try:
         request = Message(content=self.content,
                           reply_to=self.reply_to,
                           properties=self.properties,
                           correlation_id=utf8(uuid4()),
                           subject=SUBJECT)
         self.sender.send(request)
         reply = self.receiver.fetch()
         self.session.acknowledge()
         self.on_reply(reply)
     finally:
         self.close()
Code Example #23
    def sendDetail(self, m):
        from qpid.messaging import Message
        try:
            if self.exitflag:
                return True
#			if not self.conn:
#				broker = "%s:%s"%(self.ep.host,self.ep.port)
#				self.conn = Connection( broker,reconnect= True,tcp_nodelay=True)
#				self.conn.open()
#				self.ssn = self.conn.session()
#				self.snd = self.ssn.sender(self.ep.addr)
            d = m.marshall()
            m = Message(d)
            self.snd.send(m, False)
        except:
            log_error(traceback.format_exc())
            # self.conn = None
            return False
        return True
Code Example #24
File: ha_tests.py Project: kgiusti/qpid-cpp-debian
    def test_priority_ring(self):
        primary = HaBroker(self, name="primary")
        primary.promote()
        backup = HaBroker(self, name="backup", brokers_url=primary.host_port())
        s = primary.connect().session().sender(
            "q; {create:always, node:{x-declare:{arguments:{'qpid.policy_type':ring, 'qpid.max_count':5, 'qpid.priorities':10}}}}"
        )
        priorities = [8, 9, 5, 1, 2, 2, 3, 4, 9, 7, 8, 9, 9, 2]
        for p in priorities:
            s.send(Message(priority=p))

        # FIXME aconway 2012-02-22: there is a bug in priority ring
        # queues that allows a low priority message to displace a high
        # one. The following commented-out assert_browse is for the
        # correct result, the uncommented one is for the actually buggy
        # result.  See https://issues.apache.org/jira/browse/QPID-3866
        #
        # expect = sorted(priorities,reverse=True)[0:5]
        expect = [9, 9, 9, 9, 2]
        primary.assert_browse("q", expect, transform=lambda m: m.priority)
        backup.assert_browse_backup("q",
                                    expect,
                                    transform=lambda m: m.priority)
Code Example #25
File: conn_qpid.py Project: adoggie/PyDawn
 def produce(self,message):
     message = Message(message)
     self.producer.send(message, False)
Code Example #26
 def put(self, msg_id):
     mq_parser = request_mq_message()
     args = mq_parser.parse_args()
     msg = {'msg_num': args['msg_num'], 'msg_texto': args['msg_texto']}
     g.mq_sender.submit(Message(msg))
     return msg, 201
Code Example #27
File: _qpid.py Project: cetres/enviosms
 def _enviar(self, mensagem):
     logger.debug("Sending a message")
     m = Message(mensagem)
     self._sender.send(m, True, self._timeout)
Code Example #28
File: amqp.py Project: lhm-limux/gosa
 def send(self, data):
     self.log.debug("sending event: %s" % data)
     msg = Message(data)
     msg.user_id = self.__user
     return self.__sender.send(msg)
Code Example #29
File: ha_tests.py Project: kgiusti/qpid-cpp-debian
 def send(key, value):
     s.send(Message(content=value, properties={"lvq-key": key}))
Code Example #30
File: messaging.py Project: KeithLatteri/awips2
 def testContentTypeOverride(self):
   m = Message()
   m.content_type = "text/html; charset=utf8"
   m.content = u"<html/>"
   assert m.content_type == "text/html; charset=utf8"
Code Example #31
File: producer.py Project: zhangqiusheng/Nitrate
        if self.connection is not None:
            Producer._connection.close()
            Producer._connection = None

    def send(self, msg, routing_key, sync=True):
        ''' Send a message to QPID broker.  '''

        try:
            self.__connect_broker_if_necessary()

        except AuthenticationFailure, err:
            errlog_writeline(
                'AuthenticationError. Please check settings\'s configuration '
                'and your authentication environment. Error message: ' +
                str(err))

        except ConnectError, err:
            errlog_writeline('ConnectError. ' + str(err))
            return

        try:
            o_msg = Message(msg, subject=routing_key)
            self.sender.send(o_msg, sync=sync)

        except ConnectionError, err:
            errlog_writeline('ConnectionError %s while sending message %s.' %
                             (str(err), str(o_msg)))

            self.stop()
Code Example #32
    def _test_unsolicited_updates(self):
        """ Verify that the Console callbacks work
        """
        class Handler(qmf.console.Console):
            def __init__(self):
                self.v1_oids = 0
                self.v1_events = 0
                self.v2_oids = 0
                self.v2_events = 0
                self.broker_info = []
                self.broker_conn = []
                self.newpackage = []
                self.newclass = []
                self.agents = []
                self.events = []
                self.updates = {}  # holds the objects by OID
                self.heartbeats = []

            def brokerInfo(self, broker):
                #print "brokerInfo:", broker
                self.broker_info.append(broker)

            def brokerConnected(self, broker):
                #print "brokerConnected:", broker
                self.broker_conn.append(broker)

            def newPackage(self, name):
                #print "newPackage:", name
                self.newpackage.append(name)

            def newClass(self, kind, classKey):
                #print "newClass:", kind, classKey
                self.newclass.append((kind, classKey))

            def newAgent(self, agent):
                #print "newAgent:", agent
                self.agents.append(agent)

            def event(self, broker, event):
                #print "EVENT %s" % event
                self.events.append(event)
                if event.isV2:
                    self.v2_events += 1
                else:
                    self.v1_events += 1

            def heartbeat(self, agent, timestamp):
                #print "Heartbeat %s" % agent
                self.heartbeats.append((agent, timestamp))

            # generic handler for objectProps and objectStats
            def _handle_obj_update(self, record):
                oid = record.getObjectId()
                if oid.isV2:
                    self.v2_oids += 1
                else:
                    self.v1_oids += 1

                if oid not in self.updates:
                    self.updates[oid] = record
                else:
                    self.updates[oid].mergeUpdate(record)

            def objectProps(self, broker, record):
                assert len(record.getProperties()
                           ), "objectProps() invoked with no properties?"
                self._handle_obj_update(record)

            def objectStats(self, broker, record):
                assert len(record.getStatistics()
                           ), "objectStats() invoked with no properties?"
                self._handle_obj_update(record)

        handler = Handler()
        self._myStartQmf(self.broker, handler)
        # this should force objectProps, queueDeclare Event callbacks
        self._create_queue("fleabag", {"auto-delete": True})
        # this should force objectStats callback
        self.broker.send_message("fleabag", Message("Hi"))
        # and we should get a few heartbeats
        sleep(self.PUB_INTERVAL)
        self.broker.send_message("fleabag", Message("Hi"))
        sleep(self.PUB_INTERVAL)
        self.broker.send_message("fleabag", Message("Hi"))
        sleep(self.PUB_INTERVAL * 2)

        assert handler.broker_info, "No BrokerInfo callbacks received"
        assert handler.broker_conn, "No BrokerConnected callbacks received"
        assert handler.newpackage, "No NewPackage callbacks received"
        assert handler.newclass, "No NewClass callbacks received"
        assert handler.agents, "No NewAgent callbacks received"
        assert handler.events, "No event callbacks received"
        assert handler.updates, "No updates received"
        assert handler.heartbeats, "No heartbeat callbacks received"

        # now verify updates for queue "fleabag" were received, and the
        # msgDepth statistic is correct

        msgs = 0
        for o in handler.updates.itervalues():
            key = o.getClassKey()
            if key and key.getClassName() == "queue" and o.name == "fleabag":
                assert o.msgDepth, "No update to msgDepth statistic!"
                msgs = o.msgDepth
                break
        assert msgs == 3, "msgDepth statistics not accurate!"

        # verify that the published objects were of the correct QMF version
        if self._broker_is_v1:
            assert handler.v1_oids and handler.v2_oids == 0, "QMFv2 updates received while in V1-only mode!"
            assert handler.v1_events and handler.v2_events == 0, "QMFv2 events received while in V1-only mode!"
        else:
            assert handler.v2_oids and handler.v1_oids == 0, "QMFv1 updates received while in V2-only mode!"
            assert handler.v2_events and handler.v1_events == 0, "QMFv1 events received while in V2-only mode!"
Code Example #33
        return True


qb_msg = {
    'LIST': [{
        'UPDATE_TIME': '2016-11-30 10:33:35',
        'DATA_SOURCE': 'XIGNITE',
        'SYMBOL': 'USDCNY',
        'ASK_PRICE': 6.884,
        'SPREAD': 0.0006,
        'SN': 9713930,
        'BID_PRICE': 6.8834
    }, {
        'UPDATE_TIME': '2016-11-30 10:33:36',
        'DATA_SOURCE': 'XIGNITE',
        'SYMBOL': 'USDCNY',
        'ASK_PRICE': 6.885,
        'SPREAD': 0.0007,
        'SN': 9713931,
        'BID_PRICE': 6.8835
    }]
}

if __name__ == '__main__':
    publisher = FanoutPublisher('192.168.1.234',
                                'fxspot.cdh2qb.push.fanout.test')
    publisher.publish(Message(content=qb_msg,
                              content_type=TYPE_MAPPINGS[dict]))
    publisher.destroy()
    print 'done.'
Code Example #34
File: amqp_proxy.py Project: lhm-limux/gosa
    def __call__(self, *args, **kwargs):
        if len(kwargs) > 0 and len(args) > 0:
            raise JSONRPCException("JSON-RPC does not support positional and keyword arguments at the same time")

        # Default to 'core' queue, pylint: disable=W0612
        queue = "core"

        if AMQPServiceProxy.methods[self.__serviceAddress]:
            if not self.__serviceName in AMQPServiceProxy.methods[self.__serviceAddress]:
                raise NameError("name '%s' not defined" % self.__serviceName)

            if AMQPServiceProxy.domain:
                queue = AMQPServiceProxy.methods[self.__serviceAddress][self.__serviceName]['target']
            else:
                queue = self.__serviceAddress

        # Find free session for requested queue
        for sess, dsc in AMQPServiceProxy.worker[self.__serviceAddress].iteritems():
            if not dsc['locked']:
                self.__ssn = dsc['ssn']
                self.__sender = dsc['sender']
                self.__receiver = dsc['receiver']
                self.__worker = sess
                dsc['locked'] = True
                break

        # No free session?
        if not self.__ssn:
            raise AMQPException('no free session - increase workers')

        # Send
        if len(kwargs):
            postdata = dumps({"method": self.__serviceName, 'params': kwargs, 'id': 'jsonrpc'})
        else:
            postdata = dumps({"method": self.__serviceName, 'params': args, 'id': 'jsonrpc'})

        message = Message(postdata)
        message.user_id = self.__URL['user']
        message.reply_to = 'reply-%s' % self.__ssn.name
        self.__sender.send(message)

        # Get response
        respdata = self.__receiver.fetch()
        resp = loads(respdata.content)
        self.__ssn.acknowledge(respdata)

        if resp['error'] != None:
            AMQPServiceProxy.worker[self.__serviceAddress][self.__worker]['locked'] = False
            raise JSONRPCException(resp['error'])

        else:
            # Look for json class hint
            if "result" in resp and \
                isinstance(resp["result"], DictType) and \
                "__jsonclass__" in resp["result"] and \
                resp["result"]["__jsonclass__"][0] == "json.ObjectFactory":

                resp = resp["result"]
                jc = resp["__jsonclass__"][1]
                del resp["__jsonclass__"]

                # Extract property presets
                data = {}
                for prop in resp:
                    data[prop] = resp[prop]

                jc.insert(0, AMQPServiceProxy(self.__serviceURL,
                    self.__serviceAddress, None, self.__conn,
                    workers=self.__workers))
                jc.append(data)
                AMQPServiceProxy.worker[self.__serviceAddress][self.__worker]['locked'] = False
                return ObjectFactory.get_instance(*jc)

            AMQPServiceProxy.worker[self.__serviceAddress][self.__worker]['locked'] = False
            return resp['result']
Code Example #35
File: ha_tests.py Project: kgiusti/qpid-cpp-debian
    def test_replication(self):
        """Test basic replication of configuration and messages before and
        after backup has connected"""
        def queue(name, replicate):
            return "%s;{create:always,node:{x-declare:{arguments:{'qpid.replicate':%s}}}}" % (
                name, replicate)

        def exchange(name, replicate, bindq):
            return "%s;{create:always,node:{type:topic,x-declare:{arguments:{'qpid.replicate':%s}, type:'fanout'},x-bindings:[{exchange:'%s',queue:'%s'}]}}" % (
                name, replicate, name, bindq)

        def setup(p, prefix, primary):
            """Create config, send messages on the primary p"""
            s = p.sender(queue(prefix + "q1", "all"))
            for m in ["a", "b", "1"]:
                s.send(Message(m))
            # Test replication of dequeue
            self.assertEqual(
                p.receiver(prefix + "q1").fetch(timeout=0).content, "a")
            p.acknowledge()
            p.sender(queue(prefix + "q2", "configuration")).send(Message("2"))
            p.sender(queue(prefix + "q3", "none")).send(Message("3"))
            p.sender(exchange(prefix + "e1", "all",
                              prefix + "q1")).send(Message("4"))
            p.sender(exchange(prefix + "e2", "all",
                              prefix + "q2")).send(Message("5"))
            # Test  unbind
            p.sender(queue(prefix + "q4", "all")).send(Message("6"))
            s3 = p.sender(exchange(prefix + "e4", "all", prefix + "q4"))
            s3.send(Message("7"))
            # Use old connection to unbind
            us = primary.connect_old().session(str(uuid4()))
            us.exchange_unbind(exchange=prefix + "e4",
                               binding_key="",
                               queue=prefix + "q4")
            p.sender(prefix + "e4").send(Message("drop1"))  # Should be dropped
            # Need a marker so we can wait till sync is done.
            p.sender(queue(prefix + "x", "configuration"))

        def verify(b, prefix, p):
            """Verify setup was replicated to backup b"""
            # Wait for configuration to replicate.
            wait_address(b, prefix + "x")
            self.assert_browse_retry(b, prefix + "q1", ["b", "1", "4"])

            self.assertEqual(
                p.receiver(prefix + "q1").fetch(timeout=0).content, "b")
            p.acknowledge()
            self.assert_browse_retry(b, prefix + "q1", ["1", "4"])

            self.assert_browse_retry(b, prefix + "q2",
                                     [])  # configuration only
            assert not valid_address(b, prefix + "q3")
            b.sender(prefix + "e1").send(
                Message(prefix + "e1"))  # Verify binds with replicate=all
            self.assert_browse_retry(b, prefix + "q1",
                                     ["1", "4", prefix + "e1"])
            b.sender(prefix + "e2").send(
                Message(prefix +
                        "e2"))  # Verify binds with replicate=configuration
            self.assert_browse_retry(b, prefix + "q2", [prefix + "e2"])

            b.sender(prefix + "e4").send(Message("drop2"))  # Verify unbind.
            self.assert_browse_retry(b, prefix + "q4", ["6", "7"])

        primary = HaBroker(self, name="primary")
        primary.promote()
        p = primary.connect().session()

        # Create config, send messages before starting the backup, to test catch-up replication.
        setup(p, "1", primary)
        backup = HaBroker(self, name="backup", brokers_url=primary.host_port())
        # Create config, send messages after starting the backup, to test steady-state replication.
        setup(p, "2", primary)

        # Verify the data on the backup
        b = backup.connect_admin().session()
        verify(b, "1", p)
        verify(b, "2", p)
        # Test a series of messages, enqueue all then dequeue all.
        s = p.sender(queue("foo", "all"))
        wait_address(b, "foo")
        msgs = [str(i) for i in range(10)]
        for m in msgs:
            s.send(Message(m))
        self.assert_browse_retry(p, "foo", msgs)
        self.assert_browse_retry(b, "foo", msgs)
        r = p.receiver("foo")
        for m in msgs:
            self.assertEqual(m, r.fetch(timeout=0).content)
        p.acknowledge()
        self.assert_browse_retry(p, "foo", [])
        self.assert_browse_retry(b, "foo", [])

        # Another series, this time verify each dequeue individually.
        for m in msgs:
            s.send(Message(m))
        self.assert_browse_retry(p, "foo", msgs)
        self.assert_browse_retry(b, "foo", msgs)
        for i in range(len(msgs)):
            self.assertEqual(msgs[i], r.fetch(timeout=0).content)
            p.acknowledge()
            self.assert_browse_retry(p, "foo", msgs[i + 1:])
            self.assert_browse_retry(b, "foo", msgs[i + 1:])