Example #1
    def test_drain_nowait(self):
        c = Connection(transport=Mock)
        c.drain_events = Mock()
        c.drain_events.side_effect = socket.timeout()

        c.more_to_read = True
        self.assertFalse(c.drain_nowait())
        self.assertFalse(c.more_to_read)

        c.drain_events.side_effect = socket.error()
        c.drain_events.side_effect.errno = errno.EAGAIN
        c.more_to_read = True
        self.assertFalse(c.drain_nowait())
        self.assertFalse(c.more_to_read)

        c.drain_events.side_effect = socket.error()
        c.drain_events.side_effect.errno = errno.EPERM
        with self.assertRaises(socket.error):
            c.drain_nowait()

        c.more_to_read = False
        c.drain_events = Mock()
        self.assertTrue(c.drain_nowait())
        c.drain_events.assert_called_with(timeout=0)
        self.assertTrue(c.more_to_read)
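
The test above pins down the contract of drain_nowait: socket.timeout and EAGAIN are swallowed and clear more_to_read, any other socket.error propagates, and a successful poll calls drain_events(timeout=0) and sets more_to_read. A minimal free-function sketch satisfying those assertions (the real implementation under test may differ):

import errno
import socket

def drain_nowait(c):
    # Poll the connection once without blocking, mirroring the
    # semantics asserted by the test above.
    try:
        c.drain_events(timeout=0)
    except socket.timeout:
        c.more_to_read = False          # nothing buffered right now
        return False
    except socket.error as exc:
        if exc.errno == errno.EAGAIN:   # would block: treat as "no data"
            c.more_to_read = False
            return False
        raise                           # e.g. EPERM must propagate
    c.more_to_read = True
    return True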
Example #3

class KombuConsumer(AbstractConsumer):
    """
    使用kombu作为中间件,这个能直接一次性支持很多种小众中间件,但性能很差,除非是分布式函数调度框架没实现的中间件种类用户才可以用这种,用户也可以自己对比性能。
    """

    BROKER_KIND = 15

    def custom_init(self):
        self._middware_name = frame_config.KOMBU_URL.split(":")[0]
        logger_name = f'{self._logger_prefix}{self.__class__.__name__}--{self._middware_name}--{self._queue_name}'
        self.logger = LogManager(logger_name).get_logger_and_add_handlers(
            self._log_level,
            log_filename=f'{logger_name}.log' if self._create_logger_file else None,
            formatter_template=frame_config.NB_LOG_FORMATER_INDEX_FOR_CONSUMER_AND_PUBLISHER,
        )
        patch_kombu_redis()

    # noinspection DuplicatedCode
    def _shedual_task(self):  # Started by an outer while-1 loop, so it reconnects automatically.
        def callback(body: dict,
                     message: kombu.transport.virtual.base.Message):
            # print(type(body),body,type(message),message)
            self._print_message_get_from_broker('kombu', body)
            # self.logger.debug(f"""message fetched from kombu {self._middware_name}: {body}""")
            kw = {
                'body': body,
                'message': message,
            }
            self._submit_task(kw)

        self.exchange = Exchange('distributed_framework_exchange',
                                 'direct',
                                 durable=True)
        self.queue = Queue(self._queue_name,
                           exchange=self.exchange,
                           routing_key=self._queue_name,
                           auto_delete=False)
        self.conn = Connection(frame_config.KOMBU_URL,
                               transport_options={"visibility_timeout": 600})  # default is 3600 seconds before unacked messages return to the queue
        self.queue(self.conn).declare()
        with self.conn.Consumer(self.queue,
                                callbacks=[callback],
                                no_ack=False,
                                prefetch_count=100) as consumer:
            # Process messages and handle events on all channels
            channel = consumer.channel  # type:Channel
            channel.body_encoding = 'no_encode'  # Change the body encoding: by default messages are base64-encoded before being stored in the broker, which makes the plaintext inconvenient to inspect.
            while True:
                self.conn.drain_events()

    def _confirm_consume(self, kw):
        kw['message'].ack()

    def _requeue(self, kw):
        kw['message'].requeue()
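
For completeness, a sketch of the matching publish side for the consumer above, reusing its exchange and routing conventions (the broker URL and queue name below are placeholders, not values from frame_config):

from kombu import Connection, Exchange, Queue, Producer

exchange = Exchange('distributed_framework_exchange', 'direct', durable=True)
queue = Queue('my_queue', exchange=exchange, routing_key='my_queue',
              auto_delete=False)

with Connection('redis://127.0.0.1:6379/0') as conn:  # placeholder KOMBU_URL
    producer = Producer(conn.default_channel)
    # declare=[queue] makes sure the queue exists before any consumer binds
    producer.publish({'say': 'hello'}, exchange=exchange, routing_key='my_queue',
                     declare=[queue], serializer='json')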
Example #4

def run_pulse_listener(username, password, timeout, no_send):
    """Run a Pulse message queue listener."""
    connection = Connection(
        hostname='pulse.mozilla.org',
        port=5671,
        ssl=True,
        userid=username,
        password=password,
    )

    # Connect and pass in our own low value for retries so the connection
    # fails fast if there is a problem.
    connection.ensure_connection(
        max_retries=1
    )  # Retries must be >=1 or it will retry forever.

    with closing(connection):
        hgpush_exchange = Exchange(config.PULSE_EXCHANGE, 'topic', channel=connection)

        # Pulse queue names need to be prefixed with the username
        queue_name = f'queue/{username}/{config.PULSE_QUEUE_NAME}'
        queue = Queue(
            queue_name,
            exchange=hgpush_exchange,
            routing_key=config.PULSE_QUEUE_ROUTING_KEY,
            durable=True,
            exclusive=False,
            auto_delete=False,
            channel=connection,
        )

        # Passing passive=True will assert that the exchange exists but won't
        #  try to declare it.  The Pulse server forbids declaring exchanges.
        hgpush_exchange.declare(passive=True)

        # Queue.declare() also declares the exchange, which isn't allowed by
        # the Pulse server. Use the low-level Queue API to only declare the
        # queue itself.
        queue.queue_declare()
        queue.queue_bind()

        callback = partial(process_push_message, no_send=no_send)

        # Pass auto_declare=False so that Consumer does not try to declare the
        # exchange.  Declaring exchanges is not allowed by the Pulse server.
        with connection.Consumer(
            queue, callbacks=[callback], auto_declare=False
        ) as consumer:

            if no_send:
                log.info('transmission of ping data has been disabled')
                log.info('message acks have been disabled')

            log.info('reading messages')
            try:
                connection.drain_events(timeout=timeout)
            except socket.timeout:
                log.info('message queue is empty, nothing to do')

    log.info('done')
Example #5
 def run(self):
     connection = Connection(**connection_params)
     channel = connection.channel()
     channel.basic_qos(prefetch_size=0, prefetch_count=1, a_global=True)
     while True:
         with Consumer(channel, queues, callbacks=[self.on_message]):
             connection.drain_events()
Example #6
def consumer():
    rabbit_url = "amqp://localhost:5672/"
    conn = Connection(rabbit_url)
    exchange = Exchange("scrapy", type="direct")
    queue = Queue(name="quotation", exchange=exchange, routing_key="quotes")
    with Consumer(conn, queues=queue, callbacks=[process_message], accept=["json"]):
        try:
            conn.drain_events(timeout=2)
        except socket.timeout:  # assumes `import socket` at module top
            pass  # queue stayed quiet for 2 seconds; treat it as drained
    return "consumed successfully"
Example #7
class _AMQPServerWrapper(object):
    def __init__(self, amqp_url, logs):
        self.__logs = logs
        self.__amqp_url = amqp_url
        self.__monitors = {}
        self.__connection = Connection(self.__amqp_url)
        self.__connection.connect()
        self.__running = True
        self.__consumer_gl = gevent.spawn(self.__consumer_greenlet_main)
        self.__consumer_gl.greenlet_name = 'amqp-consumer-gl'  # allowing flogging to print a nice name
        gevent.sleep(0.0)

    def __consumer_greenlet_main(self):
        gevent.sleep(0)
        while self.__running:
            try:
                self.__connection.drain_events(timeout=0.5)
            except Exception as ex:     # NOQA: assigned but not used (left in for super-duper-low-level-debug)
                # print("was woken because {}".format(ex))
                pass
            gevent.sleep(0.1)  # make -sure- to yield cpu...
            # print("---loop")

    def stop_greenlet(self):
        self.__running = False

    @property
    def connected(self):
        return self.__connection.connected

    def create_add_tracker(self, exchange, routing_key, event_cb, queue_name=None):
        self.__logs.irl.debug("AMQPServerWrapper: create_add_tracker ex=%s, rk=%s, event_cb=%s",
                              exchange, routing_key, event_cb)
        mon = _KeyedConsumerHandler.get_keyed_consumer(
            self.__logs, self.__connection, exchange, routing_key, queue_name, event_cb)
        return mon.exchange

    def inject(self, exchange, routing_key, payload):
        self.__logs.irl.debug("Injecting a test AMQP message: ex=%s, rk=%s, payload=%s", exchange, routing_key, payload)
        if not isinstance(exchange, Exchange):
            exchange = Exchange(exchange, 'topic')
        prod = Producer(self.__connection, exchange=exchange, routing_key=routing_key)
        prod.publish(payload)

    def test_helper_sync_send_msg(self, exchange, ex_rk, send_rk, payload):
        ex = Exchange(exchange, 'topic')
        queue = Queue(exchange=ex, routing_key=ex_rk + '.*', exclusive=True, channel=self.__connection)
        queue.declare()
        prod = Producer(self.__connection, exchange=ex, routing_key=send_rk)
        prod.publish(payload)
        return queue

    def test_helper_sync_recv_msg(self, queue):
        for tick in range(10):
            msg = queue.get()
            if msg is not None:
                break
        return msg
Example #8

def worker(mq_url):
    connection = Connection(mq_url)
    channel = connection.channel()
    consumer_json = Consumer(channel, task_json_queue, callbacks=[process_json], accept=["json"])
    consumer_json.consume()
    consumer_pickle = Consumer(channel, task_pickle_queue, callbacks=[process_pickle], accept=["pickle"])
    consumer_pickle.consume()
    while True:
        connection.drain_events()
Example #9
class ClientMQ(object):
    connection = None
    nodeName = None
    callback = None

    def __init__(self, host, vhost, username, password, nodeName, callback):
        self.init_connect(host, vhost, username, password)
        self.nodeName = nodeName
        self.callback = callback
        self.queue = ThreadQueue()
        self.producer = ClientProducer(self.connection, 'HOST')
        self.initRecv()

        self.startThread()

    def init_connect(self, host, vhost, username, password):
        self.connection = Connection(hostname=host, virtual_host=vhost, userid=username, password=password)

    def initRecv(self):
        self.consumer = ClientConsumer(self.connection, self.nodeName, self.callback)
        while not self.consumer.init():
            time.sleep(1)

        def start_consume():
            while True:
                self.connection.drain_events()

        t = Thread(target=start_consume)
        t.start()

    def startThread(self):
        def start_produce():
            while True:
                try:
                    msg = self.queue.get()
                    while not self.producer.publish(message=msg):
                        time.sleep(1)
                except Exception as e:
                    logger.exception(e)

        t = Thread(target=start_produce)
        t.start()

    def is_open(self):
        if self.connection:
            return self.connection.connected
        else:
            return False

    def update_bean(self, cls_name, content):
        msg = '1' + to_unicode(cls_name) + ':' + to_unicode(content)
        self.queue.put(msg)

    def delete_bean(self, cls_name, ids):
        msg = '0' + to_unicode(cls_name) + ':' + to_unicode(ids)
        self.queue.put(msg)
Example #10
class DBwriter:
    def __init__(self, broker_cloud, host_influxdb):
        self.clientDB = InfluxDBClient(host_influxdb, 8086, 'root', 'root',
                                       'Collector_DB')
        self.clientDB.create_database('Collector_DB')

        self.consumer_connection = Connection(broker_cloud)
        self.exchange = Exchange("IoT", type="direct")

    def write_db(self, data_points):

        print(len(data_points))
        for point in data_points:
            record = [{
                'measurement': point['MetricId'],
                'tags': {
                    'DataType': point['DataType'],
                },
                'fields': {
                    'Value': point['Value'],
                },
                'time': point['TimeCollect']
            }]
            try:
                self.clientDB.write_points(record)
                print(datetime.now().strftime('%Y-%m-%d %H:%M:%S') +
                      ': Updated Database')
            except Exception:
                print("Can't write to database: {}".format(point['MetricId']))
                print("Deleting measurement...")
                self.clientDB.drop_measurement(measurement=point['MetricId'])

    def api_write_db(self, body, message):
        data_points = json.loads(body)['body']['data_points']
        self.write_db(data_points)

    def run(self):
        queue_write_db = Queue(name='dbwriter.request.api_write_db',
                               exchange=self.exchange,
                               routing_key='dbwriter.request.api_write_db',
                               message_ttl=20)
        while 1:
            try:
                self.consumer_connection.ensure_connection(max_retries=1)
                with Consumer(self.consumer_connection,
                              queues=queue_write_db,
                              callbacks=[self.api_write_db],
                              no_ack=True):
                    while True:
                        self.consumer_connection.drain_events()
            except (ConnectionRefusedError, exceptions.OperationalError):
                print('Connection lost')
            except self.consumer_connection.connection_errors:
                print('Connection error')
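
The reconnect loop in run above is a pattern several examples on this page repeat. Factored out, it looks roughly like this (a sketch; the retry policy and printed messages are assumptions):

import time
from kombu import Connection, Consumer

def consume_forever(url, queues, callbacks):
    # Consume with automatic reconnect on broker failure.
    while True:
        conn = Connection(url)
        try:
            conn.ensure_connection(max_retries=1)  # fail fast; outer loop retries
            with Consumer(conn, queues=queues, callbacks=callbacks, no_ack=True):
                while True:
                    conn.drain_events()
        except conn.connection_errors:
            print('Connection error, retrying in 1s')
            time.sleep(1)
        except OSError:
            print('Connection lost, retrying in 1s')
            time.sleep(1)
        finally:
            conn.release()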
Example #11

 def listen(self):
     logger.debug('AutophonePulseMonitor: start shared_lock.acquire')
     connection = None
     restart = True
     while restart:
         restart = False
         self.shared_lock.acquire()
         try:
             # connection does not connect to the server until
             # either the connection.connect() method is called
             # explicitly or until kombu calls it implicitly as
             # needed.
             connection = Connection(hostname=self.hostname,
                                     userid=self.userid,
                                     password=self.password,
                                     virtual_host=self.virtual_host,
                                     port=DEFAULT_SSL_PORT,
                                     ssl=True)
             consumer = connection.Consumer(self.queues,
                                            callbacks=[self.handle_message],
                                            accept=['json'],
                                            auto_declare=False)
             for queue in self.queues:
                 queue(connection).queue_declare(passive=False)
                 queue(connection).queue_bind()
             with consumer:
                 while not self._stopping.is_set():
                     try:
                         logger.debug('AutophonePulseMonitor shared_lock.release')
                         self.shared_lock.release()
                         connection.drain_events(timeout=self.timeout)
                     except socket.timeout:
                         pass
                     except KeyboardInterrupt:
                         raise
                     finally:
                         logger.debug('AutophonePulseMonitor shared_lock.acquire')
                         self.shared_lock.acquire()
             logger.debug('AutophonePulseMonitor.listen: stopping')
         except Exception:
             logger.exception('AutophonePulseMonitor Exception')
             if connection:
                 connection.release()
             restart = True
             time.sleep(1)
         finally:
             logger.debug('AutophonePulseMonitor exit shared_lock.release')
             if connection and not restart:
                 connection.release()
             self.shared_lock.release()
Example #12
    def test_accept__content_disallowed(self):
        conn = Connection('memory://')
        q = Queue('foo', exchange=self.exchange)
        p = conn.Producer()
        p.publish(
            {'complex': object()},
            declare=[q], exchange=self.exchange, serializer='pickle',
        )

        callback = Mock(name='callback')
        with conn.Consumer(queues=[q], callbacks=[callback]) as consumer:
            with self.assertRaises(consumer.ContentDisallowed):
                conn.drain_events(timeout=1)
        self.assertFalse(callback.called)
Example #13

    def test_accept__content_disallowed(self):
        conn = Connection('memory://')
        q = Queue('foo', exchange=self.exchange)
        p = conn.Producer()
        p.publish(
            {'complex': object()},
            declare=[q], exchange=self.exchange, serializer='pickle',
        )

        callback = Mock(name='callback')
        with conn.Consumer(queues=[q], callbacks=[callback]) as consumer:
            with self.assertRaises(consumer.ContentDisallowed):
                conn.drain_events(timeout=1)
        callback.assert_not_called()
Example #14

def worker(mq_url):
    connection = Connection(mq_url)
    channel = connection.channel()
    consumer_json = Consumer(channel,
                             task_json_queue,
                             callbacks=[process_json],
                             accept=['json'])
    consumer_json.consume()
    consumer_pickle = Consumer(channel,
                               task_pickle_queue,
                               callbacks=[process_pickle],
                               accept=['pickle'])
    consumer_pickle.consume()
    while True:
        connection.drain_events()
Example #15
 def run(self):
     try:
         connection = Connection(hostname=self.host, port=self.port,
                                 userid=self.usr, password=self.psw,
                                 virtual_host=self.virtual_host)
         channel = connection.channel()
         self.producer = Producer(channel)
         task_queue = Queue(self.queue_name, durable=True)
         consumer = Consumer(channel, task_queue, no_ack=False)
         consumer.qos(prefetch_count=1)
         consumer.register_callback(self.RequestCallBack)
         consumer.consume()
         while True:
             connection.drain_events()
         connection.close()  # unreachable: the loop above never exits
     except BaseException as e:
         print(e)
Example #16

    def test_accept__content_allowed(self):
        conn = Connection('memory://')
        q = Queue('foo', exchange=self.exchange)
        p = conn.Producer()
        p.publish(
            {'complex': object()},
            declare=[q], exchange=self.exchange, serializer='pickle',
        )

        callback = Mock(name='callback')
        with conn.Consumer(queues=[q], accept=['pickle'],
                           callbacks=[callback]):
            conn.drain_events(timeout=1)
        callback.assert_called()
        body, message = callback.call_args[0]
        self.assertTrue(body['complex'])
Example #17
    def test_accept__content_allowed(self):
        conn = Connection('memory://')
        q = Queue('foo', exchange=self.exchange)
        p = conn.Producer()
        p.publish(
            {'complex': object()},
            declare=[q], exchange=self.exchange, serializer='pickle',
        )

        callback = Mock(name='callback')
        with conn.Consumer(queues=[q], accept=['pickle'],
                           callbacks=[callback]):
            conn.drain_events(timeout=1)
        self.assertTrue(callback.called)
        body, message = callback.call_args[0]
        self.assertTrue(body['complex'])
Example #18
class Consumer:
    def __init__(self, queue):
        self.conn = Connection('amqp://*****:*****@rabbit:5672//')
        self.task_queue = Queue(queue, routing_key=queue)

    def start_consumer(self):
        # NB: this class shadows kombu's Consumer; reference the kombu class
        # through the module instead (assumes `import kombu` at the top).
        consumer = kombu.Consumer(self.conn, [self.task_queue],
                                  callbacks=[self.process_message])
        consumer.consume()

        self.conn.drain_events()

    @staticmethod
    def process_message(body, message):
        print(body['message']['Value'], flush=True)
        message.ack()
Example #19

def consumer():
    rabbit_host = "amqp://{host}:5672/".format(host=sys.argv[1])
    print("Connection to {host}".format(host=rabbit_host))
    
    # Create the connection
    conn = Connection(rabbit_host)
    
    # Create the exchange
    test_exchange = Exchange("test_exchange", type="direct")
    
    # Create the queue
    queue = Queue(name="queue", exchange=test_exchange, routing_key="test")
    
    # Create the consumer
    with Consumer(conn, queues=queue, callbacks=[process_message], accept=["text/plain"]):
        conn.drain_events()
Example #20
    def start(self):
        log.info("Listening for Pulse messages")
        self.running = True

        connection = Connection(
            hostname=self.pulse_host,
            userid=self.pulse_user,
            password=self.pulse_password,
            ssl=True,
            # Kombu doesn't support the port correctly for amqp with ssl...
            port=5671,
        )
        consumers = []
        for event in self.events:
            log.debug("Setting up queue on exchange: %s with routing_key: %s", event.exchange, event.routing_key)
            # Passive exchanges must be used, otherwise kombu will try to
            # create the exchange (which we don't want, we're consuming
            # an existing one!)
            e = Exchange(name=event.exchange, type="topic", passive=True)
            q = Queue(
                name=event.queue_name,
                exchange=e,
                routing_key=event.routing_key,
                durable=True,
                exclusive=False,
                auto_delete=False
            )
            c = connection.Consumer(
                queues=[q],
                callbacks=[event.callback]
            )
            c.consume()
            consumers.append(c)

        try:
            # XXX: drain_events only returns after receiving a message. Is
            # there a way we can have it return regularly to be non-blocking?
            # Its timeout parameter seems to break receiving of messages.
            # Maybe it doesn't matter if we can't shut down gracefully since
            # messages will be reprocessed next time.
            while self.running:
                connection.drain_events()
        finally:
            for c in consumers:
                c.close()
            connection.close()
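
On the XXX above: drain_events does take a timeout, and other examples on this page (Example #30 and the Autophone listen methods) use it by catching socket.timeout, which lets the loop re-check its stop flag regularly. A sketch of that shape (running is a callable returning the loop flag, e.g. lambda: self.running):

import socket

def drain_while(connection, running, interval=1.0):
    # Drain events, but wake up every `interval` seconds to re-check the flag.
    while running():
        try:
            connection.drain_events(timeout=interval)
        except socket.timeout:
            pass  # idle interval; loop around and re-check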
Example #22
class Amqp(object):
    def __init__(self, url, exchange, queue, routing_key):

        self.conn = Connection(url)
        self.exchange = Exchange(exchange, 'direct')
        self.routing_key = routing_key
        self.queue = Queue(queue, self.exchange, self.routing_key)

        self.producer = None
        self.consumer = None

    def send(self, obj):
        if not self.producer:
            self.producer = self.conn.Producer()
        self.producer.publish(obj,
                              exchange=self.exchange,
                              routing_key=self.routing_key,
                              declare=[self.queue],
                              serializer='json',
                              compression='zlib')

    def poll(self, cb_func):
        if not self.consumer:
            self.consumer = self.conn.Consumer(self.queue, callbacks=[cb_func])
            self.consumer.qos(prefetch_count=1)
        self.consumer.consume()
        while True:
            self.conn.drain_events()

    def _release(self):
        if self.consumer:
            self.consumer.close()
            self.consumer = None
        if self.producer:
            self.producer.close()
            self.producer = None
        if self.conn:
            self.conn.release()
            self.conn = None

    def __enter__(self):
        return self

    def __exit__(self, exec_type, exc_value, traceback):
        self._release()
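
Since the wrapper above implements __enter__/__exit__, it can be used as a context manager. A usage sketch (the URL, names, and callback are placeholders):

def on_message(body, message):
    print(body)
    message.ack()

with Amqp('amqp://localhost:5672//', 'my_exchange', 'my_queue', 'my_key') as amqp:
    amqp.send({'hello': 'world'})
    amqp.poll(on_message)  # blocks, draining events until interrupted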
Example #23
class Actor_1(Actor_Base):
    def __init__(self, actor_name, actor_id):
        Actor_Base.__init__(self, actor_name, actor_id)

        BROKER_CLOUD = "localhost"
        self.producer_connection = Connection(BROKER_CLOUD)
        self.consumer_connection = Connection(BROKER_CLOUD)
        self.exchange = Exchange("IoT", type="direct")
        self.queue_get_states = Queue(name='event_generator.to.' +
                                      str(self.actor_id),
                                      exchange=self.exchange,
                                      routing_key='event_generator.to.' +
                                      str(self.actor_id))  #, message_ttl=20)

        self.db = MySQLdb.connect(host="0.0.0.0", user="******", passwd="root")
        self.cursor = self.db.cursor()

    def execute(self, action_type, action_id):
        pass

    def receiveCallToAction(self):
        def handle_notification(body, message):
            print("Receive Event!")
            data = json.loads(body)
            action_id = data["action_id"]
            action_name = data["action_name"]
            action_type = data["action_type"]

            self.execute(action_type, action_id)
            print("Executed action: ", action_id)

        # End handle_notification

        try:
            self.consumer_connection.ensure_connection(max_retries=1)
            with Consumer(self.consumer_connection,
                          queues=self.queue_get_states,
                          callbacks=[handle_notification],
                          no_ack=True):
                while True:
                    self.consumer_connection.drain_events()
        except (ConnectionRefusedError, exceptions.OperationalError):
            print('Connection lost')
        except self.consumer_connection.connection_errors:
            print('Connection error')
Example #24
class Amqp(object):

    def __init__(self, url, exchange, queue, routing_key):

        self.conn = Connection(url)
        self.exchange = Exchange(exchange, 'direct')
        self.routing_key = routing_key
        self.queue = Queue(queue, self.exchange, self.routing_key)

        self.producer = None
        self.consumer = None

    def send(self, obj):
        if not self.producer:
            self.producer = self.conn.Producer()
        self.producer.publish(obj, exchange=self.exchange,
                              routing_key=self.routing_key,
                              declare=[self.queue],
                              serializer='json', compression='zlib')

    def poll(self, cb_func):
        if not self.consumer:
            self.consumer = self.conn.Consumer(self.queue,
                                               callbacks=[cb_func])
            self.consumer.qos(prefetch_count=1)
        self.consumer.consume()
        while True:
            self.conn.drain_events()

    def _release(self):
        if self.consumer:
            self.consumer.close()
            self.consumer = None
        if self.producer:
            self.producer.close()
            self.producer = None
        if self.conn:
            self.conn.release()
            self.conn = None

    def __enter__(self):
        return self

    def __exit__(self, exec_type, exc_value, traceback):
        self._release()
Example #25
    def run(self):
        connection = Connection(hostname=self.host, port=self.port,
                                userid=self.usr, password=self.psw,
                                virtual_host=self.virtual_host)
        channel = connection.channel()
        self.producer = Producer(channel)

        queueargs = {}
        if self.msg_timeout:
            queueargs['x-message-ttl'] = self.msg_timeout

        task_queue = Queue(self.queue_name, durable=True,
                           queue_arguments=queueargs if queueargs else None)
        consumer = Consumer(channel, task_queue, no_ack=False)
        consumer.qos(prefetch_count=1)
        consumer.register_callback(self.RequestCallBack)
        consumer.consume()
        while self.task_count:
            connection.drain_events()
            self.task_count -= 1
        connection.close()
Example #26
 def run(self):
     try:
         connection = Connection(hostname=self.host,
                                 port=self.port,
                                 userid=self.usr,
                                 password=self.psw,
                                 virtual_host=self.virtual_host)
         channel = connection.channel()
         self.producer = Producer(channel)
         task_queue = Queue(self.queue_name, durable=True)
         consumer = Consumer(channel, task_queue, no_ack=False)
         consumer.qos(prefetch_count=1)
         consumer.register_callback(self.RequestCallBack)
         consumer.consume()
         while True:
             connection.drain_events()
         connection.close()  # unreachable: the loop above never exits
     except BaseException as e:
         print(e)
Example #27
def heartbeat_check():
    print("Heartbeat check")
    rabbit_host = "amqp://{host}:5672/".format(host=sys.argv[1])
    print("Connection to {host}".format(host=rabbit_host))

    # Create the connection
    conn = Connection(rabbit_host)

    # Create the exchange
    test_exchange = Exchange("test_exchange", type="direct")

    # Create the queue
    queue = Queue(name="queue", exchange=test_exchange, routing_key="test")

    while True:
        print("hb ping")
        conn.heartbeat_check(rate=5)
        try:
            conn.drain_events(timeout=0.01)  # service pending I/O, including heartbeats
        except socket.timeout:
            pass  # no traffic is fine; heartbeat_check above already ran
        print("hb pong")
        time.sleep(10)
Example #28

 def listen(self):
     logger = utils.getLogger()
     connect_timeout = 5
     wait = 30
     connection = None
     restart = True
     while restart:
         restart = False
         try:
             # connection does not connect to the server until
             # either the connection.connect() method is called
             # explicitly or until kombu calls it implicitly as
             # needed.
             logger.debug('AutophonePulseMonitor: Connection()')
             connection = Connection(hostname=self.hostname,
                                     userid=self.userid,
                                     password=self.password,
                                     virtual_host=self.virtual_host,
                                     port=DEFAULT_SSL_PORT,
                                     ssl=True,
                                     connect_timeout=connect_timeout)
             logger.debug('AutophonePulseMonitor: connection.Consumer()')
             consumer = connection.Consumer(self.queues,
                                            callbacks=[self.handle_message],
                                            accept=['json'],
                                            auto_declare=False)
             logger.debug('AutophonePulseMonitor: bind queues')
             for queue in self.queues:
                 queue(connection).queue_declare(passive=False)
                 queue(connection).queue_bind()
             with consumer:
                 while not self._stopping.is_set():
                     try:
                         connection.drain_events(timeout=self.timeout)
                     except socket.timeout:
                         pass
                     except socket.error as e:
                         if "timed out" not in str(e):
                             raise
             logger.debug('AutophonePulseMonitor.listen: stopping')
         except:
Example #30
def run(rabbit_url):
    print(rabbit_url)
    conn = Connection(rabbit_url)
    conn.ensure_connection()
    conn.connect()
    exchange = Exchange(config.EXCHANGE_NAME, type='direct')
    queue = Queue(name=config.QUEUE_NAME,
                  exchange=exchange,
                  routing_key=config.ROUTING_KEY)
    consumer = Consumer(conn,
                        queues=queue,
                        callbacks=[process_message],
                        accept=['text/plain'])
    consumer.consume()

    while True:
        try:
            print('drain_events')
            conn.drain_events(timeout=2)
        except socket.timeout:
            pass
Example #31
class RpcProxy(object):
    def __init__(self, amqp_url):
        self.connection = Connection(amqp_url)
        self.callback_queue = Queue(uuid(), exclusive=True, auto_delete=True)

    def on_response(self, message):
        if message.properties['correlation_id'] == self.correlation_id:
            result = message.payload
            ret = pickle.loads(result)
            if isinstance(ret, Exception):
                raise ret
            else:
                self.response = ret

    def __getattr__(self, name):
        def do_rpc(*args, **kwargs):
            self.response = None
            self.correlation_id = uuid()
            with Producer(self.connection) as producer:
                producer.publish(
                    pickle.dumps((name, args, kwargs)),
                    exchange='',
                    routing_key=settings.RPC_QUEUE,
                    declare=[self.callback_queue],
                    reply_to=self.callback_queue.name,
                    correlation_id=self.correlation_id,
                )

            with Consumer(self.connection,
                          on_message=self.on_response,
                          queues=[self.callback_queue],
                          no_ack=True,
                          accept=['json', 'pickle', 'msgpack']):
                while self.response is None:
                    self.connection.drain_events()
            return self.response

        return do_rpc
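
A usage sketch for the proxy above (it assumes a server consuming settings.RPC_QUEUE that unpickles (name, args, kwargs), runs the call, and publishes the pickled result to reply_to with the same correlation_id):

proxy = RpcProxy('amqp://guest:guest@localhost:5672//')  # placeholder URL
result = proxy.add(2, 3)  # any attribute access becomes a blocking remote call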
Example #32
class NlpPipe:
    def __init__(self, conn_str='amqp://*****:*****@localhost/'):
        self.conn_str = conn_str
        self.conn = Connection(conn_str)
        self.callback_queue = Queue(str(uuid()), exclusive=True, auto_delete=True)

    def on_response(self, message):
        if message.properties['correlation_id'] == self.correlation_id:
            res = message.payload
            if isinstance(res, str):
                res = json.loads(res)
            self.response = res

    def process(self, data, steps):
        self.response = None
        self.correlation_id = uuid()
        logger.info('publish queue={} next={} reply_to={}'.format(steps[0], steps[1:] or None, self.callback_queue.name))
        with Producer(self.conn) as producer:
            producer.publish(
                data,
                exchange='',
                routing_key=steps[0],
                # headers={'next': json.loads(json.dumps(steps[1:]))},
                # headers={'next': ('aa', 'bbb')},
                # headers={'next': json.dumps(['aa', 'bbb'])},
                headers={'next': '|'.join(steps[1:])},
                declare=[self.callback_queue],
                reply_to=self.callback_queue.name,
                correlation_id=self.correlation_id,
            )
        with Consumer(self.conn,
                      on_message=self.on_response,
                      queues=[self.callback_queue], no_ack=True):
            while self.response is None:
                self.conn.drain_events()
        logger.info("Response {}".format(self.response))
        return self.response
Example #33

class FibonacciRpcClient(object):
    def __init__(self):
        self.connection = Connection('amqp://localhost//')
        self.channel = self.connection.channel()
        self.response_queue = self.channel.queue_declare(exclusive=True)

        self.channel.basic_consume(
                queue=self.response_queue.queue,
                callback=self.on_response,
                no_ack=True)

    def on_response(self, response):
        '''consume callback'''
        correlation_id = response.properties['correlation_id']
        if correlation_id == self.corr_id:
            self.response = response.body

    def call(self, n):
        #ipdb.set_trace()
        self.response = None
        self.corr_id = str(uuid.uuid4())
        producer = Producer(self.connection)
        producer.publish(
                str(n),
                exchange='',
                routing_key='rpc_queue',
                reply_to=self.response_queue.queue,
                correlation_id=self.corr_id)

        # kombu's closest analogue to pika's connection.process_data_events()
        # is simply draining events until the reply has arrived:
        while self.response is None:
            self.connection.drain_events()

        return int(self.response)
Example #34
    def test_publish__consume(self):
        connection = Connection(transport=Transport)
        channel = connection.channel()
        producer = Producer(channel, self.exchange, routing_key='test_Redis')
        consumer = Consumer(channel, queues=[self.queue])

        producer.publish({'hello2': 'world2'})
        _received = []

        def callback(message_data, message):
            _received.append(message_data)
            message.ack()

        consumer.register_callback(callback)
        consumer.consume()

        self.assertIn(channel, channel.connection.cycle._channels)
        try:
            connection.drain_events(timeout=1)
            self.assertTrue(_received)
            with self.assertRaises(socket.timeout):
                connection.drain_events(timeout=0.01)
        finally:
            channel.close()
Example #35
    def test_publish__consume(self):
        connection = Connection(transport=Transport)
        channel = connection.channel()
        producer = Producer(channel, self.exchange, routing_key='test_Redis')
        consumer = Consumer(channel, self.queue)

        producer.publish({'hello2': 'world2'})
        _received = []

        def callback(message_data, message):
            _received.append(message_data)
            message.ack()

        consumer.register_callback(callback)
        consumer.consume()

        self.assertIn(channel, channel.connection.cycle._channels)
        try:
            connection.drain_events(timeout=1)
            self.assertTrue(_received)
            with self.assertRaises(socket.timeout):
                connection.drain_events(timeout=0.01)
        finally:
            channel.close()
Example #36
class RabbitmqDaemon(threading.Thread):
  def __init__(self, cmd_q=None, reply_q=None):
    threading.Thread.__init__(self)
    self.settings = ConfigParser.ConfigParser()
    self.settings.read('../config/site.ini')
    self.rabbitmqUsername = self.settings.get('rabbitmq', 'username')
    self.rabbitmqPassword = self.settings.get('rabbitmq', 'password')
    self.rabbitmqHost = self.settings.get('rabbitmq', 'host')
    self.conn = Connection('amqp://'+self.rabbitmqUsername+':'+self.rabbitmqPassword+'@'+self.rabbitmqHost+':5672//')
    self.producer = Producer(self.conn.channel(), exchange = Exchange('eyezon.status', type='fanout'), serializer="json")
    self.rpcProducer= Producer(self.conn.channel(), serializer="json")

    self.cmd_q = cmd_q or Queue.Queue()
    self.reply_q = reply_q or Queue.Queue()

    queue = kombu.Queue(
        name="eyezon.cmd",
        exchange=Exchange('eyezon.cmd'),
        channel=self.conn.channel(),
        durable=False,
        exclusive=False,
        auto_delete=True)
    self.consumer = Consumer(self.conn.channel(), queues = queue, auto_declare=True, callbacks=[self.send_cmd])
    self.consumer.consume(no_ack=True)

    self.alarmCache = {
      "zoneTimerDump": None,
      "keypadUpdate": None,
      "zoneStateChange": None,
      "partitionStateChange": None,
      "realtimeCIDEvent": None
    }

  def send_cmd(self, message, req):
    msg = message.encode('utf-8')
    if msg == "^02,$" and self.alarmCache['zoneTimerDump'] != None:
      self.rpcReply(self.alarmCache['zoneTimerDump'], req)
    elif msg == "getKeypadStatus" and self.alarmCache['keypadUpdate'] != None:
      self.rpcReply(self.alarmCache['keypadUpdate'], req)

    if msg != "getKeypadStatus":
      self.cmd_q.put(msg)

  def rpcReply(self, message, req):
    self.rpcProducer.publish(body=message,
                             routing_key=req.properties['reply_to'],
                             correlation_id=req.properties.get('correlation_id'),
                             content_encoding=req.content_encoding)


  def publishEvent(self, event):
    if event['name'] == "Zone Timer Dump":
      self.alarmCache["zoneTimerDump"] = event
    elif event['name'] == "Virtual Keypad Update":
      self.alarmCache["keypadUpdate"] = event
    elif event['name'] == "Zone State Change":
      self.alarmCache["zoneStateChange"] = event
    elif event['name'] == "Partition State Change":
      self.alarmCache["partitionStateChange"] = event
    elif event['name'] == "Realtime CID Event":
      self.alarmCache["realtimeCIDEvent"] = event
    elif event['name'] == "Zone Timer Dump":
      self.alarmCache["zoneTimerDump"] = event
    self.producer.publish(exchange = 'eyezon.status', routing_key = "", body = event)

  def run(self):
    while 1:
      try:
        cmd = self.reply_q.get(True, 0.1)
        self.publishEvent(cmd)

      except Queue.Empty as e:
        try:
          self.conn.drain_events(timeout=0.1)
        except socket.timeout:
          pass
Example #37

class test_FilesystemTransport:
    def setup(self):
        self.channels = set()
        try:
            data_folder_in = tempfile.mkdtemp()
            data_folder_out = tempfile.mkdtemp()
        except Exception:
            raise SkipTest('filesystem transport: cannot create tempfiles')
        self.c = Connection(transport='filesystem',
                            transport_options={
                                'data_folder_in': data_folder_in,
                                'data_folder_out': data_folder_out,
                            })
        self.channels.add(self.c.default_channel)
        self.p = Connection(transport='filesystem',
                            transport_options={
                                'data_folder_in': data_folder_out,
                                'data_folder_out': data_folder_in,
                            })
        self.channels.add(self.p.default_channel)
        self.e = Exchange('test_transport_filesystem')
        self.q = Queue('test_transport_filesystem',
                       exchange=self.e,
                       routing_key='test_transport_filesystem')
        self.q2 = Queue('test_transport_filesystem2',
                        exchange=self.e,
                        routing_key='test_transport_filesystem2')

    def teardown(self):
        # make sure we don't attempt to restore messages at shutdown.
        for channel in self.channels:
            try:
                channel._qos._dirty.clear()
            except AttributeError:
                pass
            try:
                channel._qos._delivered.clear()
            except AttributeError:
                pass

    def _add_channel(self, channel):
        self.channels.add(channel)
        return channel

    def test_produce_consume_noack(self):
        producer = Producer(self._add_channel(self.p.channel()), self.e)
        consumer = Consumer(self._add_channel(self.c.channel()),
                            self.q,
                            no_ack=True)

        for i in range(10):
            producer.publish({'foo': i},
                             routing_key='test_transport_filesystem')

        _received = []

        def callback(message_data, message):
            _received.append(message)

        consumer.register_callback(callback)
        consumer.consume()

        while 1:
            if len(_received) == 10:
                break
            self.c.drain_events()

        assert len(_received) == 10

    def test_produce_consume(self):
        producer_channel = self._add_channel(self.p.channel())
        consumer_channel = self._add_channel(self.c.channel())
        producer = Producer(producer_channel, self.e)
        consumer1 = Consumer(consumer_channel, self.q)
        consumer2 = Consumer(consumer_channel, self.q2)
        self.q2(consumer_channel).declare()

        for i in range(10):
            producer.publish({'foo': i},
                             routing_key='test_transport_filesystem')
        for i in range(10):
            producer.publish({'foo': i},
                             routing_key='test_transport_filesystem2')

        _received1 = []
        _received2 = []

        def callback1(message_data, message):
            _received1.append(message)
            message.ack()

        def callback2(message_data, message):
            _received2.append(message)
            message.ack()

        consumer1.register_callback(callback1)
        consumer2.register_callback(callback2)

        consumer1.consume()
        consumer2.consume()

        while 1:
            if len(_received1) + len(_received2) == 20:
                break
            self.c.drain_events()

        assert len(_received1) + len(_received2) == 20

        # compression
        producer.publish({'compressed': True},
                         routing_key='test_transport_filesystem',
                         compression='zlib')
        m = self.q(consumer_channel).get()
        assert m.payload == {'compressed': True}

        # queue.delete
        for i in range(10):
            producer.publish({'foo': i},
                             routing_key='test_transport_filesystem')
        assert self.q(consumer_channel).get()
        self.q(consumer_channel).delete()
        self.q(consumer_channel).declare()
        assert self.q(consumer_channel).get() is None

        # queue.purge
        for i in range(10):
            producer.publish({'foo': i},
                             routing_key='test_transport_filesystem2')
        assert self.q2(consumer_channel).get()
        self.q2(consumer_channel).purge()
        assert self.q2(consumer_channel).get() is None
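
The class above exercises kombu's filesystem transport, which shuttles messages through a pair of directories: the producer's data_folder_out is the consumer's data_folder_in. A minimal standalone sketch of the same setup (the folder wiring mirrors setup above; everything else is illustrative):

import tempfile
from kombu import Connection, Exchange, Queue

d_in, d_out = tempfile.mkdtemp(), tempfile.mkdtemp()
producer_conn = Connection(transport='filesystem',
                           transport_options={'data_folder_in': d_out,
                                              'data_folder_out': d_in})
consumer_conn = Connection(transport='filesystem',
                           transport_options={'data_folder_in': d_in,
                                              'data_folder_out': d_out})
exchange = Exchange('demo')
queue = Queue('demo', exchange=exchange, routing_key='demo')

queue(consumer_conn.default_channel).declare()
# declare=[queue] sets up the exchange/queue binding on the producer side too
producer_conn.Producer().publish({'n': 1}, exchange=exchange,
                                 routing_key='demo', declare=[queue])
msg = queue(consumer_conn.default_channel).get(no_ack=True)
print(msg.payload if msg else 'no message')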
Example #38
class test_FilesystemTransport(TestCase):

    def setUp(self):
        try:
            data_folder_in = tempfile.mkdtemp()
            data_folder_out = tempfile.mkdtemp()
        except Exception:
            raise SkipTest('filesystem transport: cannot create tempfiles')
        self.c = Connection(transport='filesystem',
                            transport_options={
                                'data_folder_in': data_folder_in,
                                'data_folder_out': data_folder_out,
                            })
        self.p = Connection(transport='filesystem',
                            transport_options={
                                'data_folder_in': data_folder_out,
                                'data_folder_out': data_folder_in,
                            })
        self.e = Exchange('test_transport_filesystem')
        self.q = Queue('test_transport_filesystem',
                       exchange=self.e,
                       routing_key='test_transport_filesystem')
        self.q2 = Queue('test_transport_filesystem2',
                        exchange=self.e,
                        routing_key='test_transport_filesystem2')

    def test_produce_consume_noack(self):
        producer = Producer(self.p.channel(), self.e)
        consumer = Consumer(self.c.channel(), self.q, no_ack=True)

        for i in range(10):
            producer.publish({'foo': i},
                             routing_key='test_transport_filesystem')

        _received = []

        def callback(message_data, message):
            _received.append(message)

        consumer.register_callback(callback)
        consumer.consume()

        while 1:
            if len(_received) == 10:
                break
            self.c.drain_events()

        self.assertEqual(len(_received), 10)

    def test_produce_consume(self):
        producer_channel = self.p.channel()
        consumer_channel = self.c.channel()
        producer = Producer(producer_channel, self.e)
        consumer1 = Consumer(consumer_channel, self.q)
        consumer2 = Consumer(consumer_channel, self.q2)
        self.q2(consumer_channel).declare()

        for i in range(10):
            producer.publish({'foo': i},
                             routing_key='test_transport_filesystem')
        for i in range(10):
            producer.publish({'foo': i},
                             routing_key='test_transport_filesystem2')

        _received1 = []
        _received2 = []

        def callback1(message_data, message):
            _received1.append(message)
            message.ack()

        def callback2(message_data, message):
            _received2.append(message)
            message.ack()

        consumer1.register_callback(callback1)
        consumer2.register_callback(callback2)

        consumer1.consume()
        consumer2.consume()

        while 1:
            if len(_received1) + len(_received2) == 20:
                break
            self.c.drain_events()

        self.assertEqual(len(_received1) + len(_received2), 20)

        # compression
        producer.publish({'compressed': True},
                         routing_key='test_transport_filesystem',
                         compression='zlib')
        m = self.q(consumer_channel).get()
        self.assertDictEqual(m.payload, {'compressed': True})

        # queue.delete
        for i in range(10):
            producer.publish({'foo': i},
                             routing_key='test_transport_filesystem')
        self.assertTrue(self.q(consumer_channel).get())
        self.q(consumer_channel).delete()
        self.q(consumer_channel).declare()
        self.assertIsNone(self.q(consumer_channel).get())

        # queue.purge
        for i in range(10):
            producer.publish({'foo': i},
                             routing_key='test_transport_filesystem2')
        self.assertTrue(self.q2(consumer_channel).get())
        self.q2(consumer_channel).purge()
        self.assertIsNone(self.q2(consumer_channel).get())
Example #39

class AutophonePulseMonitor(object):
    """AutophonePulseMonitor provides the means to be notified when
    Android builds are available for testing and when users have initiated
    retriggers and cancels via the Treeherder UI. Builds can be selected using
    repository names, Android platform names or build types.

    AutophonePulseMonitor detects new builds by listening to
    un-normalized buildbot initiated pulse messages rather than the
    normalized messages in order to obtain the check-in comment for a
    build. The comment is used to determine if a try build has
    requested Autophone testing.

    :param hostname: Hostname of Pulse. Defaults to the production pulse
        server pulse.mozilla.org.
    :param userid: Pulse User id
    :param password: Pulse Password
    :param virtual_host: AMQP virtual host, defaults to '/'.
    :param durable_queues: If True, will create durable queues in
        Pulse for the build and job action messages. Defaults to
        False. In production, durable_queues should be set to True to
        avoid losing messages if the connection is broken or the
        application crashes.
    :param build_exchange_name: Name of build exchange. Defaults to
        'exchange/build/'.
    :param build_queue_name: Build queue name suffix. Defaults to
        'builds'. The pulse build queue will be created with a name
        of the form 'queue/<userid>/<build_queue_name>'.
    :param jobaction_exchange_name: Name of job action exchange.
        Defaults to 'exchange/treeherder/v1/job-actions'. Use
        'exchange/treeherder-stage/v1/job-actions' to listen to job
        action messages for Treeherder staging.
    :param jobaction_queue_name: Job action queue name suffix. Defaults to
        'jobactions'. The pulse jobaction queue will be created with a name
        of the form 'queue/<userid>/<jobaction_queue_name>'.
    :param build_callback: Required callback function which takes a
        single `build_data` object as argument containing information
        on matched builds. `build_callback` is always called on a new
        thread.  `build_data` is an object which is guaranteed to
        contain the following keys:
            'appName': Will always be 'Fennec'
            'branch':  The repository name of the build, e.g. 'mozilla-central'.
            'comments': Check-in comment.
            'packageUrl': The url to the apk package for the build.
            'platform': The platform name of the build, e.g. 'android-api-11'
        `build_data` may also contain the following keys:
            'buildid': Build id in CCYYMMDDHHMMSS format.
            'robocopApkUrl': Url to robocop apk for the build.
            'symbolsUrl': Url to the symbols zip file for the build.
            'testsUrl': Url to the tests zip file for the build.
            'who': Check-in Committer.
    :param jobaction_callback: Required callback function which takes a
        single `jobaction_data` object as argument containing information
        on matched actions. `jobaction_callback` is always called on a new
        thread.  `jobaction_data` is an object which contains the following keys:
            'action': 'cancel' or 'retrigger',
            'project': repository name,
            'job_id': treeherder job_id,
            'job_guid': treeherder job_guid,
            'build_type': 'opt' or 'debug',
            'platform': the detected platform,
            'build_url': build url,
            'machine_name': name of machine,
            'job_group_name': treeherder job group name,
            'job_group_symbol': treeherder job group symbol,
            'job_type_name': treeherder job type name,
            'job_type_symbol': treeherder job type symbol,
            'result': test result,
    :param treeherder_url: Optional Treeherder server url if Treeherder
        job action pulse messages are to be processed. Defaults to None.
    :param trees: Required list of repository names to be matched.
    :param platforms: Required list of platforms to be
        matched. Currently, the possible values are 'android',
        'android-api-9', 'android-api-10', 'android-api-11', and
        'android-x86'.
    :param buildtypes: Required list of build types to
        process. Possible values are 'opt' and 'debug'.
    :param timeout: Timeout in seconds for the kombu connection
        drain_events. Defaults to 5 seconds.
    :param shared_lock: Required lock used to control concurrent
        access. Used to prevent socket based deadlocks.
    :param verbose: If True, will log build and job action messages.
        Defaults to False.

    Usage:

    ::

        import json
        import logging
        import threading
        import time
        from optparse import OptionParser

        parser = OptionParser()

        def build_callback(build_data):
            logger = logging.getLogger()
            logger.debug('PULSE BUILD FOUND %s' % build_data)

        def jobaction_callback(job_action):
            logger = logging.getLogger()
            if job_action['job_group_name'] != 'Autophone':
                return
            logger.debug('JOB ACTION FOUND %s' % json.dumps(
                job_action, sort_keys=True, indent=4))

        logging.basicConfig()
        logger = logging.getLogger()
        logger.setLevel(logging.DEBUG)

        parser.add_option('--pulse-user', action='store', type='string',
                          dest='pulse_user', default='',
                          help='user id for connecting to PulseGuardian')
        parser.add_option('--pulse-password', action='store', type='string',
                          dest='pulse_password', default='',
                          help='password for connecting to PulseGuardian')

        (options, args) = parser.parse_args()

        shared_lock = threading.Lock()
        monitor = AutophonePulseMonitor(
            userid=options.pulse_user,
            password=options.pulse_password,
            jobaction_exchange_name='exchange/treeherder-stage/v1/job-actions',
            build_callback=build_callback,
            jobaction_callback=jobaction_callback,
            trees=['try', 'mozilla-inbound'],
            platforms=['android-api-9', 'android-api-11'],
            buildtypes=['opt'],
            shared_lock=shared_lock)

        monitor.start()
        time.sleep(3600)
    """

    def __init__(self,
                 hostname='pulse.mozilla.org',
                 userid=None,
                 password=None,
                 virtual_host='/',
                 durable_queues=False,
                 build_exchange_name='exchange/build/',
                 build_queue_name='builds',
                 jobaction_exchange_name='exchange/treeherder/v1/job-actions',
                 jobaction_queue_name='jobactions',
                 build_callback=None,
                 jobaction_callback=None,
                 treeherder_url=None,
                 trees=[],
                 platforms=[],
                 buildtypes=[],
                 timeout=5,
                 shared_lock=None,
                 verbose=False):

        assert userid, "userid is required."
        assert password, "password is required."
        assert build_callback, "build_callback is required."
        assert trees, "trees is required."
        assert platforms, "platforms is required."
        assert buildtypes, "buildtypes is required."
        assert shared_lock, "shared_lock is required."

        self.treeherder_url = treeherder_url
        self.build_callback = build_callback
        self.jobaction_callback = jobaction_callback
        self.trees = list(trees)
        self.platforms = list(platforms)
        # Sort the platforms in descending order of length, so we do
        # not make a match on a substring of the platform prematurely.
        self.platforms.sort(key=len, reverse=True)
        self.buildtypes = list(buildtypes)
        self.timeout = timeout
        self.shared_lock = shared_lock
        self.verbose = verbose
        self._stopping = threading.Event()
        self.listen_thread = None
        # connection does not connect to the server until either the
        # connection.connect() method is called explicitly or until
        # kombu calls it implicitly as needed.
        self.connection = Connection(hostname=hostname,
                                     userid=userid,
                                     password=password,
                                     virtual_host=virtual_host,
                                     port=DEFAULT_SSL_PORT,
                                     ssl=True)
        build_exchange = Exchange(name=build_exchange_name, type='topic')
        self.queues = [Queue(name='queue/%s/build' % userid,
                             exchange=build_exchange,
                             routing_key='build.#.finished',
                             durable=durable_queues,
                             auto_delete=not durable_queues)]
        if treeherder_url:
            jobaction_exchange = Exchange(name=jobaction_exchange_name, type='topic')
            self.queues.append(Queue(name='queue/%s/jobactions' % userid,
                                 exchange=jobaction_exchange,
                                 routing_key='#',
                                 durable=durable_queues,
                                 auto_delete=not durable_queues))

    def start(self):
        """Runs the `listen` method on a new thread."""
        if self.listen_thread and self.listen_thread.is_alive():
            logger.warning('AutophonePulseMonitor.start: listen thread already started')
            return
        logger.debug('AutophonePulseMonitor.start: listen thread starting')
        self.listen_thread = threading.Thread(target=self.listen,
                                              name='PulseMonitorThread')
        self.listen_thread.daemon = True
        self.listen_thread.start()

    def stop(self):
        """Stops the pulse monitor listen thread."""
        logger.debug('AutophonePulseMonitor stopping')
        self._stopping.set()
        self.listen_thread.join()
        logger.debug('AutophonePulseMonitor stopped')

    def is_alive(self):
        return self.listen_thread.is_alive()

    def listen(self):
        logger.debug('AutophonePulseMonitor: start shared_lock.acquire')
        self.shared_lock.acquire()
        try:
            consumer = self.connection.Consumer(self.queues,
                                                callbacks=[self.handle_message],
                                                accept=['json'],
                                                auto_declare=False)
            for queue in self.queues:
                queue(self.connection).queue_declare(passive=False)
                queue(self.connection).queue_bind()
            with consumer:
                while not self._stopping.is_set():
                    try:
                        logger.debug('AutophonePulseMonitor shared_lock.release')
                        self.shared_lock.release()
                        self.connection.drain_events(timeout=self.timeout)
                    except socket.timeout:
                        pass
                    except KeyboardInterrupt:
                        raise
                    finally:
                        logger.debug('AutophonePulseMonitor shared_lock.acquire')
                        self.shared_lock.acquire()
            logger.debug('AutophonePulseMonitor.listen: stopping')
        except:
            logger.exception('AutophonePulseMonitor Exception')
        finally:
            logger.debug('AutophonePulseMonitor exit shared_lock.release')
            self.shared_lock.release()
            self.connection.release()

    def handle_message(self, data, message):
        if self._stopping.is_set():
            return
        message.ack()
        if '_meta' in data and 'payload' in data:
            self.handle_build(data, message)
        if (self.treeherder_url and 'action' in data and
            'project' in data and 'job_id' in data):
            self.handle_jobaction(data, message)

    def handle_build(self, data, message):
        if self.verbose:
            logger.debug(
                'handle_build:\n'
                '\tdata   : %s\n'
                '\tmessage: %s' % (
                    json.dumps(data, sort_keys=True, indent=4),
                    json.dumps(message.__dict__, sort_keys=True, indent=4)))
        try:
            build = data['payload']['build']
        except (KeyError, TypeError) as e:
            logger.debug('AutophonePulseMonitor.handle_build: %s in pulse build data' % e)
            return

        fields = (
            'appName',       # Fennec
            'branch',
            'buildid',
            'comments',
            'packageUrl',
            'platform',
            'robocopApkUrl',
            'symbolsUrl',
            'testsUrl',
            'who'
        )

        required_fields = (
            'appName',       # Fennec
            'branch',        # mozilla-central, ...
            'comments',
            'packageUrl',
            'platform',      # android...
        )

        build_data = {}
        builder_name = build['builderName']
        build_data['builder_name'] = builder_name
        build_data['build_type'] = 'debug' if 'debug' in builder_name else 'opt'

        for property in build['properties']:
            property_name = property[0]
            if property_name in fields and len(property) > 1 and property[1]:
                build_data[property_name] = type(property[1])(property[1])

        for required_field in required_fields:
            if required_field not in build_data or not build_data[required_field]:
                return

        if build_data['appName'] != 'Fennec':
            return
        if not build_data['platform'].startswith('android'):
            return
        if build_data['branch'] not in self.trees:
            return
        if build_data['platform'] not in self.platforms:
            return
        if build_data['build_type'] not in self.buildtypes:
            return
        if build_data['branch'] == 'try' and 'autophone' not in build_data['comments']:
            return

        self.build_callback(build_data)
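
The listen() loop above is the general kombu recipe for a stoppable
consumer: drain_events(timeout=...) inside a loop guarded by an Event,
swallowing socket.timeout so the loop can re-check the flag. A standalone
sketch of just that pattern (the broker URL, exchange and queue names are
placeholders):

import socket
import threading

from kombu import Connection, Exchange, Queue

stop = threading.Event()  # another thread calls stop.set() to shut down

def on_message(body, message):
    print(body)
    message.ack()

with Connection('amqp://guest:guest@localhost:5672//') as conn:
    queue = Queue('demo', Exchange('demo', type='topic'), routing_key='#')
    with conn.Consumer([queue], callbacks=[on_message]):
        while not stop.is_set():
            try:
                # Wake up periodically so the stop flag is honored.
                conn.drain_events(timeout=1)
            except socket.timeout:
                pass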
Beispiel #40
0
class test_MemoryTransport(Case):

    def setup(self):
        self.c = Connection(transport='memory')
        self.e = Exchange('test_transport_memory')
        self.q = Queue('test_transport_memory',
                       exchange=self.e,
                       routing_key='test_transport_memory')
        self.q2 = Queue('test_transport_memory2',
                        exchange=self.e,
                        routing_key='test_transport_memory2')
        self.fanout = Exchange('test_transport_memory_fanout', type='fanout')
        self.q3 = Queue('test_transport_memory_fanout1',
                        exchange=self.fanout)
        self.q4 = Queue('test_transport_memory_fanout2',
                        exchange=self.fanout)

    def test_driver_version(self):
        self.assertTrue(self.c.transport.driver_version())

    def test_produce_consume_noack(self):
        channel = self.c.channel()
        producer = Producer(channel, self.e)
        consumer = Consumer(channel, self.q, no_ack=True)

        for i in range(10):
            producer.publish({'foo': i}, routing_key='test_transport_memory')

        _received = []

        def callback(message_data, message):
            _received.append(message)

        consumer.register_callback(callback)
        consumer.consume()

        while 1:
            if len(_received) == 10:
                break
            self.c.drain_events()

        self.assertEqual(len(_received), 10)

    def test_produce_consume_fanout(self):
        producer = self.c.Producer()
        consumer = self.c.Consumer([self.q3, self.q4])

        producer.publish(
            {'hello': 'world'},
            declare=consumer.queues,
            exchange=self.fanout,
        )

        self.assertEqual(self.q3(self.c).get().payload, {'hello': 'world'})
        self.assertEqual(self.q4(self.c).get().payload, {'hello': 'world'})
        self.assertIsNone(self.q3(self.c).get())
        self.assertIsNone(self.q4(self.c).get())

    def test_produce_consume(self):
        channel = self.c.channel()
        producer = Producer(channel, self.e)
        consumer1 = Consumer(channel, self.q)
        consumer2 = Consumer(channel, self.q2)
        self.q2(channel).declare()

        for i in range(10):
            producer.publish({'foo': i}, routing_key='test_transport_memory')
        for i in range(10):
            producer.publish({'foo': i}, routing_key='test_transport_memory2')

        _received1 = []
        _received2 = []

        def callback1(message_data, message):
            _received1.append(message)
            message.ack()

        def callback2(message_data, message):
            _received2.append(message)
            message.ack()

        consumer1.register_callback(callback1)
        consumer2.register_callback(callback2)

        consumer1.consume()
        consumer2.consume()

        while 1:
            if len(_received1) + len(_received2) == 20:
                break
            self.c.drain_events()

        self.assertEqual(len(_received1) + len(_received2), 20)

        # compression
        producer.publish({'compressed': True},
                         routing_key='test_transport_memory',
                         compression='zlib')
        m = self.q(channel).get()
        self.assertDictEqual(m.payload, {'compressed': True})

        # queue.delete
        for i in range(10):
            producer.publish({'foo': i}, routing_key='test_transport_memory')
        self.assertTrue(self.q(channel).get())
        self.q(channel).delete()
        self.q(channel).declare()
        self.assertIsNone(self.q(channel).get())

        # queue.purge
        for i in range(10):
            producer.publish({'foo': i}, routing_key='test_transport_memory2')
        self.assertTrue(self.q2(channel).get())
        self.q2(channel).purge()
        self.assertIsNone(self.q2(channel).get())

    def test_drain_events(self):
        with self.assertRaises(socket.timeout):
            self.c.drain_events(timeout=0.1)

        c1 = self.c.channel()
        c2 = self.c.channel()

        with self.assertRaises(socket.timeout):
            self.c.drain_events(timeout=0.1)

        del(c1)  # so pyflakes doesn't complain.
        del(c2)

    def test_drain_events_unregistered_queue(self):
        c1 = self.c.channel()
        producer = self.c.Producer()
        consumer = self.c.Consumer([self.q2])

        producer.publish(
            {'hello': 'world'},
            declare=consumer.queues,
            routing_key=self.q2.routing_key,
            exchange=self.q2.exchange,
        )
        message = consumer.queues[0].get()._raw

        class Cycle(object):

            def get(self, timeout=None):
                return (message, 'foo'), c1

        self.c.transport.cycle = Cycle()
        self.c.drain_events()

    def test_queue_for(self):
        chan = self.c.channel()
        chan.queues.clear()

        x = chan._queue_for('foo')
        self.assertTrue(x)
        self.assertIs(chan._queue_for('foo'), x)
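
Outside of a test case, the same in-memory transport pairs nicely with
kombu's SimpleQueue wrapper; a minimal sketch:

from kombu import Connection

with Connection('memory://') as conn:
    simple = conn.SimpleQueue('demo')
    simple.put({'hello': 'world'})
    msg = simple.get(block=True, timeout=1)
    print(msg.payload)  # -> {'hello': 'world'}
    msg.ack()
    simple.close()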
class test_FilesystemTransport(Case):
    def setUp(self):
        if sys.platform == "win32":
            raise SkipTest("Needs win32con module")
        try:
            data_folder_in = tempfile.mkdtemp()
            data_folder_out = tempfile.mkdtemp()
        except Exception:
            raise SkipTest("filesystem transport: cannot create tempfiles")
        self.c = Connection(
            transport="filesystem",
            transport_options={"data_folder_in": data_folder_in, "data_folder_out": data_folder_out},
        )
        self.p = Connection(
            transport="filesystem",
            transport_options={"data_folder_in": data_folder_out, "data_folder_out": data_folder_in},
        )
        self.e = Exchange("test_transport_filesystem")
        self.q = Queue("test_transport_filesystem", exchange=self.e, routing_key="test_transport_filesystem")
        self.q2 = Queue("test_transport_filesystem2", exchange=self.e, routing_key="test_transport_filesystem2")

    def test_produce_consume_noack(self):
        producer = Producer(self.p.channel(), self.e)
        consumer = Consumer(self.c.channel(), self.q, no_ack=True)

        for i in range(10):
            producer.publish({"foo": i}, routing_key="test_transport_filesystem")

        _received = []

        def callback(message_data, message):
            _received.append(message)

        consumer.register_callback(callback)
        consumer.consume()

        while 1:
            if len(_received) == 10:
                break
            self.c.drain_events()

        self.assertEqual(len(_received), 10)

    def test_produce_consume(self):
        producer_channel = self.p.channel()
        consumer_channel = self.c.channel()
        producer = Producer(producer_channel, self.e)
        consumer1 = Consumer(consumer_channel, self.q)
        consumer2 = Consumer(consumer_channel, self.q2)
        self.q2(consumer_channel).declare()

        for i in range(10):
            producer.publish({"foo": i}, routing_key="test_transport_filesystem")
        for i in range(10):
            producer.publish({"foo": i}, routing_key="test_transport_filesystem2")

        _received1 = []
        _received2 = []

        def callback1(message_data, message):
            _received1.append(message)
            message.ack()

        def callback2(message_data, message):
            _received2.append(message)
            message.ack()

        consumer1.register_callback(callback1)
        consumer2.register_callback(callback2)

        consumer1.consume()
        consumer2.consume()

        while 1:
            if len(_received1) + len(_received2) == 20:
                break
            self.c.drain_events()

        self.assertEqual(len(_received1) + len(_received2), 20)

        # compression
        producer.publish({"compressed": True}, routing_key="test_transport_filesystem", compression="zlib")
        m = self.q(consumer_channel).get()
        self.assertDictEqual(m.payload, {"compressed": True})

        # queue.delete
        for i in range(10):
            producer.publish({"foo": i}, routing_key="test_transport_filesystem")
        self.assertTrue(self.q(consumer_channel).get())
        self.q(consumer_channel).delete()
        self.q(consumer_channel).declare()
        self.assertIsNone(self.q(consumer_channel).get())

        # queue.purge
        for i in range(10):
            producer.publish({"foo": i}, routing_key="test_transport_filesystem2")
        self.assertTrue(self.q2(consumer_channel).get())
        self.q2(consumer_channel).purge()
        self.assertIsNone(self.q2(consumer_channel).get())
Beispiel #42
0
class test_FilesystemTransport(Case):
    def setup(self):
        try:
            data_folder_in = tempfile.mkdtemp()
            data_folder_out = tempfile.mkdtemp()
        except Exception:
            raise SkipTest('filesystem transport: cannot create tempfiles')
        self.c = Connection(transport='filesystem',
                            transport_options={
                                'data_folder_in': data_folder_in,
                                'data_folder_out': data_folder_out,
                            })
        self.p = Connection(transport='filesystem',
                            transport_options={
                                'data_folder_in': data_folder_out,
                                'data_folder_out': data_folder_in,
                            })
        self.e = Exchange('test_transport_filesystem')
        self.q = Queue('test_transport_filesystem',
                       exchange=self.e,
                       routing_key='test_transport_filesystem')
        self.q2 = Queue('test_transport_filesystem2',
                        exchange=self.e,
                        routing_key='test_transport_filesystem2')

    def test_produce_consume_noack(self):
        producer = Producer(self.p.channel(), self.e)
        consumer = Consumer(self.c.channel(), self.q, no_ack=True)

        for i in range(10):
            producer.publish({'foo': i},
                             routing_key='test_transport_filesystem')

        _received = []

        def callback(message_data, message):
            _received.append(message)

        consumer.register_callback(callback)
        consumer.consume()

        while 1:
            if len(_received) == 10:
                break
            self.c.drain_events()

        self.assertEqual(len(_received), 10)

    def test_produce_consume(self):
        producer_channel = self.p.channel()
        consumer_channel = self.c.channel()
        producer = Producer(producer_channel, self.e)
        consumer1 = Consumer(consumer_channel, self.q)
        consumer2 = Consumer(consumer_channel, self.q2)
        self.q2(consumer_channel).declare()

        for i in range(10):
            producer.publish({'foo': i},
                             routing_key='test_transport_filesystem')
        for i in range(10):
            producer.publish({'foo': i},
                             routing_key='test_transport_filesystem2')

        _received1 = []
        _received2 = []

        def callback1(message_data, message):
            _received1.append(message)
            message.ack()

        def callback2(message_data, message):
            _received2.append(message)
            message.ack()

        consumer1.register_callback(callback1)
        consumer2.register_callback(callback2)

        consumer1.consume()
        consumer2.consume()

        while 1:
            if len(_received1) + len(_received2) == 20:
                break
            self.c.drain_events()

        self.assertEqual(len(_received1) + len(_received2), 20)

        # compression
        producer.publish({'compressed': True},
                         routing_key='test_transport_filesystem',
                         compression='zlib')
        m = self.q(consumer_channel).get()
        self.assertDictEqual(m.payload, {'compressed': True})

        # queue.delete
        for i in range(10):
            producer.publish({'foo': i},
                             routing_key='test_transport_filesystem')
        self.assertTrue(self.q(consumer_channel).get())
        self.q(consumer_channel).delete()
        self.q(consumer_channel).declare()
        self.assertIsNone(self.q(consumer_channel).get())

        # queue.purge
        for i in range(10):
            producer.publish({'foo': i},
                             routing_key='test_transport_filesystem2')
        self.assertTrue(self.q2(consumer_channel).get())
        self.q2(consumer_channel).purge()
        self.assertIsNone(self.q2(consumer_channel).get())
def do_consume(user_qs):

    print("about to listen on queues [%s]" % ", ".join(user_qs))

    conn = Connection(amqp_hosts, failover_strategy='round-robin')

    # try to get a connection no matter what
    while True:
        try:
            conn.ensure_connection(errback=on_ens_conn_err_cb)
            conn.connect()
        except Exception as e:
            print("connection attempt failed with exception [%s]" % repr(e))
            conn.release()
            continue
        if conn.connected:
            break
        else:
            print("connection failed in some way, retry")

    chan = conn.channel()

    global bound_cons_Q

    cons_Q = Queue(common.uuid(), queue_arguments=q_expires)
    bound_cons_Q = cons_Q(chan)
    bound_cons_Q.declare()
    # first bind to some control route
    bound_cons_Q.bind_to(priTopicXchg, routing_key='manage.#')
    for i in user_qs:
        if '*' in i or '#' in i:
            # create the wildcard route_key bind
            bound_cons_Q.bind_to(priTopicXchg, routing_key=i)
        else:
            for j in allQs:
                if i == j.as_dict()['name']:
                    bound_cons_Q.bind_to(priTopicXchg, routing_key=j.as_dict()['routing_key'])

    cons = Consumer(
            chan,
            accept=['json'],
            queues=bound_cons_Q,
            callbacks=[on_msg_cb_1, on_msg_cb_2]
            )

    print("queue set to [%s]" % bound_cons_Q.as_dict(recurse=True))
    cons.consume()
    while True:
        try:
            conn.drain_events()
        except conn.connection_errors + conn.channel_errors as e:
            print("connection [%s] went down (error[%s]), trying to "
                  "connect to the next one" % (conn.info(), repr(e)))
            conn.close()
            conn.release()
            conn.ensure_connection(errback=on_ens_conn_err_cb)
            conn.connect()

            chan = conn.channel()
            bound_cons_Q = cons_Q(chan)  # rebind the queue to the new channel
            cons = Consumer(
                    chan,
                    accept=['json'],
                    queues=bound_cons_Q,
                    callbacks=[on_msg_cb_1, on_msg_cb_2]
                    )
            cons.consume()
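
The manual reconnect loop above can also be delegated to kombu itself:
Connection.ensure wraps a callable so that connection and channel errors
trigger a revive and a retry instead of raising immediately. A hedged
sketch of that alternative (the URL, exchange and routing key are
placeholders):

from kombu import Connection, Exchange, Producer

with Connection('amqp://guest:guest@localhost:5672//') as conn:
    exchange = Exchange('demo_topic', type='topic')
    producer = Producer(conn.default_channel, exchange=exchange)

    # ensure() revives the producer's channel on failure and retries
    # the wrapped call up to max_retries times.
    safe_publish = conn.ensure(producer, producer.publish, max_retries=3)
    safe_publish({'hello': 'world'}, routing_key='manage.demo',
                 declare=[exchange])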
Beispiel #44
0
    result = supervisorConn.supervisor.stopAllProcesses()
    rpcReply(result, args)
  def task_restartall(message=None, args=None):
    result = supervisorConn.system.multicall([
      {'methodName':'supervisor.stopAllProcesses'},
      {'methodName':'supervisor.startAllProcesses'}
    ])
    rpcReply(result, args)

  def on_request(body, req):
    message = json.loads(body)
    print "Received message ",message
    sys.stdout.flush()
    operations = {
      "list_processes" : list_processes,
      "task_start" : task_start,
      "task_stop" : task_stop,
      "task_restart" : task_restart,
      "read_log" : read_log
    }
    operations[message['operation']](message, req)

  # let's light this candle
  consumer = Consumer(rmqConn.channel(), queues = queue, auto_declare=True, callbacks=[on_request])
  consumer.consume(no_ack=True)

  while True:
    rmqConn.drain_events()


Beispiel #45
0
#!/usr/bin/env python

from kombu import Connection, Exchange, Queue

def process_message(body, message):
    print body
    message.ack()

nova_exchange = Exchange('nova', 'topic', durable=False)
nova_queue    = Queue('listener', exchange = nova_exchange, routing_key='#')
conn = Connection('amqp://*****:*****@192.168.0.10//')
consumer = conn.Consumer(nova_queue, callbacks=[process_message])
consumer.consume()

while True:
    conn.drain_events()
Beispiel #46
0
class Dbreader:
    def __init__(self, broker_cloud, host_influxdb):
        self.clientDB = InfluxDBClient(host_influxdb, 8086, 'root', 'root', 'Collector_DB')
        self.clientDB.create_database('Collector_DB')

        self.producer_connection = Connection(broker_cloud)
        self.consumer_connection = Connection(broker_cloud)
        self.exchange = Exchange("IoT", type="direct")

    def get_item_state(self, list_item_global_id):
        items = []
        for item_global_id in list_item_global_id:
            query_statement = 'SELECT * FROM \"' + item_global_id + '\" ORDER BY time DESC LIMIT 1 '
            query_result = self.clientDB.query(query_statement)

            for item in query_result:
                item_state = {
                    'item_global_id': item[0]['item_global_id'],
                    'item_state': item[0]['item_state'],
                    # 'last_changed': covert_time_to_correct_time_zone(item[0]['time']),
                    'last_changed': item[0]['time'],
                    'thing_global_id': item[0]['thing_global_id']
                }
                items.append(item_state)
        return items

    def api_get_item_state(self, body, message):
        print("API get_item_state")
        # Message {'list_item_global_id': [], 'reply_to': " ", }
        payload = json.loads(body)
        list_item_global_id = payload["list_item_global_id"]
        reply_to = payload['reply_to']
        items = self.get_item_state(list_item_global_id)
        message_response = {
            "items": items
        }
        self.producer_connection.ensure_connection()
        with Producer(self.producer_connection) as producer:
            producer.publish(
                json.dumps(message_response),
                exchange=self.exchange.name,
                routing_key=reply_to,
                retry=True
            )
            # print("Done: {}".format(items))

    def api_get_item_state_history(self, body, message):
        print("API get_item_state_history")
        # Message {'list_item_global_id': [], 'reply_to': " ", }
        payload = json.loads(body)
        list_global_id = payload["list_global_id"]
        reply_to = payload['reply_to']
        start_time = payload["start_time"]
        end_time = payload["end_time"]
        scale = payload["scale"]
        items = self.get_item_state_history(list_global_id, start_time, end_time, scale)
        message_response = {
            "items": items
        }
        self.producer_connection.ensure_connection()
        with Producer(self.producer_connection) as producer:
            producer.publish(
                json.dumps(message_response),
                exchange=self.exchange.name,
                routing_key=reply_to,
                retry=True
            )

    def get_item_state_history(self, list_global_id, start_time, end_time, scale):
        items = []
        print(scale)
        for global_id in list_global_id:
            item_global_id = global_id['item_global_id']
            thing_global_id = global_id['thing_global_id']
            query_statement_type_field = """SHOW FIELD KEYS ON \"Collector_DB\" FROM \"{}\"""".format(item_global_id)
            query_result_type_field = self.clientDB.query(query_statement_type_field)
            type_field = list(query_result_type_field.get_points())[0]['fieldType']

            if scale == "0s":
                if type_field == 'integer' or type_field == 'float':
                    query_statement_history = """SELECT *  FROM \"{}\" where time >= \'{}\' AND time <= \'{}\'""".format(item_global_id, start_time, end_time)
                    query_result_history = self.clientDB.query(query_statement_history)
                    query_result_history = list(query_result_history.get_points())

                    query_statement_global =  """SELECT MAX(\"item_state\") AS max_state, MIN(\"item_state\") AS min_state, MEAN(\"item_state\") AS average_state FROM \"{}\" where time >= \'{}\' AND time <= \'{}\'""".format(item_global_id, start_time, end_time)
                    query_result_global = self.clientDB.query(query_statement_global)
                    query_result_global = list(query_result_global.get_points())
                    print(query_result_global[0]['max_state'], query_result_global[0]['min_state'], query_result_global[0]['average_state'])
                    if len(query_result_history) > 0 and len(query_result_global) > 0:
                        item = {
                            'item_global_id': item_global_id,
                            'thing_global_id': thing_global_id,
                            'max_global': query_result_global[0]['max_state'],
                            'min_global': query_result_global[0]['min_state'],
                            'average_global': query_result_global[0]['average_state'],
                            'history': []
                        }

                        for item_history in query_result_history:
                            item_state = {
                                'last_changed': item_history['time'],
                                'item_state': item_history['item_state'],
                            }
                            item['history'].append(item_state)
                        items.append(item)
                else:

                    query_statement_history = """SELECT *  FROM \"{}\" where time >= \'{}\' AND time <= \'{}\'""".format(item_global_id, start_time, end_time)
                    query_result_history = self.clientDB.query(query_statement_history)
                    query_result_history = list(query_result_history.get_points())

                    if len(query_result_history) > 0:
                        item = {
                            'item_global_id': item_global_id,
                            'thing_global_id': thing_global_id,
                            'history': []
                        }

                        for item_history in query_result_history:
                            item_state = {
                                'last_changed': item_history['time'],
                                'item_state': item_history['item_state'],
                            }
                            item['history'].append(item_state)
                        items.append(item)
            else:
                if type_field == 'integer' or type_field == 'float':
                    query_statement_history = """SELECT MODE(\"item_state\") AS item_state, MAX(\"item_state\") AS max_state, MIN(\"item_state\") AS min_state, MEAN(\"item_state\") AS average_state FROM \"{}\" WHERE time >= \'{}\' AND time <= \'{}\' GROUP BY time({}), thing_global_id""".format(item_global_id, start_time, end_time, scale)
                    query_result_history = self.clientDB.query(query_statement_history)
                    query_result_history = list(query_result_history.get_points())

                    query_statement_global =  """SELECT MAX(\"item_state\") AS max_state, MIN(\"item_state\") AS min_state, MEAN(\"item_state\") AS average_state FROM \"{}\" where time >= \'{}\' AND time <= \'{}\'""".format(item_global_id, start_time, end_time)
                    query_result_global = self.clientDB.query(query_statement_global)
                    query_result_global = list(query_result_global.get_points())

                    if len(query_result_history) > 0 and len(query_result_global) > 0:
                        item = {
                            'item_global_id': item_global_id,
                            'thing_global_id': thing_global_id,
                            'max_global': query_result_global[0]['max_state'],
                            'min_global': query_result_global[0]['min_state'],
                            'average_global': query_result_global[0]['average_state'],
                            'history': []
                        }
                        for item_history in query_result_history:
                            item_state = {
                                'last_changed': item_history['time'],
                                'item_state': item_history['item_state'],
                                'min_state': item_history['min_state'],
                                'max_state': item_history['max_state'],
                                'average_state': item_history['average_state']
                            }
                            item['history'].append(item_state)
                        items.append(item)
                else:
                    query_statement_history = """SELECT MODE(\"item_state\") AS item_state FROM \"{}\" WHERE time >= \'{}\' AND time <= \'{}\' GROUP BY time({}), thing_global_id""".format(item_global_id, start_time, end_time, scale)
                    query_result_history = self.clientDB.query(query_statement_history)
                    query_result_history = list(query_result_history.get_points())

                    if len(query_result_history) > 0:
                        item = {
                            'item_global_id': item_global_id,
                            'thing_global_id': thing_global_id,
                            'history': []
                        }
                        for item_history in query_result_history:
                            item_state = {
                                'last_changed': item_history['time'],
                                'item_state': item_history['item_state']
                            }
                            item['history'].append(item_state)
                        items.append(item)

        return items

    def run(self):

        queue_get_item_state = Queue(name='dbreader.request.api_get_item_state', exchange=self.exchange,
                                     routing_key='dbreader.request.api_get_item_state')#, message_ttl=20)
        queue_get_item_state_history = Queue(name='dbreader.request.api_get_item_state_history', exchange=self.exchange,
                                             routing_key='dbreader.request.api_get_item_state_history')#, message_ttl=20)
        while 1:
            try:
                self.consumer_connection.ensure_connection(max_retries=1)
                # contextlib.nested is Python 2 only; a plain multi-context
                # `with` statement does the same job here.
                with Consumer(self.consumer_connection, queues=queue_get_item_state,
                              callbacks=[self.api_get_item_state], no_ack=True), \
                     Consumer(self.consumer_connection, queues=queue_get_item_state_history,
                              callbacks=[self.api_get_item_state_history], no_ack=True):
                    while True:
                        self.consumer_connection.drain_events()
            except (ConnectionRefusedError, exceptions.OperationalError):
                print('Connection lost')
            except self.consumer_connection.connection_errors:
                print('Connection error')
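
Dbreader answers each request on the routing key named in its 'reply_to'
field. The calling side of that contract would look roughly like the
sketch below (the broker URL and reply queue name are illustrative; it
assumes a Dbreader instance is running):

import json

from kombu import Connection, Consumer, Exchange, Producer, Queue

exchange = Exchange('IoT', type='direct')

with Connection('amqp://localhost//') as conn:
    reply_queue = Queue('dbreader.reply.demo', exchange=exchange,
                        routing_key='dbreader.reply.demo')

    Producer(conn.default_channel).publish(
        json.dumps({'list_item_global_id': [], 'reply_to': 'dbreader.reply.demo'}),
        exchange=exchange.name,
        routing_key='dbreader.request.api_get_item_state',
        declare=[reply_queue],
        retry=True,
    )

    def on_reply(body, message):
        print(json.loads(body)['items'])

    with Consumer(conn, queues=reply_queue, callbacks=[on_reply], no_ack=True):
        conn.drain_events(timeout=5)  # raises socket.timeout if no reply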
Beispiel #47
0
class RabbitmqDaemon(threading.Thread):
  def __init__(self, actiontecQueue, timecapsuleQueue):
    threading.Thread.__init__(self)
    self.settings = ConfigParser.ConfigParser()
    self.settings.read('../config/site.ini')
    self.rabbitmqUsername = self.settings.get('rabbitmq', 'username')
    self.rabbitmqPassword = self.settings.get('rabbitmq', 'password')
    self.rabbitmqHost = self.settings.get('rabbitmq', 'host')
    self.rmqConn = Connection('amqp://'+self.rabbitmqUsername+':'+self.rabbitmqPassword+'@'+self.rabbitmqHost+':5672//')
    self.statusProducer = Producer(self.rmqConn.channel(), exchange = Exchange('actiontec.status', type='fanout'), serializer="json")
    self.timecapsuleEntries = {}
    self.discoveredHosts = {}
    self.timecapsuleQueue = timecapsuleQueue
    self.actiontecQueue = actiontecQueue

  def run(self):
    def list_mac_addresses(message=None, args=None):
      print "RPC reply with",len(self.discoveredHosts)," cached addresses"
      sys.stdout.flush()
      rpcReply(self.discoveredHosts, args)

    def rpcReply(message, req):
      rpcProducer.publish(body=message,
                          routing_key=req.properties['reply_to'],
                          correlation_id=req.properties.get('correlation_id'),
                          content_encoding=req.content_encoding)
    operations = {
      "list_mac_addresses" : list_mac_addresses
    }

    def on_request(body, req):
      message = json.loads(body)
      print "Received message ",message
      sys.stdout.flush()
      operations[message['operation']](message, req)

    rpcProducer= Producer(self.rmqConn.channel(), serializer="json")

    queue = kombu.Queue(
      name="actiontec.cmd",
      exchange=Exchange('actiontec.cmd'),
      channel=self.rmqConn.channel(),
      durable=False,
      exclusive=False,
      auto_delete=True)
    consumer = Consumer(self.rmqConn.channel(), queues = queue, auto_declare=True, callbacks=[on_request])
    consumer.consume(no_ack=True)

    while 1:

      try:
        self.actiontecEntries = self.actiontecQueue.get(True, 0.1)
        print "Actiontec cache now contains",len(self.actiontecEntries),"entries"
      except Queue.Empty:
        # Actiontec queue empty; fall back to the Time Capsule queue.
        try:
          self.timecapsuleEntries = self.timecapsuleQueue.get(True, 0.1)
        except Queue.Empty:
          # Both queues empty: service the AMQP connection briefly,
          # then start the loop again.
          try:
            self.rmqConn.drain_events(timeout=0.1)
          except Exception:
            pass
          continue
        # newEntries = self.arpQueue.get(True, 0.1)
        # for entry in newEntries:
        #   self.entries[entry["mac"]] = entry
        # print "received",len(self.entries),"hosts from watcher"
        oldDiscoveredHosts = self.discoveredHosts
        self.discoveredHosts = {}
        for mac,details in self.timecapsuleEntries.iteritems():

          hostname = ""
          ip = ""
          tx = 0
          rx = 0
          txerr = 0
          rxerr = 0

          if mac in self.actiontecEntries:
            hostname = self.actiontecEntries[mac]["hostname"]
            ip = self.actiontecEntries[mac]["ip"]

          if mac in oldDiscoveredHosts:
            now = time.mktime((datetime.datetime.now()).timetuple())
            timeDelta = now - oldDiscoveredHosts[mac]["stats"]["timestamp"]

            rxDelta = int(details["rx"]) - oldDiscoveredHosts[mac]["rawStats"]["rx"]
            txDelta = int(details["tx"]) - oldDiscoveredHosts[mac]["rawStats"]["tx"]
            rxerrDelta = int(details["rxerr"]) - oldDiscoveredHosts[mac]["rawStats"]["rxerr"]
            txerrDelta = int(details["txerr"]) - oldDiscoveredHosts[mac]["rawStats"]["txerr"]

            tx = int(txDelta // timeDelta)
            rx = int(rxDelta // timeDelta)
            rxerr = int(rxerrDelta // timeDelta)
            txerr = int(txerrDelta // timeDelta)
          self.discoveredHosts[mac] = {
            "hostname": hostname,
            "ip": ip,
            "mac": mac,
            "rate": details['rate'],
            "stats":{
              "tx":tx,
              "rx":rx,
              "txerr":txerr,
              "rxerr":rxerr,
              "signal":int(details['signal']),
              "noise": int(details['noise']),
              "timestamp": time.mktime((datetime.datetime.now()).timetuple()),
            },
            "rawStats":{
              "rates": details['rates'],
              "time": int(details['time']),
              "noise": int(details['noise']),
              "rate": int(details['rate']),
              "rx": int(details['rx']),
              "tx": int(details['tx']),
              "rxerr": int(details['rxerr']),
              "txerr": int(details['txerr'])
            }
          }
        reply = []
        for mac,details in self.discoveredHosts.iteritems():
          reply.append({
            "hostname":details["hostname"],
            "ip":details["ip"],
            "mac":details["mac"],
            "rate":details["rate"],
            "stats":details["stats"]
            })
        self.statusProducer.publish(body = reply)
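
rpcReply above publishes the response to req.properties['reply_to'] with
the request's correlation_id, i.e. the classic AMQP RPC pattern. A client
honouring that contract might look like this sketch (the broker URL is a
placeholder; 'list_mac_addresses' is the one operation the daemon
registers):

import json
import uuid

from kombu import Connection, Consumer, Producer, Queue

with Connection('amqp://guest:guest@localhost:5672//') as conn:
    channel = conn.channel()
    reply_queue = Queue(str(uuid.uuid4()), exclusive=True,
                        auto_delete=True)(channel)
    reply_queue.declare()

    corr_id = str(uuid.uuid4())
    Producer(channel, serializer='json').publish(
        json.dumps({'operation': 'list_mac_addresses'}),
        routing_key='actiontec.cmd',  # default exchange routes by queue name
        reply_to=reply_queue.name,
        correlation_id=corr_id,
    )

    def on_reply(body, message):
        if message.properties.get('correlation_id') == corr_id:
            print(body)

    with Consumer(channel, queues=reply_queue, callbacks=[on_reply],
                  no_ack=True):
        conn.drain_events(timeout=5)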
Beispiel #48
0
class test_FilesystemTransport:

    def setup(self):
        self.channels = set()
        try:
            data_folder_in = tempfile.mkdtemp()
            data_folder_out = tempfile.mkdtemp()
        except Exception:
            raise SkipTest('filesystem transport: cannot create tempfiles')
        self.c = Connection(transport='filesystem',
                            transport_options={
                                'data_folder_in': data_folder_in,
                                'data_folder_out': data_folder_out,
                            })
        self.channels.add(self.c.default_channel)
        self.p = Connection(transport='filesystem',
                            transport_options={
                                'data_folder_in': data_folder_out,
                                'data_folder_out': data_folder_in,
                            })
        self.channels.add(self.p.default_channel)
        self.e = Exchange('test_transport_filesystem')
        self.q = Queue('test_transport_filesystem',
                       exchange=self.e,
                       routing_key='test_transport_filesystem')
        self.q2 = Queue('test_transport_filesystem2',
                        exchange=self.e,
                        routing_key='test_transport_filesystem2')

    def teardown(self):
        # make sure we don't attempt to restore messages at shutdown.
        for channel in self.channels:
            try:
                channel._qos._dirty.clear()
            except AttributeError:
                pass
            try:
                channel._qos._delivered.clear()
            except AttributeError:
                pass

    def _add_channel(self, channel):
        self.channels.add(channel)
        return channel

    def test_produce_consume_noack(self):
        producer = Producer(self._add_channel(self.p.channel()), self.e)
        consumer = Consumer(self._add_channel(self.c.channel()), self.q,
                            no_ack=True)

        for i in range(10):
            producer.publish({'foo': i},
                             routing_key='test_transport_filesystem')

        _received = []

        def callback(message_data, message):
            _received.append(message)

        consumer.register_callback(callback)
        consumer.consume()

        while 1:
            if len(_received) == 10:
                break
            self.c.drain_events()

        assert len(_received) == 10

    def test_produce_consume(self):
        producer_channel = self._add_channel(self.p.channel())
        consumer_channel = self._add_channel(self.c.channel())
        producer = Producer(producer_channel, self.e)
        consumer1 = Consumer(consumer_channel, self.q)
        consumer2 = Consumer(consumer_channel, self.q2)
        self.q2(consumer_channel).declare()

        for i in range(10):
            producer.publish({'foo': i},
                             routing_key='test_transport_filesystem')
        for i in range(10):
            producer.publish({'foo': i},
                             routing_key='test_transport_filesystem2')

        _received1 = []
        _received2 = []

        def callback1(message_data, message):
            _received1.append(message)
            message.ack()

        def callback2(message_data, message):
            _received2.append(message)
            message.ack()

        consumer1.register_callback(callback1)
        consumer2.register_callback(callback2)

        consumer1.consume()
        consumer2.consume()

        while 1:
            if len(_received1) + len(_received2) == 20:
                break
            self.c.drain_events()

        assert len(_received1) + len(_received2) == 20

        # compression
        producer.publish({'compressed': True},
                         routing_key='test_transport_filesystem',
                         compression='zlib')
        m = self.q(consumer_channel).get()
        assert m.payload == {'compressed': True}

        # queue.delete
        for i in range(10):
            producer.publish({'foo': i},
                             routing_key='test_transport_filesystem')
        assert self.q(consumer_channel).get()
        self.q(consumer_channel).delete()
        self.q(consumer_channel).declare()
        assert self.q(consumer_channel).get() is None

        # queue.purge
        for i in range(10):
            producer.publish({'foo': i},
                             routing_key='test_transport_filesystem2')
        assert self.q2(consumer_channel).get()
        self.q2(consumer_channel).purge()
        assert self.q2(consumer_channel).get() is None
Beispiel #49
0
'''
Created on May 19, 2014

@author: xiaoxubeii
'''
from kombu import Connection
from kombu.messaging import Consumer
from entity1 import queue

connection = Connection("amqp://*****:*****@xiaoxubeii:5672//")
channel = connection.channel()

def test(body, message):
    print body
    message.ack()
    
consumer = Consumer(channel, queue)
consumer.register_callback(test)
consumer.consume()

while True:
    connection.drain_events()
Beispiel #50
0
class test_PyroTransport:

    def setup(self):
        self.c = Connection(transport='pyro', virtual_host="kombu.broker")
        self.e = Exchange('test_transport_pyro')
        self.q = Queue('test_transport_pyro',
                       exchange=self.e,
                       routing_key='test_transport_pyro')
        self.q2 = Queue('test_transport_pyro2',
                        exchange=self.e,
                        routing_key='test_transport_pyro2')
        self.fanout = Exchange('test_transport_pyro_fanout', type='fanout')
        self.q3 = Queue('test_transport_pyro_fanout1',
                        exchange=self.fanout)
        self.q4 = Queue('test_transport_pyro_fanout2',
                        exchange=self.fanout)

    def test_driver_version(self):
        assert self.c.transport.driver_version()

    @pytest.mark.skip("requires running Pyro nameserver and Kombu Broker")
    def test_produce_consume_noack(self):
        channel = self.c.channel()
        producer = Producer(channel, self.e)
        consumer = Consumer(channel, self.q, no_ack=True)

        for i in range(10):
            producer.publish({'foo': i}, routing_key='test_transport_pyro')

        _received = []

        def callback(message_data, message):
            _received.append(message)

        consumer.register_callback(callback)
        consumer.consume()

        while 1:
            if len(_received) == 10:
                break
            self.c.drain_events()

        assert len(_received) == 10

    def test_drain_events(self):
        with pytest.raises(socket.timeout):
            self.c.drain_events(timeout=0.1)

        c1 = self.c.channel()
        c2 = self.c.channel()

        with pytest.raises(socket.timeout):
            self.c.drain_events(timeout=0.1)

        del(c1)  # so pyflakes doesn't complain.
        del(c2)

    @pytest.mark.skip("requires running Pyro nameserver and Kombu Broker")
    def test_drain_events_unregistered_queue(self):
        c1 = self.c.channel()
        producer = self.c.Producer()
        consumer = self.c.Consumer([self.q2])

        producer.publish(
            {'hello': 'world'},
            declare=consumer.queues,
            routing_key=self.q2.routing_key,
            exchange=self.q2.exchange,
        )
        message = consumer.queues[0].get()._raw

        class Cycle(object):

            def get(self, callback, timeout=None):
                return (message, 'foo'), c1

        self.c.transport.cycle = Cycle()
        self.c.drain_events()

    @pytest.mark.skip("requires running Pyro nameserver and Kombu Broker")
    def test_queue_for(self):
        chan = self.c.channel()
        x = chan._queue_for('foo')
        assert x
        assert chan._queue_for('foo') is x
Beispiel #51
0
class test_basic(TestCase):

    def setUp(self):
        self.c = Connection(transport='ironmq')
        self.p = Connection(transport='ironmq')
        self.e = Exchange('test_transport_iron')
        self.q = Queue('test_transport_iron',
                       exchange=self.e,
                       routing_key='test_transport_iron')
        self.q2 = Queue('test_transport_iron2',
                        exchange=self.e,
                        routing_key='test_transport_iron2')
        self.q(self.c.channel()).delete()
        self.q2(self.c.channel()).delete()

    def test_produce_consume_noack(self):
        producer = Producer(self.p.channel(), self.e)
        consumer = Consumer(self.c.channel(), self.q, no_ack=True)

        for i in range(10):
            producer.publish({'foo': i},
                             routing_key='test_transport_iron')

        _received = []

        def callback(message_data, message):
            _received.append(message)

        consumer.register_callback(callback)
        consumer.consume()

        while 1:
            if len(_received) == 10:
                break
            self.c.drain_events()

        self.assertEqual(len(_received), 10)

    def test_produce_consume(self):
        producer_channel = self.p.channel()
        consumer_channel = self.c.channel()
        producer = Producer(producer_channel, self.e)
        consumer1 = Consumer(consumer_channel, self.q)
        consumer2 = Consumer(consumer_channel, self.q2)
        self.q2(consumer_channel).declare()

        for i in range(10):
            producer.publish({'foo': i},
                             routing_key='test_transport_iron')
        for i in range(10):
            producer.publish({'foo': i},
                             routing_key='test_transport_iron2')

        _received1 = []
        _received2 = []

        def callback1(message_data, message):
            _received1.append(message)
            message.ack()

        def callback2(message_data, message):
            _received2.append(message)
            message.ack()

        consumer1.register_callback(callback1)
        consumer2.register_callback(callback2)

        consumer1.consume()
        consumer2.consume()

        while 1:
            if len(_received1) + len(_received2) == 20:
                break
            self.c.drain_events()

        self.assertEqual(len(_received1) + len(_received2), 20)

        # compression
        #producer.publish({'compressed': True},
        #                 routing_key='test_transport_iron',
        #                 compression='zlib')
        #m = self.q(consumer_channel).get()
        #self.assertDictEqual(m.payload, {'compressed': True})

        # queue.delete
        for i in range(10):
            producer.publish({'foo': i},
                             routing_key='test_transport_iron')
        self.assertTrue(self.q(consumer_channel).get())
        self.q(consumer_channel).delete()
        self.q(consumer_channel).declare()
        self.assertIsNone(self.q(consumer_channel).get())

        # queue.purge
        for i in range(10):
            producer.publish({'foo': i},
                             routing_key='test_transport_iron2')
        self.assertTrue(self.q2(consumer_channel).get())
        self.q2(consumer_channel).purge()
        self.assertIsNone(self.q2(consumer_channel).get())
Beispiel #52
0
class _AMQPServerWrapper(object):
    def __init__(self, amqp_url):
        self.__connection = Connection(amqp_url)
        self.__connection.connect()
        self.__monitors = {}
        self.__running = True
        self.__consumer = Consumer(self.__connection, on_message=self.__on_message)
        self.__consumer_gl = gevent.spawn(self.__consumer_greenlet_main)

    def __consumer_greenlet_main(self):
        gevent.sleep(0)
        while self.__running:
            self.__consumer.consume()
            try:
                self.__connection.drain_events(timeout=0.1)
            except Exception as ex:     # NOQA: assigned but not used (left in for super-duper-low-level-debug)
                # print("was woken because {}".format(ex))
                pass
            gevent.sleep(0)
            # print("---loop")

    def __on_message(self, msg):
        ct = msg.delivery_info['consumer_tag']
        assert ct in self.__monitors, \
            "Message from consumer '{}', but we are not monitoring that (list={})".format(
                msg.delivery_info['consumer_tag'], self.__monitors.keys())
        mon = self.__monitors[ct]
        mon['event_cb'](msg, msg.body)

    @property
    def connected(self):
        return self.__connection.connected

    def create_add_monitor(self, exchange, routing_key, event_cb, queue_name=None):
        mname = "ex={} rk={} qn={}".format(exchange, routing_key, queue_name)
        if mname in self.__monitors:
            mon = self.__monitors[mname]
            mon["event_cb"] = event_cb
        else:
            if queue_name is None:
                queue_name = ''
                exclusive = True
            else:
                exclusive = False
            ex = Exchange(exchange, 'topic')
            queue = Queue(exchange=ex, routing_key=routing_key, exclusive=exclusive)
            bound_queue = queue.bind(self.__connection)
            self.__consumer.add_queue(bound_queue)
            bound_queue.consume(mname, self.__on_message)
            mon = {
                "event_cb": event_cb,
                "exchange": ex
            }
            self.__monitors[mname] = mon
        return mon['exchange']

    def inject(self, exchange, routing_key, payload):
        prod = Producer(self.__connection, exchange=exchange, routing_key=routing_key)
        prod.publish(payload)

    def test_helper_sync_send_msg(self, exchange, ex_rk, send_rk, payload):
        ex = Exchange(exchange, 'topic')
        queue = Queue(exchange=ex, routing_key=ex_rk + '.*', exclusive=True, channel=self.__connection)
        queue.declare()
        prod = Producer(self.__connection, exchange=ex, routing_key=send_rk)
        prod.publish(payload)
        return queue

    def test_helper_sync_recv_msg(self, queue):
        for tick in range(10):
            msg = queue.get()
            if msg is not None:
                break
            time.sleep(1)
        return msg
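
# A minimal usage sketch for _AMQPServerWrapper, assuming a reachable
# RabbitMQ broker; the URL, exchange and routing keys below are
# illustrative, not taken from the original source.
#
#   wrapper = _AMQPServerWrapper('amqp://guest:guest@localhost:5672//')
#
#   def on_event(msg, body):
#       print('monitored event: {}'.format(body))
#
#   wrapper.create_add_monitor('on.events', 'events.#', on_event)
#   wrapper.inject('on.events', 'events.test', {'hello': 'world'})
#
#   q = wrapper.test_helper_sync_send_msg('on.events', 'events',
#                                         'events.test', {'ping': 1})
#   print(wrapper.test_helper_sync_recv_msg(q))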
# NOTE: the start of this snippet is truncated in the source; the kombu
# imports are restored below, while Queue_Server, Queue_User and
# Queue_PassWord are assumed to be defined in the elided portion.
from kombu import Connection, Consumer, Exchange, Queue

Queue_Port = 5672
Queue_Path = '/websocketserver'


def callback(body, message):
    print(body)


connection = Connection(hostname=Queue_Server,
                        port=Queue_Port,
                        userid=Queue_User,
                        password=Queue_PassWord,
                        virtual_host=Queue_Path)
channel = connection.channel()
smsExchange = Exchange("sys.sms",
                       type='topic',
                       channel=channel,
                       durable=True,
                       delivery_mode=2)
task_queue = Queue('test_recv',
                   exchange=smsExchange,
                   routing_key='sms.code',
                   durable=False,
                   channel=channel)
consumer = Consumer(channel, task_queue, no_ack=True, callbacks=[callback])
consumer.qos(prefetch_count=1)
consumer.consume()
try:
    while True:
        connection.drain_events()
finally:
    connection.close()
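

# A matching publisher sketch for the queue above, assuming the same broker
# settings; the helper name and the sample payload are illustrative only.
from kombu import Producer


def publish_sms_code(payload):
    pub_conn = Connection(hostname=Queue_Server,
                          port=Queue_Port,
                          userid=Queue_User,
                          password=Queue_PassWord,
                          virtual_host=Queue_Path)
    try:
        producer = Producer(pub_conn.channel(),
                            exchange=smsExchange,
                            routing_key='sms.code')
        producer.publish(payload, serializer='json')
    finally:
        pub_conn.close()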
Beispiel #54
0
class Event_Generator_1(Event_Generator_Base):
    # list_event_condition is saved as a mysql table:
    # | condition_id | condition_name | condition |
    list_event_condition = []

    def __init__(self, event_generator_name, event_generator_id, description,
                 event_dest_topic):
        Event_Generator_Base.__init__(self, event_generator_name,
                                      event_generator_id, description,
                                      event_dest_topic)

        BROKER_CLOUD = "192.168.43.30"
        self.producer_connection = Connection("localhost")
        self.consumer_connection = Connection(BROKER_CLOUD)
        self.exchange = Exchange("IoT", type="direct")
        # self.queue_get_states = Queue(name='data_source.to.' + str(self.event_generator_name), exchange=self.exchange,
        #                          routing_key='data_source.to.' + str(self.event_generator_name))#, message_ttl=20)
        self.queue_get_states = Queue(name='rule.request.states',
                                      exchange=self.exchange,
                                      routing_key='rule.request.states',
                                      message_ttl=20)

        self.db = MySQLdb.connect(host="0.0.0.0",
                                  user="******",
                                  passwd="root",
                                  db="rule")
        self.cursor = self.db.cursor()

    def read_event_condition(self):
        print("reading event condition ...")
        request = "Select rule_id, rule_content from Rule"

        try:
            # Execute the SQL command
            self.cursor.execute(request)
            results = self.cursor.fetchall()
            # self.list_event_condition = result
            # print ("results: ", results)

            list_event_condition = []
            for result in results:
                print("result: ", result)
                (rule_id, rule_content) = result
                print("rule_id: ", rule_id)

                rule_content = json.loads(rule_content)
                print("\nrule content: ", rule_content)

                trigger_type = rule_content['trigger_type']
                trigger_content = rule_content['trigger_content']

                print("\n\ntrigger_type: ", trigger_type)
                print("\n\ntrigger_content: ", trigger_content)

                list_event_condition.append(
                    (rule_id, trigger_type, trigger_content))

            # Commit your changes in the database
            self.db.commit()

            return list_event_condition
        except Exception:
            # Rollback in case there is any error
            print("error read_event_condition")
            self.db.rollback()
            return None

    def check_trigger_item_has_given_state(self, trigger_content):
        print("checking trigger item_has_given_state ...")
        print(trigger_content)

        # trigger_content arrives as a JSON string; parse it once before
        # reading fields from it.
        trigger_content = json.loads(trigger_content)

        pre_result = False
        total_result = False

        bitwise_operator = trigger_content['trigger_bitwise']
        print("bitwise_operator: ", bitwise_operator)

        # NOTE: the exact key holding the item list is not shown in the
        # source; 'trigger_items' is an assumption.
        trigger_content_list = trigger_content['trigger_items']

        for trigger_item in trigger_content_list:
            result = False
            timer = '0s'  # window before the last insert; kept at the default

            trigger_item_id = trigger_item['id']
            print("trigger_item_id: ", trigger_item_id)

            operator = trigger_item['operator']
            print("operator: ", operator)

            value = trigger_item['value']
            print("value: ", value)

            if value.isdigit():
                value = float(value)

            request = "select time from ItemTable where item_id = '%s' order by time asc limit 1" % (
                trigger_item_id)
            print("time request: ", request)
            last_insert_time = str(datetime.max)

            try:
                # Execute the SQL command
                self.cursor.execute(request)
                request_result = self.cursor.fetchall()
                last_insert_time = request_result[0][0]
                # Commit your changes in the database
                self.db.commit()
            except Exception:
                print("error read time")
                result = False
                pre_result = result
                self.db.rollback()

            last_insert_time = datetime.strptime(last_insert_time,
                                                 '%Y-%m-%d %H:%M:%S.%f')
            check_time = last_insert_time - timedelta(
                seconds=float(timer.split('s')[0]))

            request = "select item_state from ItemTable where item_id = '%s' and time >= '%s' order by time desc" % (
                trigger_item_id, check_time)

            try:
                # Execute the SQL command
                self.cursor.execute(request)
                request_result = self.cursor.fetchall()
                item_state_list = [row[0] for row in request_result]
                print("item_state_list: ", item_state_list)
                # Commit your changes in the database
                self.db.commit()

                for item_state in item_state_list:
                    print("item_state: ", item_state)
                    print("value: ", value)
                    result = True

                    if item_state.isdigit():
                        item_state = float(item_state)
                    # elif (item_state == "on"):
                    #     item_state = 1
                    # elif (item_state == "off"):
                    #     item_state = 0

                    if (operator == "GT"):
                        if (item_state <= value):
                            result = False
                            break
                    elif (operator == "GE"):
                        if (item_state < value):
                            result = False
                            break
                    elif (operator == "LT"):
                        if (item_state >= value):
                            result = False
                            break
                    elif (operator == "LE"):
                        if (item_state > value):
                            result = False
                            break
                    elif (operator == "EQ"):
                        if (item_state != value):
                            result = False
                            break
                    elif (operator == "NE"):
                        if (item_state == value):
                            result = False
                            break
                    else:
                        print("operator is not valid")
                        result = False
                        break

                print("result: ", result)

                if (bitwise_operator.upper() == "NONE"):
                    total_result = result
                    pre_result = result
                elif (bitwise_operator.upper() == "AND"):
                    total_result = pre_result and result
                    pre_result = total_result
                elif (bitwise_operator.upper() == "OR"):
                    total_result = pre_result or result
                    pre_result = total_result
                else:
                    print("bitwise operator is not pre-defined")

            except Exception:
                # Rollback in case there is any error
                self.db.rollback()
                # print ("error check_trigger_item_has_given_state")
                # return None
                result = False
                pre_result = result

        print("total result: ", total_result)

        return total_result

    def check_trigger_condition(self, trigger_type, trigger_content, item_id):
        print("checking trigger condition ...")
        result = False

        if (trigger_type == "item_has_given_state"):
            result = self.check_trigger_item_has_given_state(trigger_content)
        else:
            print("trigger_type is not pre-defined")

        return result
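
    # For reference, the rule payload shape these checks assume; the field
    # names follow the parsing code above rather than a documented schema,
    # and the 'trigger_items' key in particular is an assumption:
    #
    #   rule_content = {
    #       "trigger_type": "item_has_given_state",
    #       "trigger_content": json.dumps({
    #           "trigger_bitwise": "AND",
    #           "trigger_items": [
    #               {"id": "sensor-1", "operator": "GT", "value": "30"},
    #           ],
    #       }),
    #   }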

    def create_event(self, rule_id):
        print("creating Event ...")

        request = "select trigger_content from RuleTable where trigger_id = '%s'" % (
            rule_id)
        # print (request)

        try:
            # Execute the SQL command
            self.cursor.execute(request)
            request_result = self.cursor.fetchall()
            print(request_result[0][0])
            trigger_content = request_result[0][0]
            trigger_content = json.loads(trigger_content)

            # print (trigger_content)
            output_field = trigger_content['outputs']
            print(output_field)
            # Commit your changes in the database
            self.db.commit()

            for output in output_field:
                event_id = output['event_id']
                # event_name = output['event_name']
                event_source = output['event_source']
                description = output['description']

                message = {
                    'event_generator_id': self.event_generator_id,
                    'event_id': event_id,
                    # 'event_name' : event_name,
                    'event_source': event_source,
                    'trigger_id': rule_id,  # rule_id doubles as the trigger id
                    'description': description,
                    'time': str(datetime.now())
                }

                self.producer_connection.ensure_connection()
                with Producer(self.producer_connection) as producer:
                    producer.publish(json.dumps(message),
                                     exchange=self.exchange.name,
                                     routing_key='event_generator.to.' +
                                     str(self.event_dest_topic),
                                     retry=True)
                print("Send event to Rule Engine: " + 'event_generator.to.' +
                      str(self.event_dest_topic))

        except Exception:
            print("error read trigger_content")

    def write_item_to_database(self, item_id, item_name, item_type, item_state,
                               time):
        print("writting item to database ...")
        request = """INSERT INTO ItemTable(item_id, item_name, item_type, item_state, time)
                    VALUES ("%s", "%s", "%s", "%s", "%s")""" \
                  % (item_id, item_name, item_type, item_state, time)

        print(request)

        try:
            # execute() returns the affected row count for an INSERT
            affected = self.cursor.execute(request)
            print("write item %s to database: %s row(s)" % (item_id, affected))
            self.db.commit()
            return True

        except Exception:
            # Rollback in case there is any error
            self.db.rollback()
            print("error write item to database")
            return False

    def receive_states(self):
        def handle_notification(body, message):
            print("\n\n\n")
            print("Receive state!")

            # Parse the notification body once and walk each reported state.
            states = json.loads(body)["body"]["states"]

            for state in states:
                item_id = state["MetricId"]
                item_name = state["MetricLocalId"]
                item_type = state["DataPoint"]["DataType"]
                item_state = state["DataPoint"]["Value"]

                # time = json.loads(body)[""]
                time = str(datetime.now())

                # Write new item to the database
                write_result = self.write_item_to_database(
                    item_id, item_name, item_type, item_state, time)

                # If the item was written successfully, evaluate the
                # triggers against it
                if write_result:
                    list_event_condition = self.read_event_condition() or []
                    print("list event condition: ", list_event_condition)
                    for event_condition in list_event_condition:
                        rule_id = event_condition[0]
                        trigger_type = event_condition[1]
                        trigger_content = event_condition[2]

                        # Check the item with each trigger condition
                        result = self.check_trigger_condition(
                            trigger_type, trigger_content, item_id)

                        print("check trigger : ", result)

                        if result is None:
                            return None

                        # If checkTriggerCondition succeeds, create an event
                        if result:
                            # event_source = item_id
                            # event_name = str(item_name) + "_" + str(datetime.now())
                            # event_id   = randint(1, 1000000)
                            # my_event = Event(event_name, event_id, event_source)
                            self.create_event(rule_id)

            # End handle_notification

        try:
            self.consumer_connection.ensure_connection(max_retries=1)
            with Consumer(self.consumer_connection,
                          queues=self.queue_get_states,
                          callbacks=[handle_notification],
                          no_ack=True):
                while True:
                    self.consumer_connection.drain_events()
        except (ConnectionRefusedError, exceptions.OperationalError):
            print('Connection lost')
        except self.consumer_connection.connection_errors:
            print('Connection error')

    def run(self):
        self.cursor.execute("""CREATE TABLE IF NOT EXISTS ItemTable(
                        item_id VARCHAR(50),
                        item_name VARCHAR(50),
                        item_type VARCHAR(50),
                        item_state VARCHAR(50), 
                        time VARCHAR(50),
                        PRIMARY KEY (item_id, time))""")

        while 1:
            try:
                self.receive_states()
            except (ConnectionRefusedError, exceptions.OperationalError):
                print('Connection lost')
            except self.consumer_connection.connection_errors:
                print('Connection error')
Beispiel #55
0
from kombu import Connection, Exchange, Queue, Consumer
'''
Sample Kombu consumer to drain messages from AMQP message bus
'''
rabbit_url = "amqp://{rabbit_server_ip}:5672/"
conn = Connection(rabbit_url)

exchange = Exchange('data_exchange', type="direct")
queue = Queue(name="data_q", exchange=exchange, routing_key="data_info")


def process_message(body, message):
    print(type(body), body)
    print(type(message), message.body)
    print("The body is {}".format(body))
    message.ack()


with Consumer(conn,
              queues=queue,
              callbacks=[process_message],
              accept=["text/plain", "json"]):
    conn.drain_events(timeout=6000)
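
# A producer counterpart sketch for the consumer above, assuming the same
# broker URL; it declares the queue binding and publishes one message that
# the consumer's 'accept' list can decode.
from kombu import Producer

with Connection(rabbit_url) as pub_conn:
    producer = Producer(pub_conn.channel(),
                        exchange=exchange,
                        routing_key='data_info')
    producer.publish({'data': 'sample'},
                     serializer='json',
                     declare=[queue])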
#coding:utf-8
from kombu import Connection
from kombu.messaging import Consumer,Producer
from kombu import Exchange, Queue

Queue_User = '******'
Queue_PassWord = '******'
Queue_Server = '124.207.209.57'
Queue_Port = None
Queue_Path = '/spider'

conn = Connection(hostname=Queue_Server,
                  port=Queue_Port,
                  userid=Queue_User,
                  password=Queue_PassWord,
                  virtual_host=Queue_Path)
channel = conn.channel()
exch = Exchange('weibodownload', type='topic', durable=True,
                delivery_mode=2, passive=True)
# routing_key 'weibo.geo' carries the latest geo-tagged posts;
# routing_key 'weibo.user_geo' carries each user's historical posts
queue = Queue(exchange=exch, routing_key='weibo.user_geo', auto_delete=True)
consumer = Consumer(channel=channel, queues=queue, no_ack=True)
consumer.qos(prefetch_count=1)


def on_response(body, message):
    print(body)
consumer.register_callback(on_response)
consumer.consume()
while True:
    conn.drain_events()

Beispiel #57
0
class test_MemoryTransport(TestCase):
    def setUp(self):
        self.c = Connection(transport='memory')
        self.e = Exchange('test_transport_memory')
        self.q = Queue('test_transport_memory',
                       exchange=self.e,
                       routing_key='test_transport_memory')
        self.q2 = Queue('test_transport_memory2',
                        exchange=self.e,
                        routing_key='test_transport_memory2')

    def test_produce_consume_noack(self):
        channel = self.c.channel()
        producer = Producer(channel, self.e)
        consumer = Consumer(channel, self.q, no_ack=True)

        for i in range(10):
            producer.publish({'foo': i}, routing_key='test_transport_memory')

        _received = []

        def callback(message_data, message):
            _received.append(message)

        consumer.register_callback(callback)
        consumer.consume()

        while 1:
            if len(_received) == 10:
                break
            self.c.drain_events()

        self.assertEqual(len(_received), 10)

    def test_produce_consume(self):
        channel = self.c.channel()
        producer = Producer(channel, self.e)
        consumer1 = Consumer(channel, self.q)
        consumer2 = Consumer(channel, self.q2)
        self.q2(channel).declare()

        for i in range(10):
            producer.publish({'foo': i}, routing_key='test_transport_memory')
        for i in range(10):
            producer.publish({'foo': i}, routing_key='test_transport_memory2')

        _received1 = []
        _received2 = []

        def callback1(message_data, message):
            _received1.append(message)
            message.ack()

        def callback2(message_data, message):
            _received2.append(message)
            message.ack()

        consumer1.register_callback(callback1)
        consumer2.register_callback(callback2)

        consumer1.consume()
        consumer2.consume()

        while 1:
            if len(_received1) + len(_received2) == 20:
                break
            self.c.drain_events()

        self.assertEqual(len(_received1) + len(_received2), 20)

        # compression
        producer.publish({'compressed': True},
                         routing_key='test_transport_memory',
                         compression='zlib')
        m = self.q(channel).get()
        self.assertDictEqual(m.payload, {'compressed': True})

        # queue.delete
        for i in range(10):
            producer.publish({'foo': i}, routing_key='test_transport_memory')
        self.assertTrue(self.q(channel).get())
        self.q(channel).delete()
        self.q(channel).declare()
        self.assertIsNone(self.q(channel).get())

        # queue.purge
        for i in range(10):
            producer.publish({'foo': i}, routing_key='test_transport_memory2')
        self.assertTrue(self.q2(channel).get())
        self.q2(channel).purge()
        self.assertIsNone(self.q2(channel).get())

    def test_drain_events(self):
        with self.assertRaises(socket.timeout):
            self.c.drain_events(timeout=0.1)

        c1 = self.c.channel()
        c2 = self.c.channel()

        with self.assertRaises(socket.timeout):
            self.c.drain_events(timeout=0.1)

        del c1  # so pyflakes doesn't complain.
        del c2

    def test_drain_events_unregistered_queue(self):
        c1 = self.c.channel()

        class Cycle(object):
            def get(self, timeout=None):
                return ('foo', 'foo'), c1

        self.c.transport.cycle = Cycle()
        with self.assertRaises(KeyError):
            self.c.drain_events()

    def test_queue_for(self):
        chan = self.c.channel()
        chan.queues.clear()

        x = chan._queue_for('foo')
        self.assertTrue(x)
        self.assertIs(chan._queue_for('foo'), x)
class MozReviewBot(object):

    def __init__(self, config_path=None, reviewboard_url=None,
                 reviewboard_user=None, reviewboard_password=None,
                 pulse_host=None, pulse_port=None, pulse_userid=None,
                 pulse_password=None, exchange=None, queue=None,
                 routing_key=None, pulse_timeout=None, pulse_ssl=None,
                 repo_root=None, logger=None):

        if logger is None:
            self.logger = logging.getLogger('mozreviewbot')
        else:
            self.logger = logger

        # We use options passed into __init__ preferentially. If any of these
        # are not specified, we next check the configuration file, if any.
        # Finally, we use environment variables.
        if config_path and not os.path.isfile(config_path):
            # ConfigParser doesn't seem to throw if it is unable to find the
            # config file so we'll explicitly check that it exists.
            self.logger.error('could not locate config file: %s' % (
                config_path))
            config_path = None
        if config_path:
            try:
                config = ConfigParser()
                config.read(config_path)
                reviewboard_url = (reviewboard_url
                                   or config.get('reviewboard', 'url'))
                reviewboard_user = (reviewboard_user
                                    or config.get('reviewboard', 'user'))
                reviewboard_password = (reviewboard_password
                                        or config.get('reviewboard',
                                                      'password'))
                pulse_host = pulse_host or config.get('pulse', 'host')
                pulse_port = pulse_port or config.get('pulse', 'port')
                pulse_userid = pulse_userid or config.get('pulse', 'userid')
                pulse_password = pulse_password or config.get('pulse',
                                                              'password')
                exchange = exchange or config.get('pulse', 'exchange')
                queue = queue or config.get('pulse', 'queue')
                routing_key = routing_key or config.get('pulse',
                                                        'routing_key')
                pulse_timeout = pulse_timeout or config.get('pulse',
                                                            'timeout')
                if pulse_ssl is None:
                    pulse_ssl = config.get('pulse', 'ssl')
            except NoSectionError as e:
                self.logger.error('configuration file missing section: %s' %
                                  e.section)
            try:
                repo_root = repo_root or config.get('hg', 'repo_root')
            except (NoOptionError, NoSectionError):
                # Subclasses do not need to define repo root if they do not
                # plan on using the hg functionality.
                pass

            # keep config around in case any subclasses would like to extract
            # options from it.
            self.config = config
        else:
            self.config = None

        reviewboard_url = reviewboard_url or os.environ.get('REVIEWBOARD_URL')
        pulse_host = pulse_host or os.environ.get('PULSE_HOST')
        pulse_port = pulse_port or os.environ.get('PULSE_PORT')

        self.rbclient = RBClient(reviewboard_url, username=reviewboard_user,
                                 password=reviewboard_password)
        self.api_root = self.rbclient.get_root()

        self.conn = Connection(hostname=pulse_host, port=pulse_port,
                               userid=pulse_userid, password=pulse_password,
                               ssl=pulse_ssl)

        self.exchange = Exchange(exchange, type='topic', durable=True)
        self.queue = Queue(name=queue, exchange=self.exchange, durable=True,
                           routing_key=routing_key, exclusive=False,
                           auto_delete=False)

        self.pulse_timeout = float(pulse_timeout)
        self.repo_root = repo_root

        self.hg = None
        for path_dir in os.environ['PATH'].split(os.pathsep):
            p = os.path.join(path_dir, 'hg')
            if os.path.exists(p):
                self.hg = p
                break

    def _get_available_messages(self):
        messages = []

        def onmessage(body, message):
            messages.append((body, message))

        consumer = self.conn.Consumer([self.queue], callbacks=[onmessage],
                                      auto_declare=True)
        with consumer:
            try:
                self.conn.drain_events(timeout=self.pulse_timeout)
            except socket.timeout:
                pass

        return messages

    def _run_hg(self, hg_args):
        # TODO: Use hgtool.

        args = [self.hg] + hg_args

        env = dict(os.environ)
        env['HGENCODING'] = 'utf-8'

        null = open(os.devnull, 'w')

        # Execute at / to prevent Mercurial's path traversal logic from
        # kicking in and picking up unwanted config files.
        return subprocess.check_output(args, stdin=null, stderr=null,
                                       env=env, cwd='/')

    def ensure_hg_repo_exists(self, landing_repo_url, repo_url, pull_rev=None):
        # TODO: Use the root changeset in each repository as an identifier.
        #       This will enable "forks" to share the same local clone.
        #       The "share" extension now has support for this.
        #       Read hg help -e share for details about "pooled storage."
        #       We should probably deploy that.
        url = landing_repo_url or repo_url

        sha1 = hashlib.sha1(url).hexdigest()
        repo_path = os.path.join(self.repo_root, sha1)

        if not os.path.exists(repo_path):
            args = ['clone', url, repo_path]
            self.logger.debug('cloning %s' % url)
            self._run_hg(args)
            self.logger.debug('finished cloning %s' % url)

        args = ['-R', repo_path, 'pull', repo_url]

        if pull_rev:
            args.extend(['-r', pull_rev])

        self.logger.debug('pulling %s' % repo_url)
        self._run_hg(args)
        self.logger.debug('finished pulling %s' % repo_url)

        return repo_path

    def hg_commit_changes(self, repo_path, node, diff_context=None):
        """Obtain information about what changed in a Mercurial commit.

        The return value is a tuple of:

          (set(adds), set(dels), set(mods), None, diff)

        The first 4 items list what files changed in the changeset. The last
        item is a unified diff of the changeset.

        File copies are currently not returned. ``None`` is being used as a
        placeholder until support is needed.
        """
        part_delim = str(uuid.uuid4())
        item_delim = str(uuid.uuid4())

        parts = [
            '{join(file_adds, "%s")}' % item_delim,
            '{join(file_dels, "%s")}' % item_delim,
            '{join(file_mods, "%s")}' % item_delim,
            '{join(file_copies, "%s")}' % item_delim,
        ]

        template = part_delim.join(parts)

        self._run_hg(['-R', repo_path, 'up', '-C', node])

        res = self._run_hg(['-R', repo_path, 'log', '-r', node,
                            '-T', template])

        diff_args = ['-R', repo_path, 'diff', '-c', node]
        if diff_context is not None:
            diff_args.extend(['-U', str(diff_context)])
        diff = self._run_hg(diff_args)

        adds, dels, mods, copies = res.split(part_delim)
        adds = set(f for f in adds.split(item_delim) if f)
        dels = set(f for f in dels.split(item_delim) if f)
        mods = set(f for f in mods.split(item_delim) if f)
        # TODO parse the copies.

        return adds, dels, mods, None, diff
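
    # A usage sketch for hg_commit_changes; the repo path and node below
    # are illustrative:
    #
    #   adds, dels, mods, _copies, diff = bot.hg_commit_changes(
    #       '/repos/abc123', 'tip', diff_context=5)
    #   print('files added: %s' % sorted(adds))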

    def strip_nonpublic_changesets(self, repo_path):
        """Strip non-public changesets from a repository.

        Pulling changesets over and over results in many heads in a repository.
        This makes Mercurial slow. So, we prune non-public changesets/heads
        to keep repositories fast.
        """

        self._run_hg(['-R', repo_path, '--config', 'extensions.strip=',
                      'strip', '--no-backup', '-r', 'not public()'])

    def get_commit_files(self, commit):
        """Fetches a list of files that were changed by this commit."""

        rrid = commit['review_request_id']
        diff_revision = commit['diffset_revision']

        start = 0
        files = []
        while True:
            result = self.api_root.get_files(review_request_id=rrid,
                                             diff_revision=diff_revision,
                                             start=start)
            files.extend(result)
            start += result.num_items
            if result.num_items == 0 or start >= result.total_results:
                break
        return files

    def handle_available_messages(self):
        for body, message in self._get_available_messages():
            payload = body['payload']
            repo_url = payload['repository_url']
            landing_repo_url = payload['landing_repository_url']
            commits = payload['commits']
            # TODO: should we allow process commits to signal that we should
            #       skip acknowledging the message?
            try:
                for commit in commits:
                    rrid = commit['review_request_id']
                    diff_revision = commit['diffset_revision']

                    review = BatchReview(self.api_root, rrid, diff_revision)
                    self.process_commit(review, landing_repo_url, repo_url,
                                        commit)
            finally:
                # Acking unconditionally keeps the queue from growing without
                # bound, but it also means we cannot fix whatever caused an
                # exception and restart the bot to re-handle the message.

    def listen_forever(self):
        while True:
            self.handle_available_messages()

    def process_commit(self, review, landing_repo_url, repo_url, commit):
        pass
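

# A minimal sketch of how MozReviewBot is assumed to be used: subclass it,
# override process_commit(), and run the pulse loop. The subclass name and
# config path are illustrative, not from the original source.
class PrintingBot(MozReviewBot):
    def process_commit(self, review, landing_repo_url, repo_url, commit):
        self.logger.info('saw commit %s on %s'
                         % (commit['review_request_id'], repo_url))


if __name__ == '__main__':
    bot = PrintingBot(config_path='mozreviewbot.ini')  # assumed config file
    bot.listen_forever()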