Example #1
def main():
    rospy.init_node('iot_consumer', anonymous=True, disable_signals=True)
    params = wrap_namespace(rospy.get_param('~'))

    control_pub = rospy.Publisher(params.topic.control_cmd,
                                  Control,
                                  queue_size=10)
    mission_pub = rospy.Publisher(params.topic.mission_cmd,
                                  Mission,
                                  queue_size=10)
    cmdexe_pub = rospy.Publisher(params.topic.navi_cmdexe,
                                 String,
                                 queue_size=10)

    naviCommand = NaviCommand(control_pub, mission_pub, cmdexe_pub)
    dispatcher = Dispatcher(naviCommand)
    consumer = Consumer(dispatcher.dispatch_cb)

    def handler(signum, frame):
        rospy.loginfo('shutting down...')
        consumer.shutdown()

    signal.signal(signal.SIGINT, handler)

    Container(consumer).run()
    rospy.signal_shutdown('finish')
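The wrap_namespace helper used above is not shown here; judging by the attribute-style access (params.topic.control_cmd), it presumably converts the nested dict returned by rospy.get_param('~') into namespace objects. A minimal sketch of such a helper, assuming plain nested dicts:

from types import SimpleNamespace

def wrap_namespace(value):
    # Sketch only: recursively expose nested dict keys as attributes.
    # The original helper's implementation is not shown.
    if isinstance(value, dict):
        return SimpleNamespace(**{k: wrap_namespace(v) for k, v in value.items()})
    return value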
Example #2
    def run(self):

        self.logger.info("Running Kafka Consumer")

        consumer_thread = Consumer(self.kafka_broker, self.topic)

        consumer_thread.register_kafka_listener()
Example #3
    def publish_and_get_response(self, message, payload, timeout_secs=5):
        sent_correlation = str(uuid.uuid1())
        consumer_ready = Event()

        def on_consumer_ready():
            consumer_ready.set()

        consumer = Consumer(self.broker_url, self._queue_prefix, self.exchange)
        consumer.on_connection_setup_finished = on_consumer_ready
        response = {}
        response_received = Event()

        def response_callback(response_payload, **kwargs):
            if sent_correlation != kwargs['properties'].correlation_id:
                return
            response['payload'] = response_payload
            response_received.set()

        def wait_for_response():
            consumer.subscribe(message + '.answered',
                               response_callback,
                               transient_queue=True)
            consumer.start()

        thread = Thread(target=wait_for_response)
        thread.daemon = True
        thread.start()

        consumer_ready.wait(2)
        self._publish(message, payload, correlation_id=sent_correlation)
        timed_out = not response_received.wait(timeout_secs)
        if timed_out:
            raise MessageBusTimeoutError()
        consumer.stop()
        return response.get('payload')
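This method implements a blocking request/response round trip: it subscribes to '<message>.answered' on a background thread, publishes with a fresh correlation id, and waits for the matching reply. A hypothetical call site, with the message name and payload invented for illustration and bus standing for an instance of the surrounding class:

try:
    answer = bus.publish_and_get_response('user.created', {'id': 42},
                                          timeout_secs=10)
except MessageBusTimeoutError:
    answer = None  # no consumer answered within the timeout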
Example #4
    def __init__(self, interval, processes, topic, partition, **consumer):
        self._logger = logging.getLogger('SHIELD.SIMPLE.WORKER')
        self._logger.info('Initializing Simple Worker process...')

        self._interval = interval
        self._isalive = True
        self._processes = processes

        # .............................init Kafka Consumer
        self.Consumer = Consumer(**consumer)
        self.Consumer.assign(topic, [int(partition)])

        # .............................set up local staging area
        self._tmpdir = tempfile.mkdtemp(prefix='_SW.',
                                        dir=tempfile.gettempdir())
        self._logger.info('Use directory "{0}" as local staging area.'.format(
            self._tmpdir))

        # .............................define a process pool object
        self._pool = Pool(self._processes, init_child)
        self._logger.info(
            'Master Collector will use {0} parallel processes.'.format(
                self._processes))

        signal.signal(signal.SIGUSR1, self.kill)
        self._logger.info('Initialization completed successfully!')
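init_child is passed to Pool as the worker initializer but is not shown; a common choice in this situation is to make pool workers ignore SIGINT so that only the master process handles shutdown signals. A sketch under that assumption:

import signal

def init_child():
    # Sketch only: let the master own signal handling;
    # pool workers ignore Ctrl-C.
    signal.signal(signal.SIGINT, signal.SIG_IGN)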
Example #5
def event_loop():
    "this is the main event loop where everything happens"
    # this isn't being called during sys.exit :/
    atexit.register(config.STOP_EVENT.set)
    tailer_threads = []
    # initiate threads to tail from files
    for fdict in config.FILES:
        for tailer in path_tailers(fdict):
            tailer.start()
            tailer_threads.append(tailer)
    # initiate threads to consume logs pushed into queue
    consumer_threads = []
    for i in range(config.NUM_CONSUMERS):
        consumer = Consumer(config.LOG_QUEUE, config.STOP_EVENT,
                            poll_interval=config.POLL_INTERVAL,
                            name=CONSUMER_MAP.get(i % 4))
        consumer.start()
        consumer_threads.append(consumer)
    # this part continues to block even though all
    # queue items were processed :/
    # LOG_QUEUE.join() # Commenting for now...
    # logging.debug('finished processing queue')
    while True:
        try:
            time.sleep(10)
        except KeyboardInterrupt:
            config.STOP_EVENT.set()
            print('')
            for consumer in consumer_threads:
                logging.info(
                    '{0.name} sent {0.sent_records} records!'.format(consumer))
            sys.exit('shutting down streamer...')
Example #6
def main():
    config = {}
    with open('./config.txt', 'r') as config_file:
        for line in config_file:
            line = line.rstrip()
            key, val = line.split('=', 1)  # split on the first '=' only
            config[key] = val

    captchaList = []
    condition = threading.Condition()

    producer = Producer(sitekey=config['sitekey'],
                        twocaptchaKey=config['twocaptchaKey'],
                        condition=condition,
                        producerNo=int(config['producerThread']),
                        captchaList=captchaList)
    producer.start()

    for i in range(int(config['consumerThread'])):
        consumer = Consumer(url=config['url'],
                            productNo=config['productNo'],
                            raffleNo=config['raffleNo'],
                            areCode=config['phoneAreaCode'],
                            domain=config['catchAllDomain'],
                            prefix=config['catchAllPrefix'],
                            condition=condition,
                            captchaList=captchaList)
        consumer.start()
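The config.txt parsed above is a plain key=value file. Given the keys read here, it presumably looks something like the following (all values invented for illustration):

sitekey=6Lc...example
twocaptchaKey=abc123example
producerThread=2
consumerThread=4
url=https://example.com/raffle
productNo=12345
raffleNo=67890
phoneAreaCode=+1
catchAllDomain=example.com
catchAllPrefix=entry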
Example #7
def main():
    # create the server socket
    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # get the local host name
    host = socket.gethostname()
    port = 9999
    # bind to the port
    server_socket.bind((host, port))
    # allow at most 5 queued connections
    server_socket.listen(5)

    # queue feeding the thread pool
    work_queue = queue.Queue()

    # message queue
    message_queue = queue.Queue()

    # create a thread pool with 4 threads
    thread_pool = ThreadPoolManger(4, work_queue)

    # start the producer process
    p = Producer()
    p.start()

    # start the consumer process
    c = Consumer()
    c.start()

    while True:
        # accept a client connection
        client_socket, addr = server_socket.accept()
        t = threading.Thread(target=tcp_link, args=(client_socket, addr, thread_pool, message_queue))
        t.start()
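tcp_link and ThreadPoolManger are defined elsewhere in this project. A rough sketch of what tcp_link might do, assuming the pool exposes an add_job(func, *args) method (that API is a guess, not shown in the snippet):

def tcp_link(client_socket, addr, thread_pool, message_queue):
    # Sketch only: hand the connection off to the pool so the
    # accept loop stays free.
    def handle():
        data = client_socket.recv(1024)
        message_queue.put((addr, data))  # record the request for the consumer
        client_socket.sendall(b'ok')
        client_socket.close()
    thread_pool.add_job(handle)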
Example #8
    def __init__(self):
        super().__init__()
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)

        # buffer for messages that have been sent but not yet processed
        self.buffer = queue.Queue()
        # threads for processing the data
        self.sentThread = QThread()
        self.sentObj = Producer(self.sem, self.buffer)
        self.sentObj.moveToThread(self.sentThread)

        self.n = 1
        self.getThreadsPool = [QThread()]
        self.getObjs = [Consumer(self.sem, self.sem_forBuffer, 1, self.buffer)]
        self.getObjs[0].moveToThread(self.getThreadsPool[0])

        self.ui.sendBt.clicked.connect(self.sentObj.run)
        self.ui.sendBt.clicked.connect(self.check)
        self.sentObj.message_sented.connect(self.getObjs[0].run)
        self.sentObj.message_sented.connect(self.addSendedMessage)
        self.getObjs[0].message_getted.connect(self.addGettedMessage)
        self.ui.okBt.clicked.connect(self.change_threadNumber)

        self.sem_forBuffer.release()
        self.sentThread.start()
        self.getThreadsPool[0].start()
Example #9
    def __init__(self):
        self.url_counter = 1
        self.document_client = DocumentClient()
        self.indexing_client = IndexingClient()
        self.pagerank_client = PagerankClient()
        self.producer = Producer('url_queue')
        self.consumer = Consumer('url_queue')
Example #10
    def __init__(self):
        # Build the SparkContext first and derive the session from it;
        # calling SparkSession.builder.getOrCreate() before
        # SparkContext(conf=...) would fail with a second-context error.
        self.conf = SparkConf().setMaster('local').setAppName('kmeans')
        self.sc = SparkContext(conf=self.conf)
        self.spark = SparkSession(self.sc)
        self.consumer = Consumer('bus', 'localhost')
        self.stream = self.consumer.get_stream()
        self.kafka_stream = ConsumerKafka('bus', 'localhost')
Example #11
def main(flag, host):
    message = Consumer(flag, host)
    message = message.pull_message()
    print(message.callback())
    res = es.index(index="bus", doc_type='json', body=message)
    # filter(res)
    # print res
Example #12
def test_consumer_no_commits_returns_unix_start(db):
    conn, cur = db
    consumer = Consumer(conn, "foo", "offsets")
    cur.execute(consumer.create_query)
    conn.commit()

    offset = consumer.get_offset()
    assert offset == datetime(1970, 1, 1)
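The db fixture is not part of the snippet; it presumably yields an open DB-API connection and cursor. A minimal pytest sketch, using an in-memory SQLite database purely as a stand-in for whatever backend the real suite targets:

import sqlite3
import pytest

@pytest.fixture
def db():
    # Sketch only: yields (connection, cursor) in the shape the tests expect.
    conn = sqlite3.connect(':memory:')
    yield conn, conn.cursor()
    conn.close()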
Example #13
    def __init__(self,
                 broker_url='amqp://localhost',
                 queue_prefix=None,
                 exchange='messagebus'):
        self.broker_url = broker_url
        self.consumer = Consumer(self.broker_url, queue_prefix, exchange)
        self._queue_prefix = queue_prefix
        self.exchange = exchange
Example #14
def test_qps():
    # create the producer
    p = Producer(ip=g_ip, user=g_user, password=g_password)
    p.producer_declare()
    p.create_exchange(g_exchange, "topic")

    # create the consumers
    consumers = []

    for queue_name in g_queue_name:
        for i in range(0, 3):
            consumers.append(
                Consumer(ip=g_ip, user=g_user, password=g_password))
            consumers[len(consumers) - 1].start_consumer(
                g_exchange, queue_name + str(i), queue_name + str(i))

    time.sleep(10)  # wait 10 seconds for the consumers to finish binding
    log.info("[test_qps] starting ...")

    try:
        target_time = g_test_secs
        start = time.time()
        stop = False
        while not stop:
            for queue_name in g_queue_name:
                for i in range(0, 3):
                    time.sleep(g_sleep_secs)
                    p.publish(g_exchange, queue_name + str(i),
                              '{"msg":"this is a test!"}')
                    curr = time.time()
                    if (curr - start) >= target_time:
                        stop = True
                        break
                if stop:
                    break

    except Exception as err:
        log.error("[test_qps] error: " + str(err))
    finally:
        for queue_name in g_queue_name:
            for i in range(0, 3):
                p.publish(g_exchange, queue_name + str(i), "quit")
        p.close()

        recev = 0
        last_time = 0.0
        for c in consumers:
            c.join()
            recev += c.number_of_msg()
            if c.stop_consume_time() > last_time:
                last_time = c.stop_consume_time()

        log.info("[test_qps] %d msg have been sent, start at %f" %
                 (p.number_of_msg(), p.start_publish_time()))
        log.info("[test_qps] %d msg have been received, end at %f" %
                 (recev, last_time))
        log.info("[test_qps] QPS: %f" % (recev /
                                         (last_time - p.start_publish_time())))
Example #15
    def __init__(self):
        self.consumer = Consumer('bus', 'localhost')
        self.stream = self.consumer.get_stream()
        self.kafka_stream = ConsumerKafka('bus', 'localhost')
        self.cleaned_stream = self.stream.map(self.clean_up)
        self.conf = SparkConf().setMaster('local').setAppName(
            'linear_regression')
        self.sc = SparkContext(conf=self.conf)
        self.spark = SparkSession(self.sc)
Example #16
def start_stop_consumer(flag):
    consumer = TrendingHashtagConsumer()
    consumer1 = Consumer()
    if flag:
        consumer.start()
        consumer1.start()
    else:
        consumer.stop()
        consumer1.stop()
Example #17
def test_consumer_returns_last_commit(db):
    conn, _ = db
    consumer = Consumer(conn, "foo", "offsets")

    new_offset = consumer.new_offset()
    consumer.commit(new_offset)

    offset = consumer.get_offset()
    assert offset == new_offset
Example #18
def test_consumer_creates_table_if_not_exists(db):
    conn, _ = db

    consumer = Consumer(conn, "foo", "offsets")
    offset = consumer.get_offset()
    assert offset == datetime(1970, 1, 1)

    offset = consumer.get_offset()
    assert offset == datetime(1970, 1, 1)
Example #19
def test_consumer_duplicate_email(db_session, duplicate_email):
    """
    Test to determine that a unique-constraint violation is handled
    properly in the code.
    """
    c = Consumer()
    for data in duplicate_email:
        task_id = c.s(**data).apply_async()

    all_users = db_session.query(User).all()
    assert len(all_users) == len(duplicate_email) - 1
Example #20
def run():
    conn = InfluxDBClient(**config.INFLUX_CONN_SETTING)
    conn.create_database(config.INFLUX_CONN_SETTING['database'])
    conn.create_retention_policy(
        name=f"{config.influx_config['database']}_policy",
        duration=config.influx_config['retention'],
        replication='1',
        database=config.influx_config['database'])
    conn.close()
    Thread(target=Producer().run).start()
    Thread(target=Consumer().run).start()
Example #21
def test_consumer_valid_data(db_session, valid_data):
    """
    Test consumer class with valid data. Verify the results
    by querying the database.
    """
    c = Consumer()
    task_id = c.s(**valid_data).apply_async()
    all_users = db_session.query(User).all()

    assert len(all_users) == 1
    assert all_users[0].email == valid_data['email']
Example #22
def test_consumer_invalid_data(db_session, invalid_data):
    """
    Test with invalid data schema should not load anything
    to database.
    """
    c = Consumer()
    task_id = c.s(**invalid_data).apply_async()
    all_users = db_session.query(User).all()

    assert isinstance(task_id, celery.result.EagerResult)
    assert len(all_users) == 0
Example #23
def run(ctx):
    """Run Bounced Email Service"""
    from handler import Handler
    from consumer import Consumer

    try:
        handler = Handler(ctx.obj)
        consumer = Consumer(ctx.obj, handler)
        consumer.run()
    except KeyboardInterrupt:
        consumer.stop()
Example #24
def test_consumer_passes_offsets_to_work_fn(db):
    conn, _ = db
    consumer = Consumer(conn, "foo", "offsets")

    new_offset = consumer.new_offset()
    consumer.commit(new_offset)

    def work(old, new):
        assert old == new_offset

    consumer.do_work(work)
Example #25
def connect_consumer(retries=0):
    print('Connecting to RabbitMQ server')
    try:
        Consumer().start()
    except pika.exceptions.AMQPConnectionError:
        if retries >= 2:
            raise

        print('Connection error. Retrying in 5 seconds.')
        time.sleep(5)
        connect_consumer(retries + 1)
Example #26
def consume_data(topic, broker, group_id):
    consumer = Consumer(brokers=[broker], group_id=group_id)

    consumer.subscribe([topic, "thing-event"])

    for msg in consumer:
        try:
            value = msg.value
            print(value)
        except Exception as err:
            print(err)
            continue
Example #27
    def __init__(self, output_file_name):
        self.tasks_queue = multiprocessing.JoinableQueue()
        self.write_output_lock = multiprocessing.Lock()
        self.num_consumers = multiprocessing.cpu_count() * 2

        self.consumers = [
            Consumer(self.tasks_queue, output_file_name,
                     self.write_output_lock) for i in range(self.num_consumers)
        ]
        logger.info('Starting %d consumers' % self.num_consumers)
        for w in self.consumers:
            w.start()
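The Consumer here looks like the classic multiprocessing.Process worker draining a JoinableQueue. A minimal sketch of that pattern, assuming tasks are callables and that None serves as a poison pill (both assumptions; the real class is not shown):

import multiprocessing

class Consumer(multiprocessing.Process):
    def __init__(self, tasks_queue, output_file_name, write_output_lock):
        super().__init__()
        self.tasks_queue = tasks_queue
        self.output_file_name = output_file_name
        self.write_output_lock = write_output_lock

    def run(self):
        while True:
            task = self.tasks_queue.get()
            if task is None:  # assumed poison pill: stop this worker
                self.tasks_queue.task_done()
                break
            result = task()  # assumes each task is a callable
            with self.write_output_lock:
                with open(self.output_file_name, 'a') as out:
                    out.write(str(result) + '\n')
            self.tasks_queue.task_done()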
Example #28
def test_consumer_throws_consumer_work_error_on_exception(db):
    conn, _ = db
    consumer = Consumer(conn, "foo", "offsets")

    new_offset = consumer.new_offset()
    consumer.commit(new_offset)

    def work(old, new):
        assert old != new_offset

    with pytest.raises(ConsumerWorkError):
        consumer.do_work(work)
Example #29
def cmd():
    def callback(cmd):
        cmd_name = next(iter(cmd['cmd']))
        payload = {
            'cmdexe': {
                cmd_name: f'processed {cmd["cmd"][cmd_name]}',
            }
        }
        producer = Producer(payload)
        Container(producer).run()

    consumer = Consumer(callback)
    Container(consumer).run()
Example #30
def main():
    # max_buffer = input.insert_number('insert maximum buffer capacity: ')

    # buffer = Buffer(max_buffer)
    monitor = Monitor()
    producer = Producer(monitor)
    consumer = Consumer(monitor)

    try:
        threading.Thread(target=producer.auto_produce).start()
        threading.Thread(target=consumer.auto_consume).start()
    except Exception:  # avoid a bare except so KeyboardInterrupt still propagates
        print(Logger.error('Unable to start thread'))
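Monitor, Producer, and Consumer are defined elsewhere; the commented-out Buffer lines suggest Monitor guards a shared bounded buffer. A minimal sketch of such a monitor built on threading.Condition (the capacity and method names are illustrative assumptions):

import threading
from collections import deque

class Monitor:
    def __init__(self, capacity=10):
        self._buffer = deque()
        self._capacity = capacity
        self._cond = threading.Condition()

    def produce(self, item):
        with self._cond:
            while len(self._buffer) >= self._capacity:
                self._cond.wait()  # block while the buffer is full
            self._buffer.append(item)
            self._cond.notify_all()

    def consume(self):
        with self._cond:
            while not self._buffer:
                self._cond.wait()  # block while the buffer is empty
            item = self._buffer.popleft()
            self._cond.notify_all()
            return item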