Example #1
import threading

def main():
    # Load key=value pairs from the config file
    config = {}
    with open('./config.txt', 'r') as file:
        for line in file:
            line = line.rstrip()
            key, val = line.split('=', 1)  # split on the first '=' only
            config[key] = val

    captchaList = []
    condition = threading.Condition()

    producer = Producer(sitekey=config['sitekey'],
                        twocaptchaKey=config['twocaptchaKey'],
                        condition=condition,
                        producerNo=int(config['producerThread']),
                        captchaList=captchaList)
    producer.start()

    for i in range(int(config['consumerThread'])):
        consumer = Consumer(url=config['url'],
                            productNo=config['productNo'],
                            raffleNo=config['raffleNo'],
                            areCode=config['phoneAreaCode'],
                            domain=config['catchAllDomain'],
                            prefix=config['catchAllPrefix'],
                            condition=condition,
                            captchaList=captchaList)
        consumer.start()
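
Producer and Consumer here are threading.Thread subclasses defined elsewhere in the project. A minimal sketch of the condition-variable handoff the snippet relies on (the class bodies below are assumptions, with a hypothetical solve_captcha helper):

import threading

class Producer(threading.Thread):
    def __init__(self, condition, captchaList, **kwargs):
        super().__init__()
        self.condition = condition
        self.captchaList = captchaList

    def run(self):
        while True:
            token = self.solve_captcha()  # hypothetical helper: calls the 2Captcha API
            with self.condition:
                self.captchaList.append(token)
                self.condition.notify()  # wake one waiting consumer

class Consumer(threading.Thread):
    def __init__(self, condition, captchaList, **kwargs):
        super().__init__()
        self.condition = condition
        self.captchaList = captchaList

    def run(self):
        with self.condition:
            while not self.captchaList:  # loop guards against spurious wakeups
                self.condition.wait()
            token = self.captchaList.pop()
        # ... submit the raffle entry with the token ...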
Example #2
def main():
    # Create the server socket
    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Get the local host name
    host = socket.gethostname()
    port = 9999
    # Bind the socket to the port
    server_socket.bind((host, port))
    # Allow at most 5 pending connections; extras wait in the backlog
    server_socket.listen(5)

    # Work queue for the thread pool
    work_queue = queue.Queue()

    # Message queue
    message_queue = queue.Queue()

    # Create a thread pool with 4 worker threads
    thread_pool = ThreadPoolManger(4, work_queue)

    # Start the producer
    p = Producer()
    p.start()

    # Start the consumer
    c = Consumer()
    c.start()

    while True:
        # Accept a client connection
        client_socket, addr = server_socket.accept()
        t = threading.Thread(target=tcp_link, args=(client_socket, addr, thread_pool, message_queue))
        t.start()
Example #3
def event_loop():
    "this is the main event loop where everything happens"
    # this isn't being called during sys.exit :/
    atexit.register(config.STOP_EVENT.set)
    tailer_threads = []
    # initiate threads to tail from files
    for fdict in config.FILES:
        for tailer in path_tailers(fdict):
            tailer.start()
            tailer_threads.append(tailer)
    # initiate threads to consume logs pushed into queue
    consumer_threads = []
    for i in range(config.NUM_CONSUMERS):
        consumer = Consumer(config.LOG_QUEUE, config.STOP_EVENT,
                            poll_interval=config.POLL_INTERVAL,
                            name=CONSUMER_MAP.get(i % 4))
        consumer.start()
        consumer_threads.append(consumer)
    # this part continues to block even though all
    # queue items were processed :/
    # LOG_QUEUE.join() # Commenting for now...
    # logging.debug('finished processing queue')
    while True:
        try:
            time.sleep(10)
        except KeyboardInterrupt:
            config.STOP_EVENT.set()
            print()
            for consumer in consumer_threads:
                logging.info(
                    '{0.name} sent {0.sent_records} records!'.format(consumer))
            sys.exit('shutting down streamer...')
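
The commented-out LOG_QUEUE.join() blocks until task_done() has been called once for every item put on the queue; if any consumer pulls an item with get() but never calls task_done(), join() blocks forever. A minimal sketch of the contract (names are illustrative):

import queue
import threading

q = queue.Queue()

def consumer():
    while True:
        item = q.get()
        try:
            pass  # ... process item ...
        finally:
            q.task_done()  # without this, q.join() never returns

threading.Thread(target=consumer, daemon=True).start()
for i in range(10):
    q.put(i)
q.join()  # returns once task_done() has been called for every put()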
Example #4
def start_stop_consumer(flag):
    consumer = TrendingHashtagConsumer()
    consumer1 = Consumer()
    if flag:
        consumer.start()
        consumer1.start()
    else:
        consumer.stop()
        consumer1.stop()
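
Calling stop() on freshly constructed instances can only affect the consumers started earlier if Consumer coordinates through shared or class-level state. A more conventional per-instance shape uses an Event (an illustrative sketch, not these projects' code):

import threading

class StoppableConsumer(threading.Thread):
    def __init__(self):
        super().__init__()
        self._stop_event = threading.Event()

    def run(self):
        while not self._stop_event.is_set():
            self._stop_event.wait(0.5)  # ... poll for work between checks ...

    def stop(self):
        self._stop_event.set()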
Example #5
    def wake_consumer(self):
        """Wake up the consumers."""
        self.consumer_poison_list = []
        self.consumer_list = []
        for _ in range(self.consumer_count):  # xrange in the original Python 2 source
            tmp_poison = Event()
            consumer = Consumer(queue=self.queue, poison=tmp_poison, consume=self.consume)
            consumer.start()
            self.consumer_poison_list.append(tmp_poison)
            self.consumer_list.append(consumer)
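
A matching Consumer shape for this one-poison-Event-per-thread pattern might look like the following (an assumption; the real class lives elsewhere in the project):

import queue as queue_mod
import threading

class Consumer(threading.Thread):
    def __init__(self, queue, poison, consume):
        super().__init__()
        self.queue, self.poison, self.consume = queue, poison, consume

    def run(self):
        # Keep draining the queue until this thread's poison Event is set
        while not self.poison.is_set():
            try:
                item = self.queue.get(timeout=0.5)
            except queue_mod.Empty:
                continue
            self.consume(item)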
Example #6
import logging
from multiprocessing import Queue  # assumed source of Queue, per the log message below

def start():
    logging.basicConfig(level=logging.INFO)
    logging.info("Started multiprocessing...")
    product_queue = Queue()
    task_queue = Queue()
    producer = Producer(task_queue)
    consumer = Consumer(task_queue, product_queue)
    producer.start()
    consumer.start()
    while True:
        product = product_queue.get()
        print("Received: " + str(product))
Example #7
def createAndRunConsumer(triggerFQN, params, record=True):
    if app.config['TESTING']:
        logging.debug("Just testing")
    else:
        # generate a random uuid for new triggers
        if 'uuid' not in params:
            params['uuid'] = str(uuid.uuid4())

        consumer = Consumer(triggerFQN, params)
        consumer.start()
        consumers.addConsumerForTrigger(triggerFQN, consumer)

        if record:
            database.recordTrigger(triggerFQN, params)
Example #8
    def createAndRunConsumer(self, doc):
        triggerFQN = doc['_id']

        # Create a representation for this trigger, even if it is disabled
        # This allows it to appear in /health as well as allow it to be deleted
        # Creating this object is lightweight and does not initialize any connections
        consumer = Consumer(triggerFQN, doc)
        self.consumers.addConsumerForTrigger(triggerFQN, consumer)

        if self.__isTriggerDocActive(doc):
            logging.info('[{}] Trigger was determined to be active, starting...'.format(triggerFQN))
            consumer.start()
        else:
            logging.info('[{}] Trigger was determined to be disabled, not starting...'.format(triggerFQN))
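
__isTriggerDocActive is a private helper of the surrounding class whose body is not shown; the intent is roughly a status check on the trigger document (pure assumption, illustrative only):

    def __isTriggerDocActive(self, doc):
        # Assumed shape: the doc carries a status flag; treat a missing
        # flag as active.
        return doc.get('status', {}).get('active', True)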
Example #9
    def __init__(self, write_key=None, host='https://hosted.rudderlabs.com', debug=False,
                 max_queue_size=10000, send=True, on_error=None, flush_at=100,
                 flush_interval=0.5, max_retries=3, sync_mode=False,
                 timeout=10, thread=1):
        require('write_key', write_key, string_types)

        self.queue = queue.Queue(max_queue_size)
        self.write_key = write_key
        self.on_error = on_error
        self.debug = debug
        self.send = send
        self.sync_mode = sync_mode
        self.host = host
        self.timeout = timeout

        if sync_mode:
            self.consumers = None
        else:
            # On program exit, allow the consumer thread to exit cleanly.
            # This prevents exceptions and a messy shutdown when the
            # interpreter is destroyed before the daemon thread finishes
            # execution. However, it is *not* the same as flushing the queue!
            # To guarantee all messages have been delivered, you'll still need
            # to call flush().
            if send:
                atexit.register(self.join)
            self.consumers = []  # initialize once so every consumer is retained, not just the last
            for n in range(thread):
                consumer = Consumer(
                    self.queue, write_key, host=host, on_error=on_error,
                    flush_at=flush_at, flush_interval=flush_interval,
                    retries=max_retries, timeout=timeout,
                )
                self.consumers.append(consumer)

                # if we've disabled sending, just don't start the consumer
                if send:
                    consumer.start()
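
atexit.register(self.join) registers a bound method, so the interpreter calls self.join() during a normal shutdown, after the main program finishes. A minimal standalone demonstration of the mechanism (illustrative names):

import atexit

class Client:
    def join(self):
        print("draining consumers before exit")

c = Client()
atexit.register(c.join)  # runs at normal interpreter shutdown
print("main work done")  # "draining consumers..." prints after this line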
Example #10
def main():
    # Create the server socket
    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Get the local host name
    host = socket.gethostname()
    port = 9999
    # Bind the socket to the port
    server_socket.bind((host, port))
    # Allow at most 5 pending connections; extras wait in the backlog
    server_socket.listen(5)

    # Start the producer
    p = Producer()
    p.start()

    # Start the consumer
    c = Consumer()
    c.start()

    while True:
        # Accept a client connection
        client_socket, addr = server_socket.accept()
        t = threading.Thread(target=tcp_link, args=(client_socket, addr))
        t.start()
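
tcp_link is the per-connection handler shared by Examples #2 and #10 (Example #2's variant also receives the thread pool and message queue). A plausible minimal shape (an assumption, not the projects' actual code):

def tcp_link(client_socket, addr):
    # Echo-style handler: read until the client closes, then clean up
    print('Accepted connection from %s:%s' % addr)
    while True:
        data = client_socket.recv(1024)
        if not data:
            break
        client_socket.sendall(data)
    client_socket.close()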
Example #11
def _execute(args, awsargs):
    """What executes when the application is run. It spawns the specified
    numbers of producer and consumer processes: producers read the data from
    the specified file and add it to the shared queue, consumers work the
    queue, and the application exits once the queue is empty.

    :param argparse.Namespace args: The parsed CLI arguments
    :param awsargs: Additional arguments passed through to the AWS client

    """
    error_exit = multiprocessing.Event()
    work_queue = multiprocessing.Queue()
    units = multiprocessing.Value('f', 0, lock=True)
    updated_count = multiprocessing.Value('L', 0, lock=True)

    producers = []
    LOGGER.info('Starting %i producers', args.producers)
    for index in range(0, args.producers):
        done = multiprocessing.Event()
        prod = Producer(
            kwargs={
                'done': done,
                'error_exit': error_exit,
                'segment': index,
                'cliargs': args,
                'awsargs': awsargs,
                'unit_count': units,
                'work_queue': work_queue
            })
        prod.start()
        producers.append((prod, done))

    consumers = []
    LOGGER.info('Starting %i consumers', args.consumers)
    for index in range(0, args.consumers):
        done = multiprocessing.Event()
        con = Consumer(
            kwargs={
                'done': done,
                'error_exit': error_exit,
                'id': index,
                'cliargs': args,
                'awsargs': awsargs,
                'updated_count': updated_count,
                'unit_count': units,
                'work_queue': work_queue
            })
        con.start()
        consumers.append((con, done))

    start_time = time.time()

    # All of the data has been added to the queue, wait for things to finish
    LOGGER.debug('Waiting for producers to finish')
    while any(p.is_alive() for p, _ in producers):
        try:
            time.sleep(0.1)
        except KeyboardInterrupt:
            error_exit.set()
            break
    LOGGER.info('All producers dead')

    # Wait till queue is cleared, then send exit
    while work_queue.qsize() > 0:
        LOGGER.debug('Work queue size is %s, updated_count is %s',
                     work_queue.qsize(), updated_count.value)
        time.sleep(0.1)
    LOGGER.info('Sending exit signal')
    error_exit.set()

    LOGGER.debug('Waiting for consumers to finish')
    while any(p.is_alive() for p, _ in consumers):
        try:
            time.sleep(0.1)
        except KeyboardInterrupt:
            error_exit.set()
            break
    LOGGER.info('All consumers dead')

    LOGGER.info('Updated {:d} records, consuming {:,} DynamoDB units in '
                '{:.2f} seconds'.format(updated_count.value, units.value,
                                        time.time() - start_time))
    sys.exit(0)
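
Note that multiprocessing.Queue.qsize() is only approximate, and on macOS it raises NotImplementedError, so the drain loop above is not portable. A per-consumer sentinel is a more portable shutdown signal (sketch with illustrative names):

SENTINEL = 'STOP'

def consumer_loop(work_queue):
    while True:
        item = work_queue.get()
        if item == SENTINEL:  # one sentinel per consumer is enqueued at shutdown
            break
        # ... process item ...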
Example #12
import logging
import signal
from errno import ECONNABORTED
from os import getenv

from pika.channel import Channel
from pika.spec import Basic, BasicProperties

LOGGER = logging.getLogger()

# Handle SIGTERM and SIGQUIT the same way SIGINT is handled
signal.signal(signal.SIGTERM, signal.default_int_handler)
signal.signal(signal.SIGQUIT, signal.default_int_handler)


def on_message(channel: Channel, method: Basic.Deliver, props: BasicProperties,
               body: bytes):
    """Called when a message is delivered from RabbitMQ."""
    LOGGER.info(
        f"Received message # {method.delivery_tag} from {props.app_id}")

    LOGGER.info(f"Acknowledging message # {method.delivery_tag}")
    channel.basic_ack(method.delivery_tag)


consumer = Consumer(
    host=getenv("RABBITMQ_HOST", "rabbitmq"),
    queue=getenv("TASKS_QUEUE_NAME", "tasks"),
    prefetch_value=int(getenv("PREFETCH_VALUE", "1")),  # getenv returns a string
    on_message=on_message,
)

# This is a blocking method
consumer.start()

if consumer.should_reconnect:
    # Raise error to make the service container exit with a non-zero code
    raise ConnectionAbortedError(ECONNABORTED, "Service should be restarted")
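
The Consumer class here wraps pika; a stripped-down blocking equivalent of what start() presumably does, based on pika's standard API (an assumption about the wrapper, not its actual code):

import pika

def start_blocking_consumer(host, queue, prefetch_value, on_message):
    connection = pika.BlockingConnection(pika.ConnectionParameters(host=host))
    channel = connection.channel()
    channel.queue_declare(queue=queue, durable=True)
    channel.basic_qos(prefetch_count=prefetch_value)  # cap unacknowledged deliveries
    channel.basic_consume(queue=queue, on_message_callback=on_message)
    channel.start_consuming()  # blocks, like consumer.start() above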
Example #13
class MessageBus:
    def __init__(self,
                 broker_url='amqp://localhost',
                 queue_prefix=None,
                 exchange='messagebus'):
        self.broker_url = broker_url
        self.consumer = Consumer(self.broker_url, queue_prefix, exchange)
        self._queue_prefix = queue_prefix
        self.exchange = exchange

    def publish(self, message, payload=None):
        # None default avoids a mutable dict shared across calls
        self._publish(message, payload if payload is not None else {})

    def _publish(self, message, payload, correlation_id=None):
        body = json.dumps(self._prepare_payload(payload), ensure_ascii=False)
        connection = pika.BlockingConnection(
            pika.URLParameters(self.broker_url))
        channel = connection.channel()

        properties = None
        if correlation_id:
            properties = pika.BasicProperties(correlation_id=correlation_id)

        channel.basic_publish(exchange=self.exchange,
                              routing_key=message,
                              body=body,
                              properties=properties)
        connection.close()

    def _prepare_payload(self, payload):
        def serialize(value):
            if isinstance(value, datetime.datetime):
                return value.isoformat()
            return value

        proc_payload = {k: serialize(v) for k, v in payload.items()}
        if 'timestamp' not in proc_payload:
            proc_payload['timestamp'] = datetime.datetime.utcnow().isoformat()
        return proc_payload

    def subscribe(self, message, callback):
        self.consumer.subscribe(message, callback)

    def subscribe_and_publish_response(self, message, callback):
        def subscribe_callback(request_payload, **kwargs):
            correlation_id = kwargs['properties'].correlation_id
            response = callback(request_payload)
            self._publish(message + '.answered', response, correlation_id)

        self.consumer.subscribe(message,
                                subscribe_callback,
                                transient_queue=True)

    def publish_and_get_response(self, message, payload, timeout_secs=5):
        sent_correlation = str(uuid.uuid1())
        consumer_ready = Event()

        def on_consumer_ready():
            consumer_ready.set()

        consumer = Consumer(self.broker_url, self._queue_prefix, self.exchange)
        consumer.on_connection_setup_finished = on_consumer_ready
        response = {}
        response_received = Event()

        def response_callback(response_payload, **kwargs):
            if kwargs['properties'].correlation_id != sent_correlation:
                return
            response['payload'] = response_payload
            response_received.set()

        def wait_for_response():
            consumer.subscribe(message + '.answered',
                               response_callback,
                               transient_queue=True)
            consumer.start()

        thread = Thread(target=wait_for_response)
        thread.daemon = True
        thread.start()

        consumer_ready.wait(2)
        self._publish(message, payload, correlation_id=sent_correlation)
        timed_out = not response_received.wait(timeout_secs)
        if timed_out:
            raise MessageBusTimeoutError()
        consumer.stop()
        return response.get('payload')

    def start(self):
        self.consumer.start()

    def stop(self):
        self.consumer.stop()
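
A usage sketch for the request/response pair above ('user.lookup' and the payloads are illustrative; responder and requester would run in separate processes):

bus = MessageBus(broker_url='amqp://localhost')

# Responder process: answer every 'user.lookup' request
bus.subscribe_and_publish_response(
    'user.lookup', lambda req: {'id': req.get('id'), 'name': 'alice'})
bus.start()  # blocks while consuming

# Requester process: waits up to timeout_secs for the correlated
# 'user.lookup.answered' reply, else raises MessageBusTimeoutError
reply = bus.publish_and_get_response('user.lookup', {'id': 42})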
Example #14
    def readqueue(self):
        queue = Queue.Queue()
        try:
            with open('user.queue', 'r') as f:
                for line in f:
                    # each line replaces the queue's contents wholesale
                    queue.queue = deque(eval(line))
        except Exception:
            pass  # missing or malformed file: start with an empty queue
        return queue

if __name__=="__main__":
    bullyalgorithm(opt=False)

    producer = Producer()
    producer.setDaemon(True)
    producer.start()

    consumer = Consumer(producer.queue)
    consumer.setDaemon(True)
    consumer.start()
 
    acceptor = Acceptor()
    acceptor.setDaemon(True)
    acceptor.start()
   
    while threading.active_count() > 0:
        time.sleep(0.1)
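
readqueue's eval() will execute arbitrary expressions found in user.queue; if that file is not fully trusted, json is a safer serialization (a sketch, assuming the queued items are JSON-compatible):

import json
import queue
from collections import deque

def read_queue_json(path='user.queue'):
    q = queue.Queue()
    try:
        with open(path, 'r') as f:
            q.queue = deque(json.load(f))  # file holds a single JSON list
    except (IOError, ValueError):
        pass  # missing or malformed file: start with an empty queue
    return q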