Example #1
        def config_request_rpc(self):
            self.correlation_id = uuid()
            callback_queue = Queue(uuid(),
                                   durable=False,
                                   auto_delete=True,
                                   max_priority=4,
                                   consumer_arguments={'x-priority': 4})

            self.producer.publish(
                '',
                exchange='',
                routing_key='config-request-queue',
                reply_to=callback_queue.name,
                correlation_id=self.correlation_id,
                retry=True,
                declare=[
                    Queue('config-request-queue',
                          durable=False,
                          max_priority=4,
                          consumer_arguments={'x-priority': 4}), callback_queue
                ],
                priority=4)
            with Consumer(self.connection,
                          on_message=self.handle_config_request_reply,
                          queues=[callback_queue],
                          no_ack=True):
                while self.rules is None and self.monitors is None:
                    self.connection.drain_events()
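The loop above blocks until handle_config_request_reply fills in self.rules or self.monitors. That handler is not part of the snippet; a minimal sketch, assuming the reply payload is a dict carrying 'rules' and 'monitors' keys, might look like this:

        def handle_config_request_reply(self, message):
            # Hypothetical handler (not shown above): accept only the reply
            # matching the correlation id of the RPC we just issued.
            if self.correlation_id == message.properties['correlation_id']:
                payload = message.payload
                self.rules = payload.get('rules')        # assumed payload key
                self.monitors = payload.get('monitors')  # assumed payload key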
Example #2
 def rpc(self,
         message: dict,
         routing_key: str,
         timeout: int = 0) -> Message:
     """Enqeue message and wait for reply (RPC)"""
     self.response = None
     callback_queue = Queue(uuid(), exclusive=True, auto_delete=True)
     correlation_id = uuid()
     with Producer(exchange=self.exchange,
                   channel=self.channel) as producer:
         producer.publish(
             body=message,
             serializer="json",
             routing_key=routing_key,
             declare=[callback_queue],
             reply_to=callback_queue.name,
             correlation_id=correlation_id,
             retry=self.retry,
             retry_policy=self.retry_policy,
         )
         self.logger.debug("Published RPC %s: %s", routing_key, message)
     with Consumer(
             self.connection,
             accept=["json"],
             on_message=self.on_rpc_response,
             queues=[callback_queue],
             no_ack=True,
     ):
         while self.response is None:
             try:
                 self.connection.drain_events(timeout=timeout)
             except socket.timeout:
                 raise AMQPTimeoutException
     return self.response
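The rpc() helper above depends on an on_rpc_response callback that is not shown. A minimal sketch, assuming the exclusive, auto-delete callback queue only ever carries the reply to this request, could be:

 def on_rpc_response(self, message: Message) -> None:
     # Hypothetical companion callback: stash the decoded reply so rpc()
     # can return it once drain_events() delivers the message.
     self.response = message.payload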
Example #3
File: tester.py Project: traderose/artemis
 def hijack_comment(db_con, connection, hijack_key, comment):
     correlation_id = uuid()
     callback_queue = Queue(
         uuid(),
         channel=connection.default_channel,
         durable=False,
         exclusive=True,
         auto_delete=True,
         max_priority=4,
         consumer_arguments={"x-priority": 4},
     )
     with connection.Producer() as producer:
         producer.publish(
             {
                 "key": hijack_key,
                 "comment": comment
             },
             exchange="",
             routing_key="db-hijack-comment",
             retry=True,
             declare=[callback_queue],
             reply_to=callback_queue.name,
             correlation_id=correlation_id,
             priority=4,
             serializer="ujson",
         )
     while True:
         if callback_queue.get():
             break
         time.sleep(0.1)
     result = hijack_action_test_result(db_con, hijack_key, "comment",
                                        comment)
     assert (result is True
             ), 'Action "hijack_comment" for hijack id #{0} failed'.format(
                 hijack_key)
Example #4
File: tester.py Project: icepaule/artemis
 def hijack_multiple_action(db_con, connection, hijack_keys, action):
     correlation_id = uuid()
     callback_queue = Queue(
         uuid(),
         channel=connection.default_channel,
         durable=False,
         exclusive=True,
         auto_delete=True,
         max_priority=4,
         consumer_arguments={"x-priority": 4},
     )
     with connection.Producer() as producer:
         producer.publish(
             {"keys": hijack_keys, "action": action},
             exchange="",
             routing_key="db-hijack-multiple-action",
             retry=True,
             declare=[callback_queue],
             reply_to=callback_queue.name,
             correlation_id=correlation_id,
             priority=4,
         )
     while True:
         if callback_queue.get():
             break
         time.sleep(0.1)
Example #5
File: tester.py Project: traderose/artemis
 def change_conf(connection, new_config, old_config, comment):
     changes = "".join(difflib.unified_diff(new_config, old_config))
     if changes:
         correlation_id = uuid()
         callback_queue = Queue(
             uuid(),
             channel=connection.default_channel,
             durable=False,
             auto_delete=True,
             max_priority=4,
             consumer_arguments={"x-priority": 4},
         )
         with connection.Producer() as producer:
             producer.publish(
                 {
                     "config": new_config,
                     "comment": comment
                 },
                 exchange="",
                 routing_key="config-modify-queue",
                 serializer="yaml",
                 retry=True,
                 declare=[callback_queue],
                 reply_to=callback_queue.name,
                 correlation_id=correlation_id,
                 priority=4,
             )
         while True:
             if callback_queue.get():
                 break
             time.sleep(0.1)
Example #6
File: tester.py Project: traderose/artemis
 def hijack_multiple_action(db_con, connection, hijack_keys, action):
     correlation_id = uuid()
     callback_queue = Queue(
         uuid(),
         channel=connection.default_channel,
         durable=False,
         exclusive=True,
         auto_delete=True,
         max_priority=4,
         consumer_arguments={"x-priority": 4},
     )
     with connection.Producer() as producer:
         producer.publish(
             {
                 "keys": hijack_keys,
                 "action": action
             },
             exchange="",
             routing_key="db-hijack-multiple-action",
             retry=True,
             declare=[callback_queue],
             reply_to=callback_queue.name,
             correlation_id=correlation_id,
             priority=4,
             serializer="ujson",
         )
     while True:
         msg = callback_queue.get()
         if msg:
             assert (
                 msg.payload["status"] == "accepted"
             ), 'Action "{}" for [{}] failed with reason: {}'.format(
                 action, hijack_keys, msg.payload.get("reason", ""))
             break
         time.sleep(0.1)
Example #7
        def config_request_rpc(self) -> NoReturn:
            """
            Initial RPC of this service to request the configuration.
            The RPC is blocked until the configuration service replies back.
            """
            self.correlation_id = uuid()
            callback_queue = Queue(uuid(),
                                   durable=False,
                                   auto_delete=True,
                                   max_priority=4,
                                   consumer_arguments={'x-priority': 4})

            self.producer.publish(
                '',
                exchange='',
                routing_key='config-request-queue',
                reply_to=callback_queue.name,
                correlation_id=self.correlation_id,
                retry=True,
                declare=[
                    Queue('config-request-queue',
                          durable=False,
                          max_priority=4,
                          consumer_arguments={'x-priority': 4}), callback_queue
                ],
                priority=4)
            with Consumer(self.connection,
                          on_message=self.handle_config_request_reply,
                          queues=[callback_queue],
                          no_ack=True):
                while self.rules is None:
                    self.connection.drain_events()
            log.debug('{}'.format(self.rules))
Example #8
File: actions.py Project: leopoul/artemis
    def send(self, new_config, old_config, comment):
        changes = "".join(difflib.unified_diff(new_config, old_config))
        if changes:
            log.debug("Send 'new config'")
            self.response = None
            self.correlation_id = uuid()
            callback_queue = Queue(
                uuid(),
                durable=False,
                auto_delete=True,
                max_priority=4,
                consumer_arguments={"x-priority": 4},
            )
            with Connection(RABBITMQ_URI) as connection:
                with Producer(connection) as producer:
                    producer.publish(
                        {
                            "config": new_config,
                            "comment": comment
                        },
                        exchange="",
                        routing_key="configuration.rpc.modify",
                        serializer="yaml",
                        retry=True,
                        declare=[callback_queue],
                        reply_to=callback_queue.name,
                        correlation_id=self.correlation_id,
                        priority=4,
                    )
                with Consumer(
                        connection,
                        on_message=self.on_response,
                        queues=[callback_queue],
                        accept=["ujson"],
                ):
                    while self.response is None:
                        connection.drain_events()

            if self.response["status"] == "accepted":
                log.info("new configuration accepted:\n{}".format(changes))
                return "Configuration file updated.", True

            log.info("invalid configuration:\n{}".format(new_config))
            return (
                "Invalid configuration file.\n{}".format(
                    self.response["reason"]),
                False,
            )
        return "No changes found on the new configuration.", False
Example #9
        def on_modified(self, event):
            if event.is_directory:
                return None

            if event.src_path == self.path:
                with open(self.path, "r") as f:
                    content = f.readlines()
                # Take any action here when a file is modified.
                changes = "".join(difflib.unified_diff(self.content, content))
                if changes:
                    self.response = None
                    self.correlation_id = uuid()
                    callback_queue = Queue(
                        uuid(),
                        durable=False,
                        auto_delete=True,
                        max_priority=4,
                        consumer_arguments={"x-priority": 4},
                    )
                    with Producer(self.connection) as producer:
                        producer.publish(
                            content,
                            exchange="",
                            routing_key="config-modify-queue",
                            serializer="yaml",
                            retry=True,
                            declare=[callback_queue],
                            reply_to=callback_queue.name,
                            correlation_id=self.correlation_id,
                            priority=4,
                        )
                    with Consumer(
                            self.connection,
                            on_message=self.on_response,
                            queues=[callback_queue],
                            accept=["ujson"],
                    ):
                        while self.response is None:
                            self.connection.drain_events()

                    if self.response["status"] == "accepted":
                        text = "new configuration accepted:\n{}".format(
                            changes)
                        log.info(text)
                        self.content = content
                    else:
                        log.error("invalid configuration:\n{}".format(content))
                    self.response = None
Example #10
def client_task(func_body, target_queue: 'Queue'):
    logger.info(f"@pyrogram_task_call: {prettify(func_body)}")

    response = None

    def callback(body, message):
        nonlocal response
        response = body
        logger.info(f"@pyrogram_task_response: {body} ")

    # connections
    with Connection('amqp://localhost') as conn:
        # produce
        producer = conn.Producer(serializer='pickle')
        producer.publish(body=func_body,
                         exchange=tg_exchange,
                         routing_key=target_queue.name,
                         declare=[target_queue, callback_queue],
                         retry=True,
                         correlation_id=uuid(),
                         reply_to=callback_queue.name,
                         retry_policy={
                             'interval_start': 0,
                             'interval_step': 2,
                             'interval_max': 30,
                             'max_retries': 30,
                         })
        with Consumer(conn,
                      callbacks=[callback],
                      queues=[callback_queue],
                      no_ack=True):
            while response is None:
                conn.drain_events()
    return response
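client_task refers to a module-level callback_queue (and tg_exchange) that the snippet does not show; a plausible definition, assumed here and following the reply-queue pattern used throughout these examples, would be:

# Assumed (not shown in the snippet): an exclusive, auto-delete reply queue.
callback_queue = Queue(uuid(), exclusive=True, auto_delete=True)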
Example #11
 def __init__(self, connection, exchange, routing_key):
     self.connection = connection
     self.callback_queue = Queue(uuid(), exclusive=True, auto_delete=True)
     self.exchange = exchange
     self.routing_key = routing_key
     self.response = None
     self.correlation_id = None
Example #12
 def send_task(self, payload, routing_key, local=False, get_response=False, timeout=10):
     if local:
         declare_queues = self.control_queues
     else:
         declare_queues = self.declare_queues
     reply_to = None
     callback_queue = []
     if get_response:
         reply_to = self.callback_queue.name
         callback_queue = [self.callback_queue]
         self.correlation_id = uuid()
     try:
         with producers[self.connection].acquire(block=True, timeout=10) as producer:
             producer.publish(
                 payload,
                 exchange=None if local else self.exchange,
                 declare=declare_queues,
                 routing_key=routing_key,
                 reply_to=reply_to,
                 correlation_id=self.correlation_id,
                 retry=True,
                 headers={'epoch': time.time()},
             )
         if get_response:
             with Consumer(self.connection, on_message=self.on_response, queues=callback_queue, no_ack=True):
                 while self.response is self._response:
                     self.connection.drain_events(timeout=timeout)
             return self.response
     except socket.timeout:
         log.exception("Error waiting for task: '%s' sent with routing key '%s'", payload, routing_key)
     except Exception:
         log.exception("Error queueing async task: '%s'. for %s", payload, routing_key)
Example #13
 def call(self, n):
     self.response = None
     # Unique ID used to match the request with its response; generated anew for every request
     self.correlation_id = uuid()
     # Publish the request as a producer
     with Producer(self.connection) as producer:
         producer.publish(
             {'n': n},
             # Use the default exchange
             exchange='',
             # routing_key must match the queue name
             routing_key='rpc_queue',
             declare=[self.callback_queue],
             # Queue the reply should be sent to
             reply_to=self.callback_queue.name,
             # Correlation (business) ID
             correlation_id=self.correlation_id,
         )
     # Consume the response as a consumer
     with Consumer(self.connection,
                   on_message=self.on_response,
                   queues=[self.callback_queue], no_ack=True):
         while self.response is None:
             # Keep listening until the reply arrives (blocking)
             # TODO: non-blocking implementation
             self.connection.drain_events()
     return self.response
Example #14
 async def queue_task(self, task, args, kwargs):
     # TODO implement more options from apply_async. I need at least the
     #  queue option. I don't know what the other commonly used ones are.
     async with self.broker.get_channel() as channel:
         queue = self.celery.conf.task_default_queue
         # TODO: who's responsible in celery for creating the queue?
         #  I hoped that the consumer would create it so there'd never be
         #  any need to do it here, but I've seen errors that suggest
         #  otherwise.
         await channel.queue_declare(queue, passive=True)
         task_id = uuid()
         message: task_message = self.celery.amqp.create_task_message(
             task_id, task.name, args, kwargs)
         content_type, content_encoding, body = serialization.dumps(
             message.body,
             'json',
         )
         properties = {
             "content_type": content_type,
             "content_encoding": content_encoding,
             "headers": message.headers,
             **message.properties
         }
         body = body.encode(content_encoding)
         await channel.publish(body, '', queue, properties=properties)
         return AsyncResult(self, task_id)
Example #15
    def __init__(self, connection, shared_memory_manager_dict):
        self.connection = connection
        self.shared_memory_manager_dict = shared_memory_manager_dict

        # wait for other needed data workers to start
        wait_data_worker_dependencies(DATA_WORKER_DEPENDENCIES)

        # EXCHANGES
        self.mitigation_exchange = create_exchange(
            "mitigation", connection, declare=True
        )
        self.command_exchange = create_exchange("command", connection, declare=True)

        # QUEUES
        self.mitigate_queue = create_queue(
            SERVICE_NAME,
            exchange=self.mitigation_exchange,
            routing_key="mitigate-with-action",
            priority=2,
        )
        self.unmitigate_queue = create_queue(
            SERVICE_NAME,
            exchange=self.mitigation_exchange,
            routing_key="unmitigate-with-action",
            priority=2,
        )
        self.stop_queue = create_queue(
            "{}-{}".format(SERVICE_NAME, uuid()),
            exchange=self.command_exchange,
            routing_key="stop-{}".format(SERVICE_NAME),
            priority=1,
        )

        log.info("data worker initiated")
Example #16
def nmap_scan_subdomain_host(vhost, workspace, simulation, output_base_dir, config_file=None):
    celery_path = sys.path[0]
    config_nmap_options = config_parser.extract_bb_nmap_options(config_file=config_file)
    config = ConfigParser(allow_no_value=True)
    config.read(['config.ini'])

    vhost_explicitly_out_of_scope = lib.db.is_vhost_explicitly_out_of_scope(vhost, workspace)
    output_host_dir = os.path.normpath(os.path.join(output_base_dir, vhost))
    try:
        os.stat(output_host_dir)
    except OSError:  # directory does not exist yet; create it
        os.makedirs(output_host_dir)

    output_file = os.path.normpath(os.path.join(output_host_dir, vhost + "_nmap_tcp_scan.txt"))
    if not vhost_explicitly_out_of_scope:
        #print(config_nmap_options)
        cmd_name = "nmap_tcp_scan"
        try:
            if not simulation:
                populated_command = "nmap " + vhost + config_nmap_options + " -oA " + output_file
            else:
                populated_command = "#nmap " + vhost + config_nmap_options + " -oA " + output_file
        except TypeError:
            print("[!] Error: In the config file, there needs to be one, and only one, enabled tcp_scan command in the nmap_commands section.")
            print("[!]        This determines what ports to scan.")
            exit()
        task_id = uuid()
        utils.create_task(cmd_name, populated_command, vhost, output_file, workspace, task_id)
        result = chain(
            tasks.cel_nmap_scan.si(cmd_name, populated_command, vhost, config_nmap_options, celery_path, task_id,workspace).set(task_id=task_id),
        )()
Example #17
    def __init__(self, connection: Connection,
                 shared_memory_manager_dict: Dict) -> NoReturn:
        self.connection = connection
        self.shared_memory_manager_dict = shared_memory_manager_dict

        # EXCHANGES
        self.hijack_notification_exchange = create_exchange(
            "hijack-notification", connection, declare=True)
        self.command_exchange = create_exchange("command",
                                                connection,
                                                declare=True)

        # QUEUES
        self.hij_log_queue = create_queue(
            SERVICE_NAME,
            exchange=self.hijack_notification_exchange,
            routing_key="hij-log",
            priority=1,
        )
        self.mail_log_queue = create_queue(
            SERVICE_NAME,
            exchange=self.hijack_notification_exchange,
            routing_key="mail-log",
            priority=1,
        )
        self.stop_queue = create_queue(
            "{}-{}".format(SERVICE_NAME, uuid()),
            exchange=self.command_exchange,
            routing_key="stop-{}".format(SERVICE_NAME),
            priority=1,
        )

        log.info("data worker initiated")
Example #18
 def __init__(self, queue_worker):
     self.queue_worker = queue_worker
     self.correlation_id = None
     self.callback_queue = Queue(uuid(), exclusive=True, auto_delete=True)
     self.response = object()
     self._response = self.response
     self._connection = None
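In this constructor self.response starts as a fresh sentinel object and self._response keeps a reference to it, so Example #12's loop `while self.response is self._response` spins until a reply handler rebinds self.response. A minimal sketch of such an on_response handler, assumed rather than taken from the project, might be:

 def on_response(self, message):
     # Assumed handler: accept only the reply matching the outstanding RPC,
     # then rebind self.response so the sentinel comparison ends the loop.
     if message.properties.get('correlation_id') == self.correlation_id:
         self.response = message.payload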
Example #19
	def _get_all_scanning_tasks_and_create_db_entries(self):
		task_id = str(uuid())
		logr.debug("Getting scanning tasks for task_id '{0}'.".format(task_id))
		scan_run = self.scanrun_set.create_pending_scan_run_from_sample( task_id)
		scan_run.save()
		jobs = scan_run.get_scan_tasks_and_create_pending_db_entries(timeout=self.scan_timeout)
		logr.debug("task_id '{0}' has '{1}' jobs.".format(task_id, len(jobs)))

		return scan_run, jobs
Example #20
File: actions.py Project: leopoul/artemis
 def send(self, hijack_key, prefix, type_, hijack_as, action):
     log.debug(
         "Send 'learn_new_rule - {0}' hijack message with key: {1}".format(
             action, hijack_key))
     self.response = None
     self.correlation_id = uuid()
     callback_queue = Queue(
         uuid(),
         durable=False,
         exclusive=True,
         auto_delete=True,
         max_priority=4,
         consumer_arguments={"x-priority": 4},
     )
     with Connection(RABBITMQ_URI) as connection:
         with Producer(connection) as producer:
             producer.publish(
                 {
                     "key": hijack_key,
                     "prefix": prefix,
                     "type": type_,
                     "hijack_as": hijack_as,
                     "action": action,
                 },
                 exchange="",
                 routing_key="configuration.rpc.hijack-learn-rule",
                 retry=True,
                 declare=[callback_queue],
                 reply_to=callback_queue.name,
                 correlation_id=self.correlation_id,
                 priority=4,
                 serializer="ujson",
             )
         with Consumer(
                 connection,
                 on_message=self.on_response,
                 queues=[callback_queue],
                 accept=["ujson"],
         ):
             while self.response is None:
                 connection.drain_events()
     if self.response["success"]:
         return self.response["new_yaml_conf"], True
     return self.response["new_yaml_conf"], False
Example #21
        def on_modified(self, event):
            if event.is_directory:
                return None
            elif event.src_path == self.path:
                with open(self.path, 'r') as f:
                    content = f.readlines()
                # Take any action here when a file is modified.
                changes = ''.join(difflib.unified_diff(self.content, content))
                if len(changes) > 0:
                    self.response = None
                    self.correlation_id = uuid()
                    callback_queue = Queue(
                        uuid(),
                        durable=False,
                        auto_delete=True,
                        max_priority=4,
                        consumer_arguments={'x-priority': 4})
                    with Producer(self.connection) as producer:
                        producer.publish(content,
                                         exchange='',
                                         routing_key='config-modify-queue',
                                         serializer='yaml',
                                         retry=True,
                                         declare=[callback_queue],
                                         reply_to=callback_queue.name,
                                         correlation_id=self.correlation_id,
                                         priority=4)
                    with Consumer(self.connection,
                                  on_message=self.on_response,
                                  queues=[callback_queue],
                                  no_ack=True):
                        while self.response is None:
                            self.connection.drain_events()

                    if self.response['status'] == 'accepted':
                        text = 'new configuration accepted:\n{}'.format(
                            changes)
                        log.info(text)
                        self.content = content
                    else:
                        log.error('invalid configuration:\n{}'.format(content))
                    self.response = None
Example #22
        def config_request_rpc(self) -> NoReturn:
            """
            Initial RPC of this service to request the configuration.
            The RPC is blocked until the configuration service replies back.
            """
            self.correlation_id = uuid()
            callback_queue = Queue(
                uuid(),
                durable=False,
                auto_delete=True,
                max_priority=4,
                consumer_arguments={"x-priority": 4},
            )

            self.producer.publish(
                "",
                exchange="",
                routing_key="config-request-queue",
                reply_to=callback_queue.name,
                correlation_id=self.correlation_id,
                retry=True,
                declare=[
                    Queue(
                        "config-request-queue",
                        durable=False,
                        max_priority=4,
                        consumer_arguments={"x-priority": 4},
                    ),
                    callback_queue,
                ],
                priority=4,
                serializer="ujson",
            )
            with Consumer(
                self.connection,
                on_message=self.handle_config_request_reply,
                queues=[callback_queue],
                accept=["ujson"],
            ):
                while self.rules is None:
                    self.connection.drain_events()
            log.debug("{}".format(self.rules))
Example #23
File: models.py Project: yangjiu/phagescan
    def _get_all_scanning_tasks_and_create_db_entries(self):
        task_id = str(uuid())
        logr.debug("Getting scanning tasks for task_id '{0}'.".format(task_id))
        scan_run = self.scanrun_set.create_pending_scan_run_from_sample(
            task_id)
        scan_run.save()
        jobs = scan_run.get_scan_tasks_and_create_pending_db_entries(
            timeout=self.scan_timeout)
        logr.debug("task_id '{0}' has '{1}' jobs.".format(task_id, len(jobs)))

        return scan_run, jobs
Example #24
    def __init__(self, connection: Connection,
                 shared_memory_manager_dict: Dict) -> NoReturn:
        self.connection = connection
        self.rule_timer_thread = None
        self.shared_memory_manager_dict = shared_memory_manager_dict

        # wait for other needed data workers to start
        wait_data_worker_dependencies(DATA_WORKER_DEPENDENCIES)

        # EXCHANGES
        self.autoignore_exchange = create_exchange("autoignore",
                                                   connection,
                                                   declare=True)
        self.hijack_exchange = create_exchange("hijack-update",
                                               connection,
                                               declare=True)
        self.command_exchange = create_exchange("command",
                                                connection,
                                                declare=True)

        # QUEUES
        self.autoignore_hijacks_rules_queue = create_queue(
            SERVICE_NAME,
            exchange=self.autoignore_exchange,
            routing_key="hijacks-matching-rule",
            priority=1,
        )
        self.stop_queue = create_queue(
            "{}-{}".format(SERVICE_NAME, uuid()),
            exchange=self.command_exchange,
            routing_key="stop-{}".format(SERVICE_NAME),
            priority=1,
        )

        # DB variables
        self.ro_db = DB(
            application_name="autoignore-data-worker-readonly",
            user=DB_USER,
            password=DB_PASS,
            host=DB_HOST,
            port=DB_PORT,
            database=DB_NAME,
            reconnect=True,
            autocommit=True,
            readonly=True,
        )

        log.info("setting up autoignore checker process...")
        self.autoignore_checker = AutoignoreChecker(
            self.connection, self.shared_memory_manager_dict)
        mp.Process(target=self.autoignore_checker.run).start()
        log.info("autoignore checker set up")

        log.info("data worker initiated")
Example #25
    def dispatch_request(self):
        print('Adding a record to the database')
        user = User()
        user.id = uuid().replace('-','')
        user.name = '小建'
        user.email = '*****@*****.**'
        user.password = user.set_password(password='******')
        db.session.add(user)
        db.session.commit()

        return Response(json.dumps({'msg':'success'}),content_type='application/json')
Example #26
    def config_request_rpc(conn):
        """
        Initial RPC of this service to request the configuration.
        The RPC is blocked until the configuration service replies back.
        """
        correlation_id = uuid()
        callback_queue = Queue(
            uuid(),
            channel=conn.default_channel,
            durable=False,
            auto_delete=True,
            max_priority=4,
            consumer_arguments={"x-priority": 4},
        )

        with conn.Producer() as producer:
            producer.publish(
                "",
                exchange="",
                routing_key="config-request-queue",
                reply_to=callback_queue.name,
                correlation_id=correlation_id,
                retry=True,
                declare=[
                    Queue(
                        "config-request-queue",
                        durable=False,
                        max_priority=4,
                        consumer_arguments={"x-priority": 4},
                    ),
                    callback_queue,
                ],
                priority=4,
                serializer="ujson",
            )

        while True:
            if callback_queue.get():
                break
            time.sleep(0.1)
        print("Config RPC finished")
Example #27
File: actions.py Project: leopoul/artemis
 def send(self, hijack_key, comment):
     log.debug(
         "Send 'comment' hijack message with key: {}".format(hijack_key))
     self.response = None
     self.correlation_id = uuid()
     callback_queue = Queue(
         uuid(),
         durable=False,
         exclusive=True,
         auto_delete=True,
         max_priority=4,
         consumer_arguments={"x-priority": 4},
     )
     with Connection(RABBITMQ_URI) as connection:
         with Producer(connection) as producer:
             producer.publish(
                 {
                     "key": hijack_key,
                     "comment": comment
                 },
                 exchange="",
                 routing_key="database.rpc.hijack-comment",
                 retry=True,
                 declare=[callback_queue],
                 reply_to=callback_queue.name,
                 correlation_id=self.correlation_id,
                 priority=4,
                 serializer="ujson",
             )
         with Consumer(
                 connection,
                 on_message=self.on_response,
                 queues=[callback_queue],
                 accept=["ujson"],
         ):
             while self.response is None:
                 connection.drain_events()
     if self.response["status"] == "accepted":
         return "Comment saved.", True
     return "Error while saving.", False
Example #28
 def load_as_sets(connection):
     correlation_id = uuid()
     callback_queue = Queue(
         uuid(),
         channel=connection.default_channel,
         durable=False,
         exclusive=True,
         auto_delete=True,
         max_priority=4,
         consumer_arguments={"x-priority": 4},
     )
     with connection.Producer() as producer:
         producer.publish(
             {},
             exchange="",
             routing_key="conf-load-as-sets-queue",
             retry=True,
             declare=[callback_queue],
             reply_to=callback_queue.name,
             correlation_id=correlation_id,
             priority=4,
             serializer="ujson",
         )
     while True:
         m = callback_queue.get()
         if m:
             if m.properties["correlation_id"] == correlation_id:
                 r = m.payload
                 if not r["success"]:
                     with open("configs/config.yaml") as f1, open(
                         "configs/config3.yaml"
                     ) as f3:
                         new_data = f3.read()
                         old_data = f1.read()
                     Helper.change_conf(
                         connection, new_data, old_data, "online_as_set_test_failed"
                     )
             break
         time.sleep(0.1)
Example #29
 def enqueue(self, message: dict, routing_key: str) -> None:
     """Enqueue message"""
     producer = Producer(exchange=self.exchange, channel=self.channel)
     correlation_id = uuid()
     producer.publish(
         body=message,
         serializer="json",
         routing_key=routing_key,
         correlation_id=correlation_id,
         retry=self.retry,
         retry_policy=self.retry_policy,
     )
     self.logger.debug("Published %s: %s", routing_key, message)
Example #30
def receive(exchange_name, routing_key):
    def bind_and_wait(connection, queue):
        queue.declare(channel=connection.default_channel)
        bind_queue = queue.bind(connection.default_channel)

        recv_cnt = 0
        start = time.time()

        while recv_cnt < LIMIT_UPDATES:
            if bind_queue.get():
                recv_cnt += 1
                if recv_cnt % 1000 == 0:
                    with open("{}-{}".format(exchange_name, routing_key),
                              "w") as f:
                        print(
                            "[!] Throughput for {} on {}:{} = {} msg/s".format(
                                recv_cnt,
                                exchange_name,
                                routing_key,
                                recv_cnt / (time.time() - start),
                            ))
                        f.write(str(int(recv_cnt / (time.time() - start))))
        stop = time.time()
        print("[!] Throughput for {} on {}:{} = {} msg/s".format(
            recv_cnt, exchange_name, routing_key, recv_cnt / (stop - start)))
        with open("{}-{}".format(exchange_name, routing_key), "w") as f:
            f.write(str(int(recv_cnt / (stop - start))))

    print("[+] Receiving {} on {}:{}".format(LIMIT_UPDATES, exchange_name,
                                             routing_key))
    with Connection(RABBITMQ_URI) as connection:
        exchange = Exchange(
            exchange_name,
            channel=connection,
            type="direct",
            durable=False,
            delivery_mode=1,
        )
        exchange.declare()
        queue = Queue(
            "{}".format(uuid()),
            exchange=exchange,
            routing_key=routing_key,
            durable=False,
            auto_delete=True,
            max_priority=1,
            consumer_arguments={"x-priority": 1},
            channel=connection.default_channel,
        )
        bind_and_wait(connection, queue)
    print("[+] Exit recv")
Example #31
File: actions.py Project: leopoul/artemis
 def send(self, hijack_keys, action):
     log.debug("Send 'multiple_action - {0}' hijack message with keys: {1}".
               format(action, hijack_keys))
     self.response = None
     self.correlation_id = uuid()
     callback_queue = Queue(
         uuid(),
         durable=False,
         exclusive=True,
         auto_delete=True,
         max_priority=2,
         consumer_arguments={"x-priority": 2},
     )
     with Connection(RABBITMQ_URI) as connection:
         with Producer(connection) as producer:
             producer.publish(
                 {
                     "keys": hijack_keys,
                     "action": action
                 },
                 exchange="",
                 routing_key="database.rpc.hijack-multiple-action",
                 retry=True,
                 declare=[callback_queue],
                 reply_to=callback_queue.name,
                 correlation_id=self.correlation_id,
                 priority=4,
                 serializer="ujson",
             )
         with Consumer(
                 connection,
                 on_message=self.on_response,
                 queues=[callback_queue],
                 accept=["ujson"],
         ):
             while self.response is None:
                 connection.drain_events()
     return self.response["status"] == "accepted"
Example #32
File: rpc_client.py Project: Scalr/kombu
 def call(self, n):
     self.response = None
     self.correlation_id = uuid()
     with Producer(self.connection) as producer:
         producer.publish(
             {'n': n},
             exchange='',
             routing_key='rpc_queue',
             declare=[self.callback_queue],
             reply_to=self.callback_queue.name,
             correlation_id=self.correlation_id,
         )
     with Consumer(self.connection,
                   on_message=self.on_response,
                   queues=[self.callback_queue], no_ack=True):
         while self.response is None:
             self.connection.drain_events()
     return self.response
Example #33
File: rpc_client.py Project: Scalr/kombu
 def __init__(self, connection):
     self.connection = connection
     self.callback_queue = Queue(uuid(), exclusive=True, auto_delete=True)
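A short usage sketch for the client built from Examples #32 and #33; the class name and connection URL below are assumptions, not taken from the file:

# Hypothetical driver code for the RPC client above.
with Connection('amqp://guest:guest@localhost//') as connection:
    client = RpcClient(connection)       # class name assumed
    print('Response:', client.call(30))  # blocks until the reply arrives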
Example #34
def handle_life_event(resident):
    """
    Periodic behavior of residents:
    * Check for a pending action in current destination_bldg
    * Fetch the result of the action
    * Apply the result (update a destination_bldg payload or create new ones)
    * Update energy status
    * Check whether it's a composite destination_bldg with smell, & if so get inside
    * Check whether no smell for too long, & if so get outside
    * Look at nearby bldgs
    * Choose one to process or move to
    * Move to the chosen destination_bldg
    * Choose an action to apply to the destination_bldg's payload (if any)
    * Fire up the action
    :param resident: the acting resident
    :return:
    """
    if not acquire_lock(resident["_id"]):
        logging.warn("Resident {} previous life event is still ongoing, "
                     "aborting.".format(resident["_id"]))
        return

    life_event_id = uuid()
    global logging
    # if you need the worker id: worker=(current_process().index+1)
    logging = logging.bind(resident=resident["name"],
                           life_event_id=life_event_id)

    t1 = datetime.utcnow()
    logging.info(" a "*100)
    logging.info(type(resident))
    logging.info(resident)
    if not isinstance(resident, Resident):
        logging.info(" a1 "*100)
        resident = Resident(resident)
        logging.info(type(resident))
        logging.info(resident)

    # TODO use Redis to improve data integrity
    logging.info("Resident {name} life event invoked..."
                 .format(name=resident.name))

    # Check status of previous action.
    curr_bldg = None
    output_bldgs = None
    try:
        curr_bldg = load_bldg(_id=ObjectId(resident.bldg)) if resident.bldg else None
    except Exception:
        logging.exception("Couldn't load bldg: {}".format(resident.bldg))
    if curr_bldg is not None and resident.processing:
        action_status = resident.get_latest_action(curr_bldg)
        action_result = resident.get_action_result(action_status)
        # logging.info(action_result)
        # check if action is still pending
        if action_status is not None and \
                        action_result is None and \
                resident.is_action_pending(action_status):
            if resident.should_discard_action(action_status):
                resident.discard_action(curr_bldg, action_status)
            else:
                logging.info("Action in {addr} is still pending. "
                             "Doing nothing for now."
                             .format(addr=resident.bldg))
                release_lock(resident["_id"])
                return
        else:
            # yay, we have results
            with time_print(logging, "create result bldg"):
                output_bldgs = create_result_bldgs(curr_bldg, action_result)

        # if we got here, it means that no action is still pending
        with time_print(logging, "finish processing"):
            resident.finish_processing(action_status, curr_bldg, output_bldgs)

    # choose a bldg to move into
    with time_print(logging, "choose bldg"):
        destination_addr, destination_bldg = resident.choose_bldg(curr_bldg)

    # update the bldg at the previous location (if existing),
    # that the resident has left the bldg
    if curr_bldg:
        remove_occupant(curr_bldg)

    # if moved into a bldg, update it to indicate that
    # the residents is inside
    if destination_bldg:
        logging.info("Occupying bldg at: {}".format(destination_addr))
        add_occupant(resident._id, destination_bldg["_id"])

        resident.occupy_bldg(destination_bldg)

        # if the bldg has payload that requires processing,
        if "payload" in destination_bldg and not destination_bldg["processed"]:
            logging.info("Yay, found something to eat!!!!!!!!!!!!!!!")
            # choose an action to apply to the payload
            with time_print(logging, "choose action"):
                action = resident.choose_action(destination_bldg)

            # apply the action
            with time_print(logging, "sending action"):
                resident.start_processing(action, destination_bldg)
    else:
        logging.info("Moving to empty address: {}".format(destination_addr))
        resident.occupy_empty_address(destination_addr)

    resident.save()
    t2 = datetime.utcnow()
    delta = t2 - t1
    duration_in_ms = delta.seconds * 1000 + delta.microseconds / 1000
    logging.info("Resident life event took: {}ms".format(duration_in_ms))
    release_lock(resident["_id"])