Example #1
class Consumer(threading.Thread):
    def __init__(
        self,
        connector: Dict[str, Any],
        opencti_url: str,
        opencti_token: str,
        log_level: str,
    ) -> None:
        threading.Thread.__init__(self)
        self.api = OpenCTIApiClient(opencti_url, opencti_token, log_level)
        self.queue_name = connector["config"]["push"]
        self.pika_credentials = pika.PlainCredentials(
            connector["config"]["connection"]["user"],
            connector["config"]["connection"]["pass"],
        )
        self.pika_parameters = pika.ConnectionParameters(
            connector["config"]["connection"]["host"],
            connector["config"]["connection"]["port"],
            "/",
            self.pika_credentials,
            ssl_options=(
                pika.SSLOptions(create_ssl_context())
                if connector["config"]["connection"]["use_ssl"]
                else None
            ),
        )

        self.pika_connection = pika.BlockingConnection(self.pika_parameters)
        self.channel = self.pika_connection.channel()
        self.channel.basic_qos(prefetch_count=1)
        self.processing_count: int = 0

    def get_id(self) -> Any:  # pylint: disable=inconsistent-return-statements
        if hasattr(self, "_thread_id"):
            return self._thread_id  # type: ignore  # pylint: disable=no-member
        # pylint: disable=protected-access,redefined-builtin
        for id, thread in threading._active.items():  # type: ignore
            if thread is self:
                return id

    def terminate(self) -> None:
        thread_id = self.get_id()
        # Ask the interpreter to raise SystemExit asynchronously in the target thread
        res = ctypes.pythonapi.PyThreadState_SetAsyncExc(
            thread_id, ctypes.py_object(SystemExit))
        if res > 1:
            # More than one thread state was modified: revert the call and report failure
            ctypes.pythonapi.PyThreadState_SetAsyncExc(thread_id, 0)
            logging.info("Unable to kill the thread")

    def nack_message(self, channel: BlockingChannel,
                     delivery_tag: int) -> None:
        if channel.is_open:
            logging.info("%s",
                         f"Message (delivery_tag={delivery_tag}) rejected")
            channel.basic_nack(delivery_tag)
        else:
            logging.info(
                "%s",
                f"Message (delivery_tag={delivery_tag}) NOT rejected (channel closed)",
            )

    def ack_message(self, channel: BlockingChannel, delivery_tag: int) -> None:
        if channel.is_open:
            logging.info(
                "%s", f"Message (delivery_tag={delivery_tag}) acknowledged")
            channel.basic_ack(delivery_tag)
        else:
            logging.info(
                "%s",
                (f"Message (delivery_tag={delivery_tag}) "
                 "NOT acknowledged (channel closed)"),
            )

    def stop_consume(self, channel: BlockingChannel) -> None:
        if channel.is_open:
            channel.stop_consuming()

    # Callable for consuming a message
    def _process_message(
        self,
        channel: BlockingChannel,
        method: Any,
        properties: None,  # pylint: disable=unused-argument
        body: str,
    ) -> None:
        data = json.loads(body)
        logging.info(
            "%s",
            (f"Processing a new message (delivery_tag={method.delivery_tag})"
             ", launching a thread..."),
        )
        thread = threading.Thread(
            target=self.data_handler,
            args=[self.pika_connection, channel, method.delivery_tag, data],
        )
        thread.start()

        while thread.is_alive():  # Loop while the thread is processing
            # Sleep on the connection so heartbeats keep being serviced
            self.pika_connection.sleep(0.05)
        logging.info("Message processed, thread terminated")

    # Data handling
    def data_handler(  # pylint: disable=too-many-statements,too-many-locals
        self,
        connection: Any,
        channel: BlockingChannel,
        delivery_tag: str,
        data: Dict[str, Any],
    ) -> Optional[bool]:
        # Set the API headers
        applicant_id = data["applicant_id"]
        self.api.set_applicant_id_header(applicant_id)
        work_id = data["work_id"] if "work_id" in data else None
        # Execute the import
        self.processing_count += 1
        content = "Unparseable"
        try:
            content = base64.b64decode(data["content"]).decode("utf-8")
            types = (data["entities_types"] if "entities_types" in data
                     and len(data["entities_types"]) > 0 else None)
            update = data["update"] if "update" in data else False
            processing_count = self.processing_count
            if self.processing_count == PROCESSING_COUNT:
                processing_count = None  # type: ignore
            self.api.stix2.import_bundle_from_json(content, update, types,
                                                   processing_count)
            # Ack the message
            cb = functools.partial(self.ack_message, channel, delivery_tag)
            connection.add_callback_threadsafe(cb)
            if work_id is not None:
                self.api.work.report_expectation(work_id, None)
            self.processing_count = 0
            return True
        except Timeout as te:
            logging.warning("%s", f"A connection timeout occurred: {{ {te} }}")
            # Platform is under heavy load: wait for unlock & retry almost indefinitely.
            sleep_jitter = round(random.uniform(10, 30), 2)
            time.sleep(sleep_jitter)
            self.data_handler(connection, channel, delivery_tag, data)
            return True
        except RequestException as re:
            logging.error("%s", f"A connection error occurred: {{ {re} }}")
            time.sleep(60)
            logging.info(
                "%s",
                f"Message (delivery_tag={delivery_tag}) NOT acknowledged")
            cb = functools.partial(self.nack_message, channel, delivery_tag)
            connection.add_callback_threadsafe(cb)
            self.processing_count = 0
            return False
        except Exception as ex:  # pylint: disable=broad-except
            error = str(ex)
            if "LockError" in error and self.processing_count < MAX_PROCESSING_COUNT:
                # Platform is under heavy load:
                # wait for unlock & retry almost indefinitely.
                sleep_jitter = round(random.uniform(10, 30), 2)
                time.sleep(sleep_jitter)
                self.data_handler(connection, channel, delivery_tag, data)
            elif ("MissingReferenceError" in error
                  and self.processing_count < PROCESSING_COUNT):
                # In case of missing reference, wait & retry
                sleep_jitter = round(random.uniform(1, 3), 2)
                time.sleep(sleep_jitter)
                logging.info(
                    "%s",
                    (f"Message (delivery_tag={delivery_tag}) "
                     f"reprocess (retry nb: {self.processing_count})"),
                )
                self.data_handler(connection, channel, delivery_tag, data)
            else:
                # Platform does not know what to do and raises an error:
                # fail and acknowledge the message.
                logging.error(error)
                self.processing_count = 0
                cb = functools.partial(self.ack_message, channel, delivery_tag)
                connection.add_callback_threadsafe(cb)
                if work_id is not None:
                    self.api.work.report_expectation(work_id, {
                        "error": error,
                        "source": content
                    })
                return False
            return None

    def run(self) -> None:
        try:
            # Consume the queue
            logging.info("%s", f"Thread for queue {self.queue_name} started")
            self.channel.basic_consume(
                queue=self.queue_name,
                on_message_callback=self._process_message,
            )
            self.channel.start_consuming()
        finally:
            self.channel.stop_consuming()
            logging.info("%s",
                         f"Thread for queue {self.queue_name} terminated")
Example #2
class Consumer(threading.Thread):
    def __init__(self, connector, opencti_url, opencti_token):
        threading.Thread.__init__(self)
        self.opencti_url = opencti_url
        self.opencti_token = opencti_token
        self.api = OpenCTIApiClient(self.opencti_url, self.opencti_token)
        self.queue_name = connector["config"]["push"]
        self.pika_credentials = pika.PlainCredentials(
            connector["config"]["connection"]["user"],
            connector["config"]["connection"]["pass"],
        )
        self.pika_parameters = pika.ConnectionParameters(
            connector["config"]["connection"]["host"],
            connector["config"]["connection"]["port"],
            "/",
            self.pika_credentials,
        )
        self.pika_connection = pika.BlockingConnection(self.pika_parameters)
        self.channel = self.pika_connection.channel()
        self.channel.basic_qos(prefetch_count=1)
        self.processing_count = 0

    def get_id(self):
        if hasattr(self, "_thread_id"):
            return self._thread_id
        for id, thread in threading._active.items():
            if thread is self:
                return id

    def terminate(self):
        thread_id = self.get_id()
        res = ctypes.pythonapi.PyThreadState_SetAsyncExc(
            thread_id, ctypes.py_object(SystemExit)
        )
        if res > 1:
            ctypes.pythonapi.PyThreadState_SetAsyncExc(thread_id, 0)
            logging.info("Unable to kill the thread")

    def nack_message(self, channel, delivery_tag):
        if channel.is_open:
            logging.info("Message (delivery_tag=" + str(delivery_tag) + ") rejected")
            channel.basic_nack(delivery_tag)
        else:
            logging.info(
                "Message (delivery_tag="
                + str(delivery_tag)
                + ") NOT rejected (channel closed)"
            )

    def ack_message(self, channel, delivery_tag):
        if channel.is_open:
            logging.info(
                "Message (delivery_tag=" + str(delivery_tag) + ") acknowledged"
            )
            channel.basic_ack(delivery_tag)
        else:
            logging.info(
                "Message (delivery_tag="
                + str(delivery_tag)
                + ") NOT acknowledged (channel closed)"
            )

    def stop_consume(self, channel):
        if channel.is_open:
            channel.stop_consuming()

    # Callable for consuming a message
    def _process_message(self, channel, method, properties, body):
        data = json.loads(body)
        logging.info(
            "Processing a new message (delivery_tag="
            + str(method.delivery_tag)
            + "), launching a thread..."
        )
        thread = threading.Thread(
            target=self.data_handler,
            args=[self.pika_connection, channel, method.delivery_tag, data],
        )
        thread.start()

        while thread.is_alive():  # Loop while the thread is processing
            self.pika_connection.sleep(0.05)
        logging.info("Message processed, thread terminated")

    # Data handling
    def data_handler(self, connection, channel, delivery_tag, data):
        # Set the API headers
        applicant_id = data["applicant_id"]
        self.api.set_applicant_id_header(applicant_id)
        work_id = data["work_id"] if "work_id" in data else None
        # Execute the import
        self.processing_count += 1
        content = "Unparseable"
        try:
            content = base64.b64decode(data["content"]).decode("utf-8")
            types = (
                data["entities_types"]
                if "entities_types" in data and len(data["entities_types"]) > 0
                else None
            )
            update = data["update"] if "update" in data else False
            processing_count = self.processing_count
            if self.processing_count == PROCESSING_COUNT:
                processing_count = None
            self.api.stix2.import_bundle_from_json(
                content, update, types, processing_count
            )
            # Ack the message
            cb = functools.partial(self.ack_message, channel, delivery_tag)
            connection.add_callback_threadsafe(cb)
            if work_id is not None:
                self.api.work.report_expectation(work_id, None)
            self.processing_count = 0
            return True
        except Timeout as te:
            logging.warning("A connection timeout occurred: { " + str(te) + " }")
            # Platform is under heavy load, wait for unlock & retry almost indefinitely
            sleep_jitter = round(random.uniform(10, 30), 2)
            time.sleep(sleep_jitter)
            self.data_handler(connection, channel, delivery_tag, data)
            return True
        except RequestException as re:
            logging.error("A connection error occurred: { " + str(re) + " }")
            time.sleep(60)
            logging.info(
                "Message (delivery_tag=" + str(delivery_tag) + ") NOT acknowledged"
            )
            cb = functools.partial(self.nack_message, channel, delivery_tag)
            connection.add_callback_threadsafe(cb)
            self.processing_count = 0
            return False
        except Exception as ex:
            error = str(ex)
            if "LockError" in error and self.processing_count < MAX_PROCESSING_COUNT:
                # Platform is under heavy load, wait for unlock & retry almost indefinitely
                sleep_jitter = round(random.uniform(10, 30), 2)
                time.sleep(sleep_jitter)
                self.data_handler(connection, channel, delivery_tag, data)
            elif (
                "MissingReferenceError" in error
                and self.processing_count < PROCESSING_COUNT
            ):
                # In case of missing reference, wait & retry
                sleep_jitter = round(random.uniform(1, 3), 2)
                time.sleep(sleep_jitter)
                logging.info(
                    "Message (delivery_tag="
                    + str(delivery_tag)
                    + ") reprocess (retry nb: "
                    + str(self.processing_count)
                    + ")"
                )
                self.data_handler(connection, channel, delivery_tag, data)
            else:
                # Platform does not know what to do and raises an error, fail and acknowledge the message
                logging.error(str(ex))
                self.processing_count = 0
                cb = functools.partial(self.ack_message, channel, delivery_tag)
                connection.add_callback_threadsafe(cb)
                if work_id is not None:
                    self.api.work.report_expectation(
                        work_id, {"error": str(ex), "source": content}
                    )
                return False

    def run(self):
        try:
            # Consume the queue
            logging.info("Thread for queue " + self.queue_name + " started")
            self.channel.basic_consume(
                queue=self.queue_name, on_message_callback=self._process_message
            )
            self.channel.start_consuming()
        finally:
            self.channel.stop_consuming()
            logging.info("Thread for queue " + self.queue_name + " terminated")
Example #3
class Consumer(threading.Thread):
    def __init__(self, connector, opencti_url, opencti_token):
        threading.Thread.__init__(self)
        self.opencti_url = opencti_url
        self.opencti_token = opencti_token
        self.api = OpenCTIApiClient(self.opencti_url, self.opencti_token)
        self.queue_name = connector["config"]["push"]
        self.pika_connection = pika.BlockingConnection(
            pika.URLParameters(connector["config"]["uri"])
        )
        self.channel = self.pika_connection.channel()
        self.channel.basic_qos(prefetch_count=1)
        self.processing_count = 0

    def get_id(self):
        if hasattr(self, "_thread_id"):
            return self._thread_id
        for id, thread in threading._active.items():
            if thread is self:
                return id

    def terminate(self):
        thread_id = self.get_id()
        res = ctypes.pythonapi.PyThreadState_SetAsyncExc(
            thread_id, ctypes.py_object(SystemExit)
        )
        if res > 1:
            ctypes.pythonapi.PyThreadState_SetAsyncExc(thread_id, 0)
            logging.info("Unable to kill the thread")

    def nack_message(self, channel, delivery_tag):
        if channel.is_open:
            logging.info("Message (delivery_tag=" + str(delivery_tag) + ") rejected")
            channel.basic_nack(delivery_tag)
        else:
            logging.info(
                "Message (delivery_tag="
                + str(delivery_tag)
                + ") NOT rejected (channel closed)"
            )

    def ack_message(self, channel, delivery_tag):
        if channel.is_open:
            logging.info(
                "Message (delivery_tag=" + str(delivery_tag) + ") acknowledged"
            )
            channel.basic_ack(delivery_tag)
        else:
            logging.info(
                "Message (delivery_tag="
                + str(delivery_tag)
                + ") NOT acknowledged (channel closed)"
            )

    def stop_consume(self, channel):
        if channel.is_open:
            channel.stop_consuming()

    # Callable for consuming a message
    def _process_message(self, channel, method, properties, body):
        data = json.loads(body)
        logging.info(
            "Processing a new message (delivery_tag="
            + str(method.delivery_tag)
            + "), launching a thread..."
        )
        thread = threading.Thread(
            target=self.data_handler,
            args=[self.pika_connection, channel, method.delivery_tag, data],
        )
        thread.start()

        while thread.is_alive():  # Loop while the thread is processing
            self.pika_connection.sleep(0.05)
        logging.info("Message processed, thread terminated")

    def report_error(self, work_id, content, error):
        # Add in queue
        ACK_QUEUE.put(
            {
                "type": "nack",
                "work_id": work_id,
                "content": content,
                "error": str(error),
            }
        )

    def report_success(self, work_id):
        # Add in queue
        ACK_QUEUE.put(
            {
                "type": "ack",
                "work_id": work_id,
            }
        )

    # Data handling
    def data_handler(self, connection, channel, delivery_tag, data):
        # Set the API headers
        applicant_id = data["applicant_id"]
        self.api.set_applicant_id_header(applicant_id)
        work_id = data["work_id"] if "work_id" in data else None
        # Execute the import
        self.processing_count += 1
        content = "Unparseable"
        try:
            content = base64.b64decode(data["content"]).decode("utf-8")
            types = (
                data["entities_types"]
                if "entities_types" in data and len(data["entities_types"]) > 0
                else None
            )
            update = data["update"] if "update" in data else False
            self.api.stix2.import_bundle_from_json(content, update, types, False)
            # Ack the message
            cb = functools.partial(self.ack_message, channel, delivery_tag)
            connection.add_callback_threadsafe(cb)
            if work_id is not None:
                self.report_success(work_id)
            self.processing_count = 0
            return True
        except RequestException as re:
            logging.error("A connection error occurred: { " + str(re) + " }")
            time.sleep(60)
            logging.info(
                "Message (delivery_tag=" + str(delivery_tag) + ") NOT acknowledged"
            )
            cb = functools.partial(self.nack_message, channel, delivery_tag)
            connection.add_callback_threadsafe(cb)
            self.processing_count = 0
            return False
        except Exception as ex:
            error = str(ex)
            if "UnsupportedError" not in error and self.processing_count <= 5:
                time.sleep(1)
                logging.info(
                    "Message (delivery_tag="
                    + str(delivery_tag)
                    + ") reprocess (retry nb: "
                    + str(self.processing_count)
                    + ")"
                )
                self.data_handler(connection, channel, delivery_tag, data)
            else:
                logging.error(str(ex))
                self.processing_count = 0
                cb = functools.partial(self.ack_message, channel, delivery_tag)
                connection.add_callback_threadsafe(cb)
                if work_id is not None:
                    self.report_error(work_id, content, ex)
                return False

    def run(self):
        try:
            # Consume the queue
            logging.info("Thread for queue " + self.queue_name + " started")
            self.channel.basic_consume(
                queue=self.queue_name, on_message_callback=self._process_message
            )
            self.channel.start_consuming()
        finally:
            self.channel.stop_consuming()
            logging.info("Thread for queue " + self.queue_name + " terminated")