Example 1
        def wrapper(*args, **kwargs):
            results_process, results_queue = run(
                statement, settings, realtime=realtime, timeout=timeout, **query_args
            )
            last_result = None

            while True:
                try:
                    event = results_queue.get(timeout=timeout)
                    event_type = event.get("data", {}).get("type")
                    if event_type == EVENT_TYPE_EVENT:
                        last_result = f(event, *args, **kwargs)
                    elif event_type == EVENT_TYPE_DESTROY:
                        break
                    else:
                        if event_type != EVENT_TYPE_SPAN:
                            logging.info(f"Got event with type={event_type}")

                        continue
                except queue.Empty:
                    logging.exception(f"No results after {timeout} seconds")
                    break
                except EOFError as e:
                    logging.exception(f"Connection lost: {e}")
                    break

            # Release resources after the query ends
            results_queue.close()
            results_process.terminate()
            results_process.join()

            return last_result
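
Example 1 shows only the inner `wrapper`; the enclosing decorator factory is not part of the snippet. A minimal sketch of how that factory presumably looks, with the name `on_event` and the parameter list inferred from Example 5 (nothing here is confirmed by the snippet itself):

import functools

def on_event(statement, settings, realtime=False, timeout=None, **query_args):
    # Hypothetical factory: the query arguments are captured in a closure
    # and consumed by `wrapper` (Example 1) when the handler finally runs
    def decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            ...  # body exactly as in Example 1
        return wrapper
    return decorator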
Example 2
def handle_events(event, callback, settings, accumulator=None):
    monitor_settings = settings.get("monitor", {})
    window_duration = monitor_settings.get("window_duration", 60)
    mnemonics = monitor_settings.get("mnemonics", {})
    index_mnemonic = mnemonics.get("index", "timestamp")

    if accumulator is None:
        accumulator = []

    try:
        latest_data, missing_curves = validate_event(event, settings)

        if latest_data:
            accumulator, start, end = refresh_accumulator(
                latest_data, accumulator, index_mnemonic, window_duration)

            if accumulator:
                callback(accumulator)

        elif missing_curves:
            missing_curve_names = ", ".join(missing_curves)
            logging.info(
                f"Some curves are missing ({missing_curve_names}) from event {event} "
            )

    except Exception as e:
        logging.exception(f"Error during query: <{e}>")
        # Start over with a fresh accumulator after a failure
        handle_events(event, callback, settings)
        return
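
A wiring sketch for this handler, assuming `event` is a single event dict taken from the query results stream; the callback and settings below are illustrative:

def on_window(accumulator):
    # Illustrative callback: report the size of the current rolling window
    print(f"window now holds {len(accumulator)} events")

settings = {"monitor": {"window_duration": 60, "mnemonics": {"index": "timestamp"}}}
handle_events(event, on_window, settings)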
Example 3
def start(settings, **kwargs):
    event_type = settings["output"]["event_type"]
    setproctitle('DDA: LAS replayer for "{}"'.format(event_type))

    handling_func = export_curves_data

    iterations = 0
    while True:
        try:
            success, las_data, chat_data, index_mnemonic = open_files(
                settings, iterations)

            if success:
                handling_func(event_type, las_data, chat_data, index_mnemonic,
                              settings)
                logging.info("{}: Iteration {} successful".format(
                    event_type, iterations))

            else:
                logging.info(
                    "{}: Single pass mode, exiting".format(event_type))
                break

        except Exception as e:
            logging.error("{}: Error processing events, {}<{}>".format(
                event_type, e, type(e)))

        iterations += 1

    return
Example 4
def add_bot(settings, bots_registry, room_id):
    new_bot = False
    if room_id is not None:
        room_bot, room_queue = bots_registry.get(room_id, (None, None))

        if room_bot and room_bot.is_alive():
            logging.debug("Bot for {} is already known".format(room_id))
        else:
            logging.info("New bot for room {}".format(room_id))
            new_bot = True

            start_chatbot_with_log = agent_function(
                start_chatbot, name=f"bot for room {room_id}", with_state=True)
            with start_action(action_type="start_chatbot",
                              room_id=room_id) as action:
                task_id = action.serialize_task_id()
                room_queue = Queue()
                room_bot = start_chatbot_with_log(settings,
                                                  room_id,
                                                  room_queue,
                                                  task_id=task_id)

            room_bot.start()
            bots_registry[room_id] = (room_bot, room_queue)

    return bots_registry, new_bot
Example 5
def start(settings, **kwargs):
    logging.info("Trade frequency monitor started")
    setproctitle("DDA: Trade frequency monitor")

    monitor_settings = settings.get("monitor", {})
    window_duration = monitor_settings.get("window_duration", 60)
    sampling_frequency = monitor_settings.get("sampling_frequency", 30)
    max_threshold = monitor_settings.get("max_threshold", 100)

    fr_query = f"""krakenfx
        => count() by pair every {sampling_frequency} seconds over last {window_duration} seconds
        => @filter(count > {max_threshold})
    """
    span = f"last {window_duration} seconds"

    @on_event(fr_query, settings, span=span, timeout=read_timeout)
    def handle_events(event):
        # Generate an alert whenever the threshold is reached;
        # a new event means another threshold breach
        event_content = event.get("data", {}).get("content", [])

        for item in event_content:
            template = "{} traded {} times over the last {} seconds"
            message = template.format(item["pair"], int(item["count"]), window_duration)
            messenger.send_message(message, timestamp=item["timestamp"], settings=settings)

        return

    handle_events()
Example 6
def async_send(queue, live_settings):
    with start_action(action_type="async_logger"):
        logging.info("Remote logger process started")
        setproctitle("DDA: Remote logger")

        live_settings.update(session=build_session(live_settings))
        while True:
            event = queue.get()
            send_event(event, live_settings)
Example 7
def async_send(queue, live_settings):
    with start_action(action_type="async_logger"):
        logging.info("Remote logger process started")
        setproctitle("DDA: Remote logger")

        live_settings.update(session=build_session(live_settings))
        while True:
            event = queue.get()
            try:
                send_event(event, live_settings)
            except RequestException:
                logging.warning("Ignoring previous exception")
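
A sketch of how this remote logger loop might be started, assuming it runs in its own process and shares a multiprocessing queue with the producers (the settings content is illustrative):

from multiprocessing import Process, Queue

events_queue = Queue()
live_settings = {}  # illustrative; would hold the live server configuration

sender = Process(target=async_send, args=(events_queue, live_settings), daemon=True)
sender.start()

# Producers enqueue events without blocking on network I/O
events_queue.put({"message": "agent started"})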
Example 8
    def load(self) -> Dict[str, Any]:
        state_filename = self.filename

        try:
            with open(state_filename, "r+b") as f:
                state = dill.load(f)
        except Exception:
            state = {}

        self.updated_at = state.get(TIMESTAMP_KEY, self.updated_at)

        logging.info(f"State for {self.identifier} ({len(state)} keys) loaded")
        return state
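
Example 8 shows only the `load` half of this state manager. A plausible counterpart `save`, assuming the same `dill` serialization and `TIMESTAMP_KEY` convention; the `force` parameter appears at the call sites in Examples 10 and 18, but its exact semantics are not shown, so it is merely accepted here:

    def save(self, state: Dict[str, Any], force: bool = False) -> None:
        # Hypothetical counterpart to `load`: stamp the state and serialize
        # it with dill (assumes `import time` alongside the existing imports)
        state[TIMESTAMP_KEY] = time.time()
        with open(self.filename, "wb") as f:
            dill.dump(state, f)

        self.updated_at = state[TIMESTAMP_KEY]
        logging.info(f"State for {self.identifier} ({len(state)} keys) saved")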
Example 9
def monitor_processes(process_map: Mapping,
                      heartbeat_interval: int = 60) -> Iterable:

    while True:
        for name, process_data in process_map.items():
            process = process_data.process

            if process and process.is_alive():
                logging.info(
                    f'Process for "{name}" (pid={process.pid}) is alive')

            else:
                if process:
                    logging.info(
                        f'Process for "{name}" (pid={process.pid}) has died. Restarting'
                    )
                else:
                    logging.info(
                        f'Starting "{name}" using {process_data.function}')

                process = process_data.function(process_data.settings)
                try:
                    process.start()
                    logging.info(
                        f'Process for "{name}" (pid={process.pid}) started')
                except OSError as e:
                    logging.exception(f"Error starting process {name} ({e})")

            process_data.process = process

        sleep(heartbeat_interval)

    # Only reached if the `while` loop above is ever exited
    running_processes = [item.process for item in process_map.values()]
    return running_processes
Example 10
def start(settings, **kwargs):
    setproctitle("DDA: Chatbot main process")
    logging.info("Chatbot process started")

    # Load the previous state
    state_manager = kwargs.get("state_manager")
    state = state_manager.load()
    bots_registry = state.get("bots_registry", {})

    # Restart previously known bots
    rooms_with_bots = bots_registry.keys()
    for room_id in rooms_with_bots:
        bots_registry, new_bot = add_bot(settings, bots_registry, room_id)

    bot_alias = settings.get("alias", "Intelie").lower()
    bot_query = f"""
        __message -__delete:*
        => @filter(
            message:lower():contains("{bot_alias}") &&
            author->name:lower() != "{bot_alias}"
        )
    """

    @query.on_event(bot_query, settings, timeout=read_timeout)
    def handle_events(event, *args, **kwargs):
        messenger.join_messenger(settings)
        route_message(settings, bots_registry, event)

        # There is no use saving the processes, so we save a dict with no values
        state_manager.save(
            {
                "bots_registry":
                dict((room_id, (None, None)) for room_id in bots_registry)
            },
            force=True,
        )
        return

    try:
        handle_events()
    except Exception:
        # Nothing to do. Let this process end
        pass

    for bot in active_children():
        bot.terminate()
        bot.join(5)

    return
Example 11
def start(global_settings: Mapping) -> Iterable:
    processes_to_run = resolve_process_handlers(global_settings)
    num_processes = len(processes_to_run)
    logging.info("Starting {} processes: {}".format(
        num_processes, ", ".join(processes_to_run.keys())))

    process_map = {}
    for name, settings in processes_to_run.items():
        process_func = settings.pop("process_func")

        process_map[name] = ProcessSpec(
            function=agent_function(process_func, name=name, with_state=True),
            settings=settings,
            process=None,
        )

    return monitor_processes(process_map)
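
`ProcessSpec` is referenced but never defined in these snippets. Since `monitor_processes` (Example 9) both reads and assigns `process_data.process`, it is presumably a simple mutable record; this is a guess from usage, not the library's actual definition:

from dataclasses import dataclass
from typing import Any, Callable, Mapping, Optional

@dataclass
class ProcessSpec:
    function: Callable      # factory returning an object with start() and is_alive()
    settings: Mapping       # settings handed to the factory
    process: Optional[Any]  # the running process, filled in by monitor_processes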
Example 12
def export_curves_data(event_type, las_data, chat_data, index_mnemonic,
                       settings):
    logging.info("Exporting curves for {}".format(event_type))
    output_dir = settings.get("temp_dir", "/tmp")

    source_name = las_data.version.SOURCE.value
    output_filename = "{}/{}.csv".format(output_dir, source_name)

    with open(output_filename, "w") as output_file:
        writer = csv.writer(output_file)

        for curve in las_data.curves:
            writer.writerow([
                "{} - {}".format(curve.mnemonic, curve.descr), curve.mnemonic,
                curve.unit, "", ""
            ])

    logging.info("File {} created".format(output_filename))
Example 13
from contextlib import contextmanager


# Presumably wrapped with contextlib.contextmanager at the source,
# since the body yields exactly once per `with` block
@contextmanager
def retry_on_failure(timeout=3.05, max_retries=0):
    request_finished = False
    retries = 0

    while request_finished is False:
        try:
            with ensure_timeout(timeout):
                yield
        except (socket.timeout, Timeout):
            if max_retries and (retries < max_retries):
                logging.info(
                    f"Operation timed out, retrying ({retries}/{max_retries})")
                retries += 1
                continue
            else:
                logging.error(f"Operation timed out")
        finally:
            request_finished = True
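
A usage sketch, assuming the context-manager form noted above and a blocking call that can time out; the `requests` call and URL are illustrative:

import requests

# Allow up to two retries if the request exceeds the 3.05s budget
with retry_on_failure(timeout=3.05, max_retries=2):
    response = requests.get("https://example.com/health")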
Example 14
async def read_results(url, pairs, output_queue):
    setproctitle("krakenfx: reading updates")
    with start_action(action_type="krakenfx.fetch_updates", url=url):
        # connect to the server
        async with websockets.connect(url) as websocket:
            subscription = {
                "event": "subscribe",
                "subscription": {
                    "name": "trade"
                },
                "pair": pairs
            }
            logging.info(f"Subscribing to '{subscription}'")
            await websocket.send(json.dumps(subscription))

            # listen for incoming messages
            async for message in websocket:
                logging.debug(f"New message'{message}'")
                output_queue.put(message)
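
A sketch of driving this coroutine, assuming Kraken's public websocket endpoint and a multiprocessing queue on the consumer side (the pair list is illustrative):

import asyncio
from multiprocessing import Queue

output_queue = Queue()
url = "wss://ws.kraken.com"  # Kraken's public websocket endpoint
pairs = ["XBT/USD"]          # illustrative pair list

asyncio.run(read_results(url, pairs, output_queue))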
Example 15
def generate_events(event_type, las_data, chat_data, index_mnemonic, settings,
                    state_manager):
    logging.info("{}: Event generation started".format(event_type))

    source_name = las_data.version.SOURCE.value
    curves_data = dict((item.mnemonic, item.unit) for item in las_data.curves)
    las_df = las_data.df()
    values_iterator = las_df.iterrows()
    curves = las_df.columns

    success = True
    state = state_manager.load()
    last_timestamp = state.get("last_timestamp", 0)
    if last_timestamp > 0:
        logging.info(f"Skipping to index {last_timestamp}")

    while success:
        success, statuses = read_next_frame(values_iterator, curves,
                                            curves_data, index_mnemonic)

        if success:
            next_timestamp = statuses.get(index_mnemonic, {}).get("value", 0)

            # Only evaluated when a frame was read, so `next_timestamp`
            # is always bound before this comparison
            if next_timestamp > last_timestamp:
                delay_output(last_timestamp, next_timestamp)

                if last_timestamp == 0:
                    message = "Replay from '{}' started at TIME {}".format(
                        source_name, next_timestamp)
                    send_message(message,
                                 timestamp.get_timestamp(),
                                 settings=settings)

                raw.create(event_type, statuses, settings)

                update_chat(chat_data, last_timestamp, next_timestamp,
                            index_mnemonic, settings)
                last_timestamp = next_timestamp
                state_manager.save({"last_timestamp": last_timestamp})
Example 16
def process_messages(chatbot, messages):
    settings = chatbot.context.get("settings")
    room_id = chatbot.context.get("room_id")

    for message in messages:
        with start_action(action_type="process_message",
                          message=message.get("text")):
            is_mention, message = maybe_mention(settings, message)

            response = None
            if is_mention:
                response = chatbot.get_response(message)

            if response is not None:
                logging.info('Bot response is "{}"'.format(
                    response.serialize()))
                if isinstance(response, ActionStatement):
                    response.chatbot = chatbot
                    response_message = response.run()
                else:
                    response_message = response.text

                maybe_send_message(settings, room_id, response_message)
Example 17
    def run(self):
        with Action.continue_task(task_id=self.task_id):
            try:
                with open(self.settings_file, "r") as fd:
                    global_settings = json.load(fd)

                logging_settings = global_settings.get("logging")
                live_settings = global_settings.get("live")

                logging.setup_python_logging(logging_settings)
                logging.setup_live_logging(logging_settings, live_settings)

                agent_processes = processes.start(global_settings)
            except KeyboardInterrupt:
                logging.info("Execution interrupted")
                raise
            except Exception:
                logging.exception("Error processing inputs")
                raise

        for item in agent_processes:
            item.terminate()
            item.join()
Example 18
def start(settings, **kwargs):
    """
    Starts the LAS replayer, based on the process settings.

    Replays sensors and chat data.

    :param settings: Parameters for this `las_replayer` instance
    :type settings: dict

    :param state_manager: StateManager injected by `live-agent`
    :type state_manager: live_agent.services.StateManager

    :rtype: NoneType

    The settings for this process have the following format::

      {
        "type": "las_replay",
        "enabled": true,  # Self explanatory
        "index_mnemonic": "TIME",  # Curve used as index for the LAS data
        "path_list": [
          # A list of filename pairs containing the data to be replayed
          [<path for a LAS file>, <path for a CSV file containing the chat logs>],
          ...
        ],
        "output": {
          "event_type": "raw_wellX",  # The name of the event type which should be sent to Intelie Live
          "author": {
            "id": <user id>  # User id of the author for the messages
            "name": "Linguistics monitor"  # User name of the author for the messages
          },
          "room": {
            "id": <room id>  # Id of the room where the messages should be sent
          },
          "message_event": {
            # Information for generating markers on charts
            "event_type": "raw_wellX",  # Usually the raw event type of the asset being monitored
            "mnemonic": "MSG"  # Mnemonic used for messages normalization, usually named `MSG`
          }
        }
      }

    The LAS file *must* be SPACE delimited.

    The CSV file must contain at least 3 columns:

    - `MESSAGE`: The text of the message
    - `SOURCE`: The name of the message sender
    - A column with the same name as the `index_mnemonic` defined in the process settings,
      used for correlating messages with the events from the LAS file.

    """
    event_type = settings["output"]["event_type"]
    cooldown_time = settings.get("cooldown_time", 300)
    setproctitle('DDA: LAS replayer for "{}"'.format(event_type))

    state_manager = kwargs.get("state_manager")
    state = state_manager.load()
    iterations = state.get("iterations", 0)

    while True:
        try:
            success, las_data, chat_data, index_mnemonic = open_files(
                settings, iterations, mode=READ_MODES.CONTINUOUS)

            if success:
                generate_events(event_type, las_data, chat_data,
                                index_mnemonic, settings, state_manager)
                logging.info("Iteration {} successful".format(iterations))
            else:
                logging.warning("Could not open files")

            state_manager.save(
                {"last_timestamp": 0, "iterations": iterations},
                force=True,
            )
            loop.await_next_cycle(
                cooldown_time,
                message="Sleeping for {:.1f} minutes between runs".format(
                    cooldown_time / 60.0),
                log_func=logging.info,
            )

        except KeyboardInterrupt:
            logging.info("Stopping after {} iterations".format(iterations))
            raise

        except Exception as e:
            logging.error(
                "Error processing events during iteration {}, {}<{}>".format(
                    iterations, e, type(e)))

        iterations += 1
        # Release file data between iterations; None-assignment is safe even
        # when open_files failed and the names were never bound on this pass
        las_data = chat_data = None

    return
Example 19
    def download_and_log(self, name):
        logging.info(f"Downloading {name}")
        nltk.download(name)
Example 20
def log_and_import(name, package=None):
    try:
        # Forward `package` so relative module names resolve as intended
        return importlib.import_module(name, package=package)
    except Exception as e:
        logging.info(f"Error importing {name} (from package={package}): {e}")