def add_bot(settings, bots_registry, room_id):
    new_bot = False
    if room_id is not None:
        room_bot, room_queue = bots_registry.get(room_id, (None, None))
        if room_bot and room_bot.is_alive():
            logging.debug("Bot for {} is already known".format(room_id))
        else:
            logging.info("New bot for room {}".format(room_id))
            new_bot = True

            start_chatbot_with_log = agent_function(
                start_chatbot, name=f"bot for room {room_id}", with_state=True
            )
            with start_action(action_type="start_chatbot", room_id=room_id) as action:
                task_id = action.serialize_task_id()
                room_queue = Queue()
                room_bot = start_chatbot_with_log(
                    settings, room_id, room_queue, task_id=task_id
                )
                room_bot.start()

        # Keep the registry in sync with the running bot processes
        bots_registry[room_id] = (room_bot, room_queue)

    return bots_registry, new_bot

def prepare_query(settings):
    event_type = settings.get("event_type")
    mnemonics_settings = settings.get("monitor", {}).get("mnemonics", {})
    query_mnemonics = list(mnemonics_settings.values())

    mnemonics_list = "|".join(query_mnemonics)
    values_pipe_fragments = [
        r"lastv(value:object):if(\mnemonic:{0}) as {0}".format(item)
        for item in query_mnemonics
    ]
    units_pipe_fragments = [
        r"lastv(uom:object):if(\mnemonic:{0}) as {0}_uom".format(item)
        for item in query_mnemonics
    ]
    pipe_fragments = values_pipe_fragments + units_pipe_fragments
    mnemonics_pipe = ", ".join(pipe_fragments)

    query = """
        {} mnemonic!:({}) .flags:nocount
        => {} over last second every second
        => @filter({} != null)
    """.format(event_type, mnemonics_list, mnemonics_pipe, query_mnemonics[0])
    logging.debug(f'query is "{query}"')

    return query

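# Usage sketch for prepare_query. The settings values below are hypothetical;
# only the keys ("event_type" and "monitor"/"mnemonics") are assumed by the
# function above.
#
#   settings = {
#       "event_type": "raw_well_data",
#       "monitor": {"mnemonics": {"pressure": "WPRE", "flow_rate": "QIN"}},
#   }
#
# prepare_query(settings) then yields a pipes query along the lines of:
#
#   raw_well_data mnemonic!:(WPRE|QIN) .flags:nocount
#   => lastv(value:object):if(\mnemonic:WPRE) as WPRE, ... over last second every second
#   => @filter(WPRE != null)
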
def format_and_send(message, room, author, connection_func):
    event = format_message_event(message, room, author, timestamp=get_timestamp())
    logging.debug("Sending message {}".format(event))
    connection_func(event)

def refresh_accumulator(latest_events, accumulator, index_mnemonic, window_duration):
    # Purge old events and add the new ones
    latest_event = latest_events[-1]
    window_end = latest_event.get(index_mnemonic, 0)
    window_start = window_end - window_duration
    last_index = window_start

    if index_mnemonic not in latest_event:
        mnemonics_list = latest_event.keys()
        logging.error(
            f"Mnemonic '{index_mnemonic}' not found. Available mnemonics are: '{mnemonics_list}'"
        )

    accumulator.extend(latest_events)
    purged_accumulator = []
    for item in accumulator:
        index = item.get(index_mnemonic, 0)
        if (window_start <= index <= window_end) and (index >= last_index):
            purged_accumulator.append(item)
            last_index = index
        elif index < last_index:
            # Reset the accumulator
            purged_accumulator = [item]
            last_index = index
        elif index == 0:
            logging.error(f"{index_mnemonic} not found, ignoring event")

    logging.debug(
        "{} of {} events between {} and {}".format(
            len(purged_accumulator), len(accumulator), window_start, window_end
        )
    )
    return purged_accumulator, window_start, window_end

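# Worked example for refresh_accumulator, assuming events indexed by a
# hypothetical "TIME" mnemonic and a 30-unit window:
#
#   events = [{"TIME": 100, "WPRE": 1.2}, {"TIME": 110, "WPRE": 1.3}]
#   acc, start, end = refresh_accumulator(events, [], "TIME", 30)
#   # -> window is [80, 110]; both events fall inside, so acc == events
#
# An event whose index is smaller than the previous one resets the
# accumulator, which copes with streams that restart from the beginning.
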
def open_files(settings, iterations, mode=READ_MODES.CONTINUOUS):
    path_list = settings["path_list"]
    index_mnemonic = settings["index_mnemonic"]

    if mode == READ_MODES.CONTINUOUS:
        path_index = iterations % len(path_list)
    else:
        path_index = iterations

    las_path = chat_path = None  # so the error log below works if the unpack fails
    try:
        las_path, chat_path = path_list[path_index]
        with open(las_path, "r") as las_file:
            data = lasio.read(las_file)

        if chat_path:
            with open(chat_path, "r") as chat_file:
                chat_data = list(csv.DictReader(chat_file))
            logging.debug("Success opening files {} and {}".format(las_path, chat_path))
        else:
            chat_data = []
            logging.debug("Success opening file {}".format(las_path))

        success = True
    except Exception as e:
        data = e
        chat_data = None
        success = False
        logging.error("Error opening file {}, {}<{}>".format(las_path, e, type(e)))

    return success, data, chat_data, index_mnemonic

def start(statement, settings, timeout=None, **kwargs):
    live_settings = settings["live"]
    live_url = live_settings["url"]
    verify_ssl = live_settings.get("verify_ssl", True)
    if "session" not in settings:
        settings.update(session=build_session(live_settings))
    session = settings["session"]

    realtime = kwargs.get("realtime", False)
    span = kwargs.get("span", None)
    preload = kwargs.get("preload", False)
    max_retries = kwargs.get("max_retries", 0)

    api_url = f"{live_url}/rest/query"
    query_payload = [
        {
            "provider": "pipes",
            "preload": preload,
            "span": span,
            "follow": realtime,
            "expression": statement,
        }
    ]

    with retry_on_failure(timeout, max_retries=max_retries):
        logging.debug(f"Query '{statement}' started")
        r = session.post(api_url, json=query_payload, verify=verify_ssl)
        r.raise_for_status()
        channels = [item.get("channel") for item in r.json()]

    return channels

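# Usage sketch for start(). The pipes expression below is a hypothetical
# example, and `settings` is assumed to carry the "live" section with "url"
# (and optionally "verify_ssl") read above.
#
#   channels = start("raw_well_data => count() every second", settings, realtime=True)
#   # -> list of channel ids to subscribe to on the cometd endpoint
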
def do_save(self, state: MutableMapping[str, Any], timestamp: float) -> None:
    state_filename = self.filename
    state[TIMESTAMP_KEY] = timestamp
    with open(state_filename, "w+b") as f:
        dill.dump(state, f)

    self.updated_at = timestamp
    logging.debug(f"State for {self.identifier} saved")

def save(self, state: MutableMapping[str, Any], force: bool = False) -> None:
    now = time.time()
    next_possible_update = self.updated_at + self.delay_between_updates
    time_until_update = next_possible_update - now

    if (time_until_update > 0) and (not force):
        logging.debug(
            f"Update for {self.identifier} dropped. Wait {time_until_update:.2f}s"
        )
    else:
        self.do_save(state, timestamp=now)

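# Throttling sketch for save(): assuming an instance configured with
# delay_between_updates = 60 (seconds), repeated calls inside that window are
# dropped unless forced.
#
#   state_manager.save({"last_trades": last_trades})              # persisted
#   state_manager.save({"last_trades": last_trades})              # dropped: < 60s elapsed
#   state_manager.save({"last_trades": last_trades}, force=True)  # persisted anyway
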
def maybe_send_message_event(message, timestamp, settings):
    output_settings = settings["output"]
    message_event = output_settings.get("message_event", {})
    event_type = message_event.get("event_type")
    messages_mnemonic = message_event.get("mnemonic")

    if event_type and messages_mnemonic:
        connection_func = build_sender_function(settings["live"])
        event = {"timestamp": timestamp, messages_mnemonic: {"value": message}}
        logging.debug("Sending message event '{}' for '{}'".format(event, event_type))
        raw.format_and_send(event, event_type, connection_func=connection_func)
        return True

    return False

def can_process(self, statement):
    confidence = self.get_confidence(statement)
    can_process = confidence > self.confidence_threshold
    logging.debug(
        "{} (confidence {}): The 10 most informative features are: {}".format(
            self.__class__.__name__,
            confidence,
            ", ".join(
                pformat(item)
                for item in self.classifier.most_informative_features(n=10)
            ),
        )
    )
    return can_process

# Needs `from contextlib import contextmanager`: the decorator is required
# for the `with ensure_timeout(...)` usage seen in read_results below.
@contextmanager
def ensure_timeout(timeout):
    default_timeout = socket.getdefaulttimeout()

    if isinstance(timeout, (list, tuple)):
        socket_timeout = timeout[0]
    else:
        socket_timeout = timeout

    if socket_timeout is not None:
        socket.setdefaulttimeout(socket_timeout)
        logging.debug(f"Socket timeout is now {socket_timeout}")

    try:
        yield
    finally:
        socket.setdefaulttimeout(default_timeout)
        logging.debug(f"Socket timeout back to default ({default_timeout})")

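# Usage sketch: temporarily lower the global socket timeout around a blocking
# call, restoring the previous default even if the call raises.
#
#   with ensure_timeout(3.05):
#       fetch_data()  # hypothetical helper; any socket-based I/O applies
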
def run(statement, settings, timeout=None, **kwargs):
    with start_action(action_type="query.run", statement=statement):
        live_settings = settings["live"]
        channels = start(statement, settings, timeout=timeout, **kwargs)
        logging.debug(f"Results channel is {channels}")

        live_url = live_settings["url"]
        results_url = f"{live_url}/cometd"

        events_queue = Queue()
        process = Process(target=watch, args=(results_url, channels, events_queue))
        process.start()

    return process, events_queue

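# Consumption sketch for run(): the returned multiprocessing queue yields raw
# cometd messages until the caller stops the watcher process. The statement is
# a hypothetical pipes expression.
#
#   process, events = run("raw_well_data => @filter(WPRE != null)", settings)
#   try:
#       while True:
#           handle(events.get(timeout=30))  # handle() is a hypothetical callback
#   except queue.Empty:
#       pass
#   finally:
#       process.terminate()
#       process.join()
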
def route_message(settings, bots_registry, event):
    logging.debug("Got an event: {}".format(event))

    messages = maybe_extract_messages(event)
    for message in messages:
        room_id = message.get("room", {}).get("id")
        sender = message.get("author", {})

        bots_registry, new_bot = add_bot(settings, bots_registry, room_id)
        if new_bot:
            messenger.add_to_room(settings, room_id, sender)

        # Send the message to the room's bot process
        room_bot, room_queue = bots_registry.get(room_id, (None, None))
        if room_queue is not None:
            room_queue.put(event)

    return [item[0] for item in bots_registry.values()]

def read_next_frame(values_iterator, curves, curves_data, index_mnemonic):
    try:
        index, values = next(values_iterator)
        success = True
    except Exception as e:
        output_frame = {}
        success = False
        logging.debug("Error reading next value, {}<{}>".format(e, type(e)))

    if success:
        output_frame = {index_mnemonic: {"value": index, "uom": "s"}}
        for position, channel in enumerate(curves):
            uom = curves_data.get(channel)
            channel_value = values[position]
            output_frame[channel] = {"value": channel_value, "uom": uom}

    return success, output_frame

async def read_results(url, pairs, output_queue):
    setproctitle("krakenfx: reading updates")

    with start_action(action_type="krakenfx.fetch_updates", url=url):
        # Connect to the server
        async with websockets.connect(url) as websocket:
            subscription = {
                "event": "subscribe",
                "subscription": {"name": "trade"},
                "pair": pairs,
            }
            logging.info(f"Subscribing to '{subscription}'")
            await websocket.send(json.dumps(subscription))

            # Listen for incoming messages
            async for message in websocket:
                logging.debug(f"New message '{message}'")
                output_queue.put(message)

def maybe_send_chat_message(message, settings, **kwargs):
    output_settings = settings["output"]
    author = output_settings.get("author")
    room = kwargs.get("room", output_settings.get("room"))

    shall_send_message = (room is not None) and (author is not None)
    if not shall_send_message:
        logging.warning(
            f"Cannot send message, room ({room}) and/or author ({author}) missing. Message is '{message}'"
        )
        return False

    # [ECS][FIXME]: Author should not be altered here. It'd be better to receive it configured from the client
    author["name"] = kwargs.get("author_name") or author.get("name")
    connection_func = build_sender_function(settings["live"])
    logging.debug("Sending message '{}' from {} to {}".format(message, author, room))
    format_and_send(message, room, author, connection_func=connection_func)
    return True

def maybe_send_chat_message(message, timestamp, settings, **kwargs):
    output_settings = settings["output"]
    author = output_settings.get("author")
    room = kwargs.get("room", output_settings.get("room"))

    shall_send_message = (room is not None) and (author is not None)
    if not shall_send_message:
        logging.warning(
            f"Cannot send message, room ({room}) and/or author ({author}) missing"
        )
        return False

    author["name"] = kwargs.get("author_name") or author.get("name")
    connection_func = build_sender_function(settings["live"])
    logging.debug("Sending message '{}' from {} to {}".format(message, author, room))
    format_and_send(
        message, room, author, timestamp=timestamp, connection_func=connection_func
    )
    return True

async def read_results(url, channels, output_queue):
    setproctitle("live-client: cometd client for channels {}".format(channels))

    with ensure_timeout(3.05):
        with start_action(action_type="query.read_results", url=url, channels=channels):
            # Connect to the server
            async with Client(url) as client:
                for channel in channels:
                    logging.debug(f"Subscribing to '{channel}'")
                    await client.subscribe(channel)

                # Listen for incoming messages
                async for message in client:
                    logging.debug(f"New message '{message}'")
                    output_queue.put(message)

                    # Exit after the query has stopped
                    event_data = message.get("data", {})
                    event_type = event_data.get("type")
                    if event_type == EVENT_TYPE_DESTROY:
                        return

def update_chat(chat, last_ts, next_ts, index_mnemonic, settings):
    if not chat:
        return

    items_to_send = []
    for item in chat:
        item_index = int(item.get(index_mnemonic, -1))
        if last_ts <= item_index < next_ts:
            items_to_send.append(item)
        elif item_index > next_ts:
            break

    logging.debug(
        "{} messages between {} and {}".format(len(items_to_send), last_ts, next_ts)
    )

    for item in items_to_send:
        message = item.get("MESSAGE", "")
        source = item.get("SOURCE", "")
        if message and source:
            messenger.maybe_send_chat_message(
                message, timestamp.get_timestamp(), settings, author_name=source
            )

def format_and_send(annotation_data, settings, connection_func=None):
    timestamp = get_timestamp()
    event = format_event(timestamp, annotation_data, settings)
    logging.debug("Creating annotation {}".format(event))
    connection_func(event)

def create(event_type, event_data, settings):
    connection_func = autodetect.build_sender_function(settings["live"])
    logging.debug(f'Creating raw event of type "{event_type}": {event_data}')
    format_and_send(event_data, event_type, connection_func=connection_func)

def start(settings, **kwargs):
    """
    Monitors trades of a set of (crypto)currency pairs using the `kraken.com`
    public API. For each trade detected, a new event is sent to live.
    """
    setproctitle("DDA: Currency trades datasource")

    # Input settings
    krakenfx_url = settings.get("krakenfx_url", "wss://ws.kraken.com/")
    pairs = settings.get("pairs", ["ETH/USD", "XBT/USD"])
    timeout = settings.get("timeout", 30)

    # Output settings
    event_type = settings.get("output", {}).get("event_type", "dda_crypto_trades")
    skipstorage = settings.get("output", {}).get("skipstorage", True)

    state_manager = kwargs.get("state_manager")
    state = state_manager.load()
    last_trades = state.get("last_trades", {})

    # A separate process is used to fetch data from kraken
    kraken_process, results_queue = get_trades(krakenfx_url, pairs)

    # Handle the events received from kraken
    while True:
        try:
            trade_data = json.loads(results_queue.get(timeout=timeout))
        except queue.Empty:
            logging.exception(f"No results after {timeout} seconds")
            break

        # We are only interested in trade events
        is_trade = isinstance(trade_data, list) and len(trade_data) == 4
        if is_trade:
            # Prepare an event
            channel_id, operation_data, operation_type, pair = trade_data
            operations = [
                {
                    "price": item[0],
                    "volume": item[1],
                    "time": item[2],
                    "side": item[3],
                    "orderType": item[4],
                    "misc": item[5],
                }
                for item in operation_data
            ]
            trade_event = {
                "channel_id": channel_id,
                "operations": operations,
                "operation_type": operation_type,
                "pair": pair,
                "__skipstorage": skipstorage,
            }

            # And send to live
            raw.create(event_type, trade_event, settings)

            # Update this datasource's state with the last trade for each pair.
            # This might be useful if you needed to restore this state
            # when the datasource is restarted
            last_trades[pair] = trade_event
            state_manager.save({"last_trades": last_trades})
        else:
            logging.debug(f"Ignoring event {trade_data}")
            continue

    # Release resources on exit
    results_queue.close()
    kraken_process.terminate()
    kraken_process.join()

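# Configuration sketch for this datasource. The values below are the defaults
# read by start() above; a state_manager instance is also expected in kwargs.
#
#   settings = {
#       "krakenfx_url": "wss://ws.kraken.com/",
#       "pairs": ["ETH/USD", "XBT/USD"],
#       "timeout": 30,
#       "output": {"event_type": "dda_crypto_trades", "skipstorage": True},
#   }
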