def list_assets(settings, asset_type=None):
    live_settings = settings["live"]
    url = live_settings["url"]

    data = []
    if asset_type in ALL_ASSET_TYPES:
        chosen_asset_types = [asset_type]
    elif asset_type is None:
        chosen_asset_types = ALL_ASSET_TYPES
    else:
        chosen_asset_types = []

    for atype in chosen_asset_types:
        asset_url = f"{url}/services/plugin-liverig/assets/{atype}"
        try:
            response_data = http.request_with_timeout(asset_url, settings)
            if response_data is not None:
                for asset in response_data:
                    asset["asset_type"] = atype
                    data.append(asset)
        except Exception:
            logging.exception(f"Error fetching asset list for {atype}")

    return data
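
# Example usage of `list_assets` (a sketch; the URL and the asset type value are
# hypothetical, only the settings shape is taken from the function above):
#
#     settings = {"live": {"url": "https://live.example.com"}}
#     rigs = list_assets(settings, asset_type="rig")   # a single known type
#     everything = list_assets(settings)               # all of ALL_ASSET_TYPES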
def wrapper(*args, **kwargs):
    results_process, results_queue = run(
        statement, settings, realtime=realtime, timeout=timeout, **query_args
    )

    last_result = None
    while True:
        try:
            event = results_queue.get(timeout=timeout)
        except queue.Empty:
            logging.exception(f"No results after {timeout} seconds")
            break

        event_type = event.get("data", {}).get("type")
        if event_type == EVENT_TYPE_DESTROY:
            break
        elif event_type != EVENT_TYPE_EVENT:
            continue

        last_result = f(event, *args, **kwargs)

    results_process.join()
    return last_result
def monitor_processes(process_map: Mapping, heartbeat_interval: int = 60) -> Iterable:
    while True:
        for name, process_data in process_map.items():
            process = process_data.process
            if process and process.is_alive():
                logging.info(f'Process for "{name}" (pid={process.pid}) is alive')
            else:
                if process:
                    logging.info(
                        f'Process for "{name}" (pid={process.pid}) has died. Restarting'
                    )
                else:
                    logging.info(f'Starting "{name}" using {process_data.function}')

                process = process_data.function(process_data.settings)
                try:
                    process.start()
                    logging.info(f'Process for "{name}" (pid={process.pid}) started')
                except OSError as e:
                    logging.exception(f"Error starting process {name} ({e})")

                process_data.process = process

        sleep(heartbeat_interval)

    # Not reached while the loop above runs forever; kept for interface completeness.
    # `process_map` values are accessed by attribute, matching the loop above
    running_processes = [item.process for item in process_map.values()]
    return running_processes
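
# `monitor_processes` reads `process`, `function` and `settings` attributes from the
# values of `process_map`. A minimal sketch of such a container (the name
# `ProcessData` is an assumption, not taken from the code above):
from dataclasses import dataclass, field
from typing import Callable, Optional
import multiprocessing as mp


@dataclass
class ProcessData:
    function: Callable[[dict], mp.Process]  # builds (but does not start) a process
    settings: dict = field(default_factory=dict)
    process: Optional[mp.Process] = None  # filled in by `monitor_processes`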
def make_request(url, settings, timeout=None, max_retries=0, handle_errors=True):
    live_settings = settings["live"]
    verify_ssl = live_settings.get("verify_ssl", True)
    if "session" not in live_settings:
        live_settings.update(session=build_session(live_settings))
    session = live_settings["session"]

    with start_action(action_type="make request", url=url):
        with retry_on_failure(timeout, max_retries=max_retries):
            try:
                response = session.get(url, verify=verify_ssl)
                response.raise_for_status()

                # `Content-Type` may be absent; default to an empty string so the
                # membership test below cannot raise a TypeError
                content_type = response.headers.get("Content-Type", "")
                if "text/plain" in content_type:
                    result = response.text
                else:
                    result = response.json()
            except RequestException as e:
                if handle_errors:
                    logging.exception(f"Error during request for {url}, {e}<{type(e)}>")
                    result = None
                else:
                    raise

    return result
def wrapper(*args, **kwargs):
    results_process, results_queue = run(
        statement, settings, realtime=realtime, timeout=timeout, **query_args
    )

    last_result = None
    while True:
        try:
            event = results_queue.get(timeout=timeout)
            event_type = event.get("data", {}).get("type")
            if event_type == EVENT_TYPE_EVENT:
                last_result = f(event, *args, **kwargs)
            elif event_type == EVENT_TYPE_DESTROY:
                break
            else:
                if event_type != EVENT_TYPE_SPAN:
                    logging.info(f"Got event with type={event_type}")
                continue
        except queue.Empty:
            logging.exception(f"No results after {timeout} seconds")
            break
        except EOFError as e:
            logging.exception(f"Connection lost: {e}")
            break

    # Release resources after the query ends
    results_queue.close()
    results_process.terminate()
    results_process.join()
    return last_result
def handle_events(event, callback, settings, accumulator=None):
    monitor_settings = settings.get("monitor", {})
    window_duration = monitor_settings.get("window_duration", 60)
    mnemonics = monitor_settings.get("mnemonics", {})
    index_mnemonic = mnemonics.get("index", "timestamp")

    if accumulator is None:
        accumulator = []

    try:
        latest_data, missing_curves = validate_event(event, settings)

        if latest_data:
            accumulator, start, end = refresh_accumulator(
                latest_data, accumulator, index_mnemonic, window_duration
            )
            if accumulator:
                callback(accumulator)
        elif missing_curves:
            missing_curve_names = ", ".join(missing_curves)
            logging.info(
                f"Some curves are missing ({missing_curve_names}) from event {event}"
            )
    except Exception as e:
        logging.exception(f"Error during query: <{e}>")
        # Retry the same event with a fresh accumulator; note that an event
        # which fails persistently will keep recursing here
        handle_events(event, callback, settings)

    return
def __init__(self, message_event):
    created_at = None
    try:
        author_timezone = timezone(message_event.get("timeZone", "UTC"))
        created_at = author_timezone.localize(
            datetime.fromtimestamp(int(message_event.get("createdAt")) / 1000)
        )
    except (TypeError, ValueError) as e:
        logging.exception(e)

    message_event.update(text=message_event.get("message", ""), created_at=created_at)
    super().__init__(message_event)
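
# A sketch of the event shape this constructor expects (the field values are
# hypothetical; `createdAt` is epoch milliseconds and `timeZone` a pytz
# timezone name, per the parsing above):
#
#     message_event = {
#         "message": "circulating",
#         "timeZone": "America/Sao_Paulo",
#         "createdAt": 1559329585000,
#     }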
def fetch_resource(path, settings, handle_errors=True):
    live_settings = settings["live"]
    url = live_settings["url"]
    items_url = f"{url}{path}"

    items_list = []
    try:
        items_list = http.request_with_timeout(items_url, settings, handle_errors=handle_errors)
    except Exception:
        if handle_errors:
            logging.exception(f"Error fetching {items_url}")
        else:
            raise

    return items_list
def wrapped(*args, **kwargs):
    task_id = kwargs.get("task_id")
    if task_id:
        action = Action.continue_task(task_id=task_id)
    else:
        action = start_action(action_type=name)

    with action.context():
        task_id = action.serialize_task_id()
        kwargs["task_id"] = task_id

        if with_state:
            kwargs["state_manager"] = StateManager(name)

        try:
            return f(*args, **kwargs)
        except Exception as e:
            logging.exception(f"Error during the execution of {f}: <{e}>")
        finally:
            # Finish the eliot action whether `f` returned or raised
            action.finish()
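
# `wrapped` closes over `f`, `name` and `with_state`, so it is presumably returned
# by a decorator factory; given that `inside_action` is called further below with
# exactly those arguments, this is likely its inner function. A sketch of the
# assumed enclosing factory:
from functools import wraps


def inside_action(f, name=None, with_state=False):
    @wraps(f)
    def wrapped(*args, **kwargs):
        ...  # body as defined above

    return wrapped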
def send_event(event, live_settings=None):
    if live_settings is None:
        live_settings = {}
    if "session" not in live_settings:
        live_settings.update(session=build_session(live_settings))

    session = live_settings["session"]
    verify_ssl = live_settings.get("verify_ssl", True)
    url = f"{live_settings['url']}{live_settings['rest_input']}"

    if not event:
        return

    try:
        with retry_on_failure(3.05, max_retries=5):
            response = session.post(url, json=event, verify=verify_ssl)
            response.raise_for_status()
    except RequestException as e:
        logging.exception("ERROR: Cannot send event, {}<{}>".format(e, type(e)))
        logging.exception("Event data: {}".format(event))
        raise
def run(self):
    with Action.continue_task(task_id=self.task_id):
        try:
            with open(self.settings_file, "r") as fd:
                global_settings = json.load(fd)

            logging_settings = global_settings.get("logging")
            live_settings = global_settings.get("live")
            logging.setup_python_logging(logging_settings)
            logging.setup_live_logging(logging_settings, live_settings)

            agent_processes = processes.start(global_settings)
        except KeyboardInterrupt:
            logging.info("Execution interrupted")
            raise
        except Exception:
            logging.exception("Error processing inputs")
            raise

        for item in agent_processes:
            item.terminate()
            item.join()
def send_event(event, live_settings=None):
    if live_settings is None:
        live_settings = {}

    ip = live_settings["ip"]
    port = live_settings["port"]

    if not event:
        return

    message = "{}\n".format(json.dumps(event))
    python_version = sys.version_info.major
    if python_version == 3:
        message = bytes(message, "utf-8")

    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        sock.connect((ip, port))
        sock.sendall(message)
    except socket.error:
        logging.exception("ERROR: Cannot send event, server unavailable")
        logging.exception("Event data: {}".format(message))
    finally:
        sock.close()
def start(settings, **kwargs):
    """
    Monitors trades of a set of (crypto)currency pairs using `kraken.com` public api.
    For each trade detected, a new event is sent to live.
    """
    setproctitle("DDA: Currency trades datasource")

    # Input settings
    krakenfx_url = settings.get("krakenfx_url", "wss://ws.kraken.com/")
    pairs = settings.get("pairs", ["ETH/USD", "XBT/USD"])
    timeout = settings.get("timeout", 30)

    # Output settings
    event_type = settings.get("output", {}).get("event_type", "dda_crypto_trades")
    skipstorage = settings.get("output", {}).get("skipstorage", True)

    state_manager = kwargs.get("state_manager")
    state = state_manager.load()
    last_trades = state.get("last_trades", {})

    # A separate process is used to fetch data from kraken
    kraken_process, results_queue = get_trades(krakenfx_url, pairs)

    # Handle the events received from kraken
    while True:
        try:
            trade_data = json.loads(results_queue.get(timeout=timeout))
        except queue.Empty:
            logging.exception(f"No results after {timeout} seconds")
            break

        # We are only interested in trade events
        is_trade = isinstance(trade_data, list) and len(trade_data) == 4
        if is_trade:
            # Prepare an event
            channel_id, operation_data, operation_type, pair = trade_data
            operations = [
                {
                    "price": item[0],
                    "volume": item[1],
                    "time": item[2],
                    "side": item[3],
                    "orderType": item[4],
                    "misc": item[5],
                }
                for item in operation_data
            ]
            trade_event = {
                "channel_id": channel_id,
                "operations": operations,
                "operation_type": operation_type,
                "pair": pair,
                "__skipstorage": skipstorage,
            }

            # And send to live
            raw.create(event_type, trade_event, settings)

            # Update this datasource's state with the last trade for each pair,
            # keyed by the pair name. This might be useful if you needed to
            # restore this state when the datasource is restarted
            last_trades[pair] = trade_event
            state_manager.save({"last_trades": last_trades})
        else:
            logging.debug(f"Ignoring event {trade_data}")
            continue

    # Release resources on exit
    results_queue.close()
    kraken_process.terminate()
    kraken_process.join()
    return
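
# A sketch of the settings this datasource reads; the values shown are the
# defaults already present in the function above:
#
#     {
#         "krakenfx_url": "wss://ws.kraken.com/",
#         "pairs": ["ETH/USD", "XBT/USD"],
#         "timeout": 30,
#         "output": {"event_type": "dda_crypto_trades", "skipstorage": true}
#     }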
def wrapped(*args, **kwargs):
    try:
        f_in_action = inside_action(f, name=name, with_state=with_state)
        return mp.Process(target=f_in_action, args=args, kwargs=kwargs)
    except Exception as e:
        logging.exception(f"Error during the execution of {f}: <{e}>")
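
# This factory pairs naturally with `monitor_processes` above: calling the wrapped
# function returns an unstarted `mp.Process`, which the monitor then starts and
# restarts. A hedged usage sketch (`agent_function` and the factory name
# `as_process` are hypothetical):
#
#     process_map = {
#         "my-agent": ProcessData(
#             function=as_process(agent_function, name="my-agent"),
#             settings={"live": {"url": "https://live.example.com"}},
#         )
#     }
#     monitor_processes(process_map)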