def _connect(self, server_name: str, server_url: str) -> Optional[Tuple[str, GMatrixHttpApi]]:
    """Log in to ``server_url`` and return ``(server_name, api)``, or None on failure.

    For the own server the configured credentials are used; for any other
    (federated) server, credentials are derived from a fresh signer.
    """
    log.debug("Connecting", server=server_name)
    api = GMatrixHttpApi(server_url)
    if server_name == self._own_server_name:
        username, password = self._username, self._password
    else:
        # Federated server: address is the username, the signature over the
        # server name is the password.
        signer = make_signer()
        username = str(to_normalized_address(signer.address))
        password = encode_hex(signer.sign(server_name.encode()))
    try:
        login_response = api.login(
            "m.login.password", user=username, password=password, device_id="room_ensurer"
        )
    except MatrixHttpLibError:
        log.warning("Could not connect to server", server_url=server_url)
        return None
    except MatrixRequestError:
        log.warning("Failed to login to server", server_url=server_url)
        return None
    api.token = login_response["access_token"]
    log.debug("Connected", server=server_name)
    return server_name, api
def _connect(self, server_name: str, server_url: str) -> Tuple[str, GMatrixHttpApi]:
    """Create an API client for ``server_url`` and log in with the stored credentials."""
    log.debug("Connecting", server=server_name)
    api = GMatrixHttpApi(server_url)
    login_response = api.login(
        "m.login.password", user=self._username, password=self._password
    )
    # The returned access token must be attached to the client for
    # subsequent authenticated requests.
    api.token = login_response["access_token"]
    log.debug("Connected", server=server_name)
    return server_name, api
def matrix_api_shell(address, password, server):
    """Open an IPython shell with an authenticated matrix ``api`` object for ``server``."""
    accounts = AccountManager(os.path.expanduser("~/.ethereum/keystore"))
    signer = LocalSigner(accounts.get_privkey(to_checksum_address(address), password))
    # The matrix password is the signature over the bare server name (scheme stripped).
    server_name = server.split("//")[1]
    matrix_password = encode_hex(signer.sign(server_name.encode()))
    api = GMatrixHttpApi(server)
    login_response = api.login(
        "m.login.password",
        user=to_normalized_address(address),
        password=matrix_password,
    )
    api.token = login_response["access_token"]
    IPython.embed(
        header=f"Use the `api` object to interact with matrix on {server}.")
def _connect(self, server_name: str, server_url: str) -> Tuple[str, GMatrixHttpApi]:
    """Authenticate against ``server_url`` and return ``(server_name, api)``.

    Own server uses the configured credentials; federated servers get
    signer-derived credentials (address as user, signed server name as password).
    """
    log.debug("Connecting", server=server_name)
    api = GMatrixHttpApi(server_url)
    if server_name == self._own_server_name:
        username, password = self._username, self._password
    else:
        signer = make_signer()
        username = str(to_normalized_address(signer.address))
        password = encode_hex(signer.sign(server_name.encode()))
    login_response = api.login("m.login.password", user=username, password=password)
    api.token = login_response["access_token"]
    log.debug("Connected", server=server_name)
    return server_name, api
def _update_user_activity_for_network(api: GMatrixHttpApi, user_activity: Dict[str, Any], current_time: int) -> List[str]:
    """Refresh last-seen timestamps for candidate users and return those due for purging.

    Only users whose recorded last-seen time is already past the purging
    deadline are queried, which spreads the presence-API load across daily
    runs instead of hitting the server for every user at once.
    """
    deadline = current_time - USER_PURGING_THRESHOLD
    due_users: List[str] = []
    stale_users = [
        user_id
        for user_id, last_seen in user_activity.items()
        if last_seen < deadline
    ]
    for user_id in stale_users:
        try:
            response = api.get_presence(user_id)
            # in rare cases there is no last_active_ago sent; fall back to a
            # value just past the threshold so the user stays a candidate
            inactive_seconds = (
                response["last_active_ago"] // 1000
                if "last_active_ago" in response
                else USER_PURGING_THRESHOLD + 1
            )
            user_activity[user_id] = current_time - inactive_seconds
            if user_activity[user_id] < deadline and response["presence"] == "offline":
                due_users.append(user_id)
        except MatrixError as ex:
            click.secho(f"Could not fetch user presence of {user_id}: {ex}")
        finally:
            # throttle requests to be gentle on the server
            time.sleep(0.1)
    return due_users
def get_network_to_broadcast_rooms(api: GMatrixHttpApi) -> Dict[str, List[RoomInfo]]:
    """Resolve, per network, the broadcast rooms reachable on this server.

    Rooms whose alias cannot be resolved are reported and skipped.
    """
    room_alias_fragments = [
        DISCOVERY_DEFAULT_ROOM,
        PATH_FINDING_BROADCASTING_ROOM,
        MONITORING_BROADCASTING_ROOM,
    ]
    server = urlparse(api.base_url).netloc
    network_to_broadcast: Dict[str, List[RoomInfo]] = {}
    for network in Networks:
        rooms_for_network: List[RoomInfo] = []
        network_to_broadcast[str(network.value)] = rooms_for_network
        for fragment in room_alias_fragments:
            broadcast_room_alias = make_room_alias(ChainID(network.value), fragment)
            local_room_alias = f"#{broadcast_room_alias}:{server}"
            try:
                room_id = api.get_room_id(local_room_alias)
            except MatrixError as ex:
                click.secho(
                    f"Could not find room {broadcast_room_alias} with error {ex}"
                )
                continue
            rooms_for_network.append(RoomInfo(room_id, broadcast_room_alias, server))
    return network_to_broadcast
def _fetch_new_members_for_network(api: GMatrixHttpApi, user_activity: Dict[str, int], discovery_room: RoomInfo, current_time: int) -> None:
    """Seed previously unknown local discovery-room members into ``user_activity``.

    New members receive an already-overdue timestamp so the next presence
    sweep checks them. Admin users and remote-server users are ignored.
    """
    overdue_timestamp = current_time - USER_PURGING_THRESHOLD - 1
    try:
        response = api._send(
            "GET", f"/_synapse/admin/v1/rooms/{discovery_room.room_id}/members")
        server_name = urlparse(api.base_url).netloc
        for member in response["members"]:
            # only local, non-admin users
            if member.split(":")[1] != server_name or member.startswith("@admin"):
                continue
            # unknown members get an overdue activity time to trigger a
            # presence update later
            user_activity.setdefault(member, overdue_timestamp)
    except MatrixError as ex:
        click.secho(
            f"Could not fetch members for {discovery_room.alias} with error {ex}"
        )
def _purge_inactive_users_for_network(
    api: GMatrixHttpApi,
    user_activity: Dict[str, int],
    due_users: List[str],
    broadcast_rooms: List[RoomInfo],
) -> None:
    """Kick every due user from all broadcast rooms and drop them from the activity map.

    :param api: authenticated matrix client
    :param user_activity: user_id -> last-seen unix timestamp; successfully
        kicked users are removed so they are not processed again
    :param due_users: users deemed inactive/offline past the threshold
    :param broadcast_rooms: rooms to kick the users from
    """
    for user_id in due_users:
        # Compute the offline duration once, BEFORE the user can be removed
        # from user_activity. The previous code looked it up inside the room
        # loop and popped the entry after the first successful kick, raising
        # an uncaught KeyError on the second broadcast room.
        last_ago = (int(time.time()) - user_activity[user_id]) / (60 * 60 * 24)
        kicked_from_any_room = False
        for room in broadcast_rooms:
            try:
                api.kick_user(room.room_id, user_id)
                kicked_from_any_room = True
                click.secho(
                    f"{user_id} kicked from room {room.alias}. Offline for {last_ago} days."
                )
            except MatrixError as ex:
                click.secho(f"Could not kick user from room {room.alias} with error {ex}")
            finally:
                # throttle requests to be gentle on the server
                time.sleep(0.1)
        if kicked_from_any_room:
            # Remove the user from the user_activity file only after a kick
            # succeeded, so fully failed users are retried on the next run
            # (matches the original pop-on-success intent).
            user_activity.pop(user_id, None)
def _purge_inactive_users_for_network(
    api: GMatrixHttpApi,
    user_activity: Dict[str, int],
    due_users: List[str],
) -> None:
    """Deactivate every due account and remove it from the activity map."""
    seconds_per_day = 60 * 60 * 24
    for user_id in due_users:
        try:
            last_ago = (int(time.time()) - user_activity[user_id]) / seconds_per_day
            # Synapse admin API: deactivate the account and erase its data,
            # then forget the user in the user_activity file.
            api._send(
                "POST",
                f"/deactivate/{user_id}",
                content={"erase": True},
                api_path="/_synapse/admin/v1",
            )
            user_activity.pop(user_id, None)
            click.secho(f"{user_id} deleted. Offline for {last_ago} days.")
        except MatrixError as ex:
            click.secho(f"Could not delete user {user_id} with error {ex}")
        finally:
            # throttle requests to be gentle on the server
            time.sleep(0.1)
def get_discovery_room(api: GMatrixHttpApi, network_value: int) -> Optional[RoomInfo]:
    """Resolve this server's discovery room for ``network_value``, or None if absent."""
    server = urlparse(api.base_url).netloc
    discovery_room_alias = make_room_alias(ChainID(network_value), DISCOVERY_DEFAULT_ROOM)
    try:
        room_id = api.get_room_id(f"#{discovery_room_alias}:{server}")
    except MatrixError as ex:
        click.secho(
            f"Could not find room {discovery_room_alias} with error {ex}")
        return None
    return RoomInfo(room_id, discovery_room_alias, server)
def _fetch_new_members_for_network(
    api: GMatrixHttpApi, user_activity: Dict[str, int], discovery_room: RoomInfo, current_time: int
) -> None:
    """Seed previously unknown local discovery-room members into ``user_activity``.

    New members receive an already-overdue timestamp so the next presence
    sweep checks them. Remote-server users and admin users are ignored.
    """
    try:
        response = api.get_room_members(discovery_room.room_id)
        server_name = urlparse(api.base_url).netloc
        room_members = [
            event["state_key"]
            for event in response["chunk"]
            if event["content"]["membership"] == "join"
            and event["state_key"].split(":")[1] == server_name
            # BUG FIX: `not state_key.find("admin")` is only true when "admin"
            # is at index 0, which never happens for "@"-prefixed user IDs, so
            # the old filter rejected EVERY member. Use startswith("@admin"),
            # matching the synapse-admin-API variant of this function.
            and not event["state_key"].startswith("@admin")
        ]
        # Add new members with an overdue activity time
        # to trigger presence update later
        for user_id in room_members:
            if user_id not in user_activity:
                user_activity[user_id] = current_time - USER_PURGING_THRESHOLD - 1
    except MatrixError as ex:
        click.secho(f"Could not fetch members for {discovery_room.alias} with error {ex}")
def purge(
    db_uri: str,
    server: str,
    credentials_file: TextIO,
    keep_newer: int,
    keep_min_msgs: int,
    parallel_purges: int,
    post_sql: TextIO,
    docker_restart_label: str,
) -> None:
    """ Purge historic data from rooms in a synapse server

    DB_URI: DB connection string: postgres://user:password@netloc:port/dbname
    SERVER: matrix synapse server url, e.g.: http://hostname

    All option can be passed through uppercase environment variables prefixed with 'MATRIX_'
    e.g.: export MATRIX_KEEP_MIN_MSGS=100
    """
    session = requests.Session()

    # Read admin credentials from the JSON credentials file; abort on any
    # parse/IO/missing-key problem.
    try:
        credentials = json.loads(credentials_file.read())
        username = credentials["username"]
        password = credentials["password"]
    except (JSONDecodeError, UnicodeDecodeError, OSError, KeyError) as ex:
        click.secho(f"Invalid credentials file: {ex}", fg="red")
        sys.exit(1)

    # Log in once as admin; the raw access token is reused for the
    # admin purge-history endpoints below.
    api = GMatrixHttpApi(server)
    try:
        response = api.login("m.login.password", user=username, password=password)
        admin_access_token = response["access_token"]
    except (MatrixError, KeyError) as ex:
        click.secho(f"Could not log in to server {server}: {ex}")
        sys.exit(1)

    try:
        with psycopg2.connect(db_uri) as db, db.cursor() as cur:
            # room_id -> purge_id of purges currently running on the server
            purges: Dict[str, str] = dict()

            def wait_and_purge_room(room_id: Optional[str] = None, event_id: Optional[str] = None) -> None:
                """ Wait for available slots in parallel_purges and purge room

                If room_id is None, just wait for current purges to complete and return
                If event_id is None, purge all events in room
                """
                # Throttle: with a room_id, wait until fewer than
                # parallel_purges are in flight; with room_id=None, drain
                # until no purge is left.
                while len(purges) >= (parallel_purges if room_id else 1):
                    # wait and clear completed purges
                    time.sleep(1)
                    for _room_id, purge_id in list(purges.items()):
                        response = session.get(
                            urljoin(
                                server,
                                "/_matrix/client/r0/admin/purge_history_status/" + quote(purge_id),
                            ),
                            params={"access_token": admin_access_token},
                        )
                        assert response.status_code == 200, f"{response!r} => {response.text!r}"
                        if response.json()["status"] != "active":
                            click.secho(
                                f"Finished purge: room {_room_id!r}, purge {purge_id!r}"
                            )
                            purges.pop(_room_id)
                if not room_id:
                    return
                # Purge either up to a specific event or up to "now".
                body: Dict[str, Any] = {"delete_local_events": True}
                if event_id:
                    body["purge_up_to_event_id"] = event_id
                else:
                    body["purge_up_to_ts"] = int(time.time() * 1000)
                response = session.post(
                    urljoin(
                        server,
                        "/_matrix/client/r0/admin/purge_history/" + quote(room_id)),
                    params={"access_token": admin_access_token},
                    json=body,
                )
                if response.status_code == 200:
                    purge_id = response.json()["purge_id"]
                    purges[room_id] = purge_id
                return

            # Purging everything is destructive — require explicit confirmation.
            if not keep_newer and not keep_min_msgs:
                click.confirm(
                    "No --keep-newer nor --keep-min-msgs option provided. Purge all history?",
                    abort=True,
                )
            ts_ms = None
            if keep_newer:
                # keep_newer is a number of days; convert the cutoff to ms.
                ts = datetime.datetime.now() - datetime.timedelta(keep_newer)
                ts_ms = int(ts.timestamp() * 1000)

            cur.execute("SELECT room_id FROM rooms ;")
            all_rooms = {row for row, in cur}

            click.secho(f"Processing {len(all_rooms)} rooms")

            for room_id in all_rooms:
                # no --keep-min-msgs nor --keep-newer, purge everything
                if not keep_newer and not keep_min_msgs:
                    wait_and_purge_room(room_id)
                    continue
                # Find the newest message that is old enough (and deep enough
                # in the history) to serve as the purge-up-to event. The
                # f-string only splices fixed SQL fragments; all values go
                # through psycopg2 parameters.
                cur.execute(
                    f"""
                    SELECT event_id FROM (
                        SELECT event_id, received_ts, COUNT(*) OVER (ORDER BY received_ts DESC) AS msg_count_above
                        FROM events
                        WHERE room_id=%(room_id)s AND type='m.room.message'
                        ORDER BY received_ts DESC
                    ) t WHERE true
                    {'AND received_ts < %(ts_ms)s' if keep_newer else ''}
                    {'AND msg_count_above > %(keep_min_msgs)s' if keep_min_msgs else ''}
                    LIMIT 1 ;""",
                    {
                        "room_id": room_id,
                        "ts_ms": ts_ms,
                        "keep_min_msgs": keep_min_msgs
                    },
                )
                if cur.rowcount:
                    event_id, = cur.fetchone()
                    wait_and_purge_room(room_id, event_id)
                # else: room doesn't have messages eligible for purging, skip

            # Drain all remaining purges before moving on.
            wait_and_purge_room(None)

        if post_sql:
            # Optional maintenance SQL (e.g. VACUUM) after the purges.
            click.secho(f"Running {post_sql.name!r}")
            with psycopg2.connect(db_uri) as db, db.cursor() as cur:
                cur.execute(post_sql.read())
                click.secho(f"Results {cur.rowcount}:")
                for i, row in enumerate(cur):
                    click.secho(f"{i}: {row}")
    finally:
        # Always runs, even after an error above: optionally restart labelled
        # synapse containers whose federation whitelist is outdated.
        if docker_restart_label:
            client = docker.from_env()
            for container in client.containers.list():
                if container.attrs["State"][
                        "Status"] != "running" or not container.attrs[
                            "Config"]["Labels"].get(docker_restart_label):
                    continue
                try:
                    # parse container's env vars
                    env_vars: Dict[str, Any] = dict(
                        itemgetter(0, 2)(e.partition("="))
                        for e in container.attrs["Config"]["Env"])
                    remote_config_file = (
                        env_vars.get("URL_KNOWN_FEDERATION_SERVERS")
                        or URL_KNOWN_FEDERATION_SERVERS_DEFAULT)

                    # fetch remote file
                    # NOTE(review): yaml.load without an explicit Loader is
                    # deprecated and unsafe on untrusted input — consider
                    # yaml.safe_load here and below.
                    remote_whitelist = yaml.load(
                        requests.get(remote_config_file).text)

                    # fetch local list from container's synapse config
                    local_whitelist = yaml.load(
                        container.exec_run([
                            "cat", SYNAPSE_CONFIG_PATH
                        ]).output)["federation_domain_whitelist"]

                    # if list didn't change, don't proceed to restart container
                    if local_whitelist and remote_whitelist == local_whitelist:
                        continue

                    click.secho(
                        f"Whitelist changed. Restarting. new_list={remote_whitelist!r}"
                    )
                except (
                        KeyError,
                        IndexError,
                        requests.RequestException,
                        yaml.scanner.ScannerError,
                ) as ex:
                    # Best effort: if the whitelist check itself fails, restart
                    # anyway rather than leave a possibly-stale container.
                    click.secho(
                        f"An error ocurred while fetching whitelists: {ex!r}\n"
                        "Restarting anyway",
                        err=True,
                    )
                # restart container
                container.restart(timeout=30)
def purge(server: str, credentials_file: TextIO, docker_restart_label: str) -> None:
    """ Purge inactive users from broadcast rooms

    SERVER: matrix synapse server url, e.g.: http://hostname

    All option can be passed through uppercase environment variables prefixed with 'MATRIX_'
    """
    # Read admin credentials from the JSON credentials file; abort on any
    # parse/IO/missing-key problem.
    try:
        credentials = json.loads(credentials_file.read())
        username = credentials["username"]
        password = credentials["password"]
    except (JSONDecodeError, UnicodeDecodeError, OSError, KeyError) as ex:
        click.secho(f"Invalid credentials file: {ex}", fg="red")
        sys.exit(1)

    api = GMatrixHttpApi(server)
    try:
        # BUG FIX: the login response was previously discarded, so api.token
        # was never set and subsequent authenticated requests (presence
        # lookups, kicks) would fail. Store the access token on the client,
        # matching every other login site in these tools.
        response = api.login("m.login.password", user=username, password=password)
        api.token = response["access_token"]
    except (MatrixError, KeyError) as ex:
        click.secho(f"Could not log in to server {server}: {ex}")
        sys.exit(1)
    try:
        # Default activity state: one overdue "last_update" so a full sweep
        # happens on the first run, and an empty per-network user map.
        global_user_activity = {
            "last_update": int(time.time()) - USER_PURGING_THRESHOLD - 1,
            "network_to_users": {},
        }
        try:
            global_user_activity = json.loads(USER_ACTIVITY_PATH.read_text())
        except JSONDecodeError:
            click.secho(
                f"{USER_ACTIVITY_PATH} is not a valid JSON. Starting with empty list"
            )
        except FileNotFoundError:
            click.secho(f"{USER_ACTIVITY_PATH} not found. Starting with empty list")

        # check if there are new networks to add
        for network in Networks:
            if str(network.value) in global_user_activity["network_to_users"]:
                continue
            global_user_activity["network_to_users"][str(network.value)] = dict()

        # get broadcast room ids for all networks
        network_to_broadcast_rooms = get_network_to_broadcast_rooms(api)
        new_global_user_activity = run_user_purger(
            api, global_user_activity, network_to_broadcast_rooms
        )

        # write the updated user activity to file
        USER_ACTIVITY_PATH.write_text(json.dumps(new_global_user_activity))
    finally:
        # Always runs, even after an error above: optionally restart labelled
        # synapse containers whose federation whitelist is outdated.
        if docker_restart_label:
            client = docker.from_env()  # type: ignore
            for container in client.containers.list():
                if container.attrs["State"][
                        "Status"] != "running" or not container.attrs[
                            "Config"]["Labels"].get(docker_restart_label):
                    continue
                try:
                    # parse container's env vars
                    env_vars: Dict[str, Any] = dict(
                        itemgetter(0, 2)(e.partition("="))
                        for e in container.attrs["Config"]["Env"])
                    remote_config_file = (
                        env_vars.get("URL_KNOWN_FEDERATION_SERVERS")
                        or URL_KNOWN_FEDERATION_SERVERS_DEFAULT)

                    # fetch remote file
                    # safe_load instead of the deprecated/unsafe plain
                    # yaml.load: these documents are plain lists/mappings and
                    # may come from a remote URL.
                    remote_whitelist = yaml.safe_load(
                        requests.get(remote_config_file).text)

                    # fetch local list from container's synapse config
                    local_whitelist = yaml.safe_load(
                        container.exec_run([
                            "cat", SYNAPSE_CONFIG_PATH
                        ]).output)["federation_domain_whitelist"]

                    # if list didn't change, don't proceed to restart container
                    if local_whitelist and remote_whitelist == local_whitelist:
                        continue

                    click.secho(
                        f"Whitelist changed. Restarting. new_list={remote_whitelist!r}"
                    )
                except (
                        KeyError,
                        IndexError,
                        requests.RequestException,
                        yaml.scanner.ScannerError,
                ) as ex:
                    # Best effort: if the whitelist check fails, restart anyway.
                    click.secho(
                        f"An error ocurred while fetching whitelists: {ex!r}\n"
                        "Restarting anyway",
                        err=True,
                    )
                # restart container
                container.restart(timeout=30)
def purge(
    server: str,
    credentials_file: TextIO,
    docker_restart_label: Optional[str],
    url_known_federation_servers: str,
) -> None:
    """ Purge inactive users from broadcast rooms

    SERVER: matrix synapse server url, e.g.: http://hostname

    All option can be passed through uppercase environment variables prefixed with 'MATRIX_'
    """
    # Read admin credentials from the JSON credentials file; abort on any
    # parse/IO/missing-key problem.
    try:
        credentials = json.loads(credentials_file.read())
        username = credentials["username"]
        password = credentials["password"]
    except (JSONDecodeError, UnicodeDecodeError, OSError, KeyError) as ex:
        click.secho(f"Invalid credentials file: {ex}", fg="red")
        sys.exit(1)

    # Log in and attach the access token for subsequent authenticated calls.
    api = GMatrixHttpApi(server)
    try:
        response = api.login("m.login.password",
                             user=username,
                             password=password,
                             device_id="purger")
        api.token = response["access_token"]
    except (MatrixError, KeyError) as ex:
        click.secho(f"Could not log in to server {server}: {ex}")
        sys.exit(1)
    try:
        # Default activity state: one overdue "last_update" so a full sweep
        # happens on the first run, and an empty per-network user map.
        global_user_activity: UserActivityInfo = {
            "last_update": int(time.time()) - USER_PURGING_THRESHOLD - 1,
            "network_to_users": {},
        }
        try:
            global_user_activity = json.loads(USER_ACTIVITY_PATH.read_text())
        except JSONDecodeError:
            click.secho(
                f"{USER_ACTIVITY_PATH} is not a valid JSON. Starting with empty list"
            )
        except FileNotFoundError:
            click.secho(f"{USER_ACTIVITY_PATH} not found. Starting with empty list")

        # check if there are new networks to add
        for network in Networks:
            if str(network.value) in global_user_activity["network_to_users"]:
                continue
            global_user_activity["network_to_users"][str(
                network.value)] = dict()

        new_global_user_activity = run_user_purger(api, global_user_activity)

        # write the updated user activity to file
        USER_ACTIVITY_PATH.write_text(
            json.dumps(cast(Dict[str, Any], new_global_user_activity)))
    finally:
        # Always runs, even after an error above: optionally restart labelled
        # synapse containers whose federation whitelist is outdated.
        if docker_restart_label:
            if not url_known_federation_servers:
                # In case an empty env var is set
                url_known_federation_servers = DEFAULT_MATRIX_KNOWN_SERVERS[
                    Environment.PRODUCTION]
            # fetch remote whitelist
            try:
                remote_whitelist = json.loads(
                    requests.get(
                        url_known_federation_servers).text)["all_servers"]
            except (requests.RequestException, JSONDecodeError, KeyError) as ex:
                click.secho(
                    f"Error while fetching whitelist: {ex!r}. "
                    f"Ignoring, containers will be restarted.",
                    err=True,
                )
                # An empty whitelist will cause the container to be restarted
                remote_whitelist = []

            client = docker.from_env()  # pylint: disable=no-member
            for container in client.containers.list():
                if container.attrs["State"][
                        "Status"] != "running" or not container.attrs[
                            "Config"]["Labels"].get(docker_restart_label):
                    continue
                try:
                    # fetch local list from container's synapse config
                    local_whitelist = yaml.safe_load(
                        container.exec_run([
                            "cat", SYNAPSE_CONFIG_PATH
                        ]).output)["federation_domain_whitelist"]

                    # if list didn't change, don't proceed to restart container
                    if local_whitelist and remote_whitelist == local_whitelist:
                        continue
                    click.secho(
                        f"Whitelist changed. Restarting. new_list={remote_whitelist!r}"
                    )
                except (KeyError, IndexError) as ex:
                    # Best effort: if the local check fails, restart anyway.
                    click.secho(
                        f"Error fetching container status: {ex!r}. Restarting anyway.",
                        err=True,
                    )
                # restart container
                container.restart(timeout=30)