def setUp(self):
    """Boot a Sygnal instance against an in-memory reactor for each test.

    Builds a stderr-only logging config, lets ``config_setup`` inject the
    database section, starts the database and the v1 push-gateway API, then
    pumps the fake clock until pushkin startup completes.
    """
    reactor = ExtendedMemoryReactorClock()

    logging_config = {
        "setup": {
            "disable_existing_loggers": False,  # otherwise this breaks logging!
            "formatters": {
                "normal": {
                    "format": "%(asctime)s [%(process)d] "
                    "%(levelname)-5s %(name)s %(message)s"
                }
            },
            "handlers": {
                "stderr": {
                    "class": "logging.StreamHandler",
                    "formatter": "normal",
                    "stream": "ext://sys.stderr",
                },
            },
            "loggers": {
                "sygnal": {"handlers": ["stderr"], "propagate": False},
                "sygnal.access": {
                    "handlers": ["stderr"],
                    "level": "INFO",
                    "propagate": False,
                },
            },
            "root": {"handlers": ["stderr"], "level": "DEBUG"},
            "version": 1,
        }
    }

    config = {"apps": {}, "log": logging_config}
    # Subclass hook: fills in the database section (and possibly apps).
    self.config_setup(config)

    config = merge_left_with_defaults(CONFIG_DEFAULTS, config)

    if USE_POSTGRES:
        # config_setup chose self.dbname; create that scratch database now.
        self._set_up_database(self.dbname)

    self.sygnal = Sygnal(config, reactor)
    self.reactor = reactor
    self.sygnal.database.start()
    self.v1api = PushGatewayApiServer(self.sygnal)

    start_deferred = ensureDeferred(
        self.sygnal._make_pushkins_then_start(0, [], None))

    while not start_deferred.called:
        # we need to advance until the pushkins have started up
        self.sygnal.reactor.advance(1)
        self.sygnal.reactor.wait_for_work(lambda: start_deferred.called)
def setUp(self):
    """Boot a Sygnal instance against an in-memory reactor for each test.

    Starts Sygnal via its public ``make_pushkins_then_start`` entry point,
    pumps the fake clock until startup completes, and then captures the
    single fake TCP listener's site as ``self.site`` for request helpers.
    """
    reactor = ExtendedMemoryReactorClock()

    logging_config = {
        "setup": {
            "disable_existing_loggers": False,  # otherwise this breaks logging!
            "formatters": {
                "normal": {
                    "format": "%(asctime)s [%(process)d] "
                    "%(levelname)-5s %(name)s %(message)s"
                }
            },
            "handlers": {
                "stderr": {
                    "class": "logging.StreamHandler",
                    "formatter": "normal",
                    "stream": "ext://sys.stderr",
                },
            },
            "loggers": {
                "sygnal": {"handlers": ["stderr"], "propagate": False},
                "sygnal.access": {
                    "handlers": ["stderr"],
                    "level": "INFO",
                    "propagate": False,
                },
            },
            "root": {"handlers": ["stderr"], "level": "DEBUG"},
            "version": 1,
        }
    }

    config = {"apps": {}, "log": logging_config}
    # Subclass hook to adjust the config (e.g. add apps/pushkins).
    self.config_setup(config)

    config = merge_left_with_defaults(CONFIG_DEFAULTS, config)

    self.sygnal = Sygnal(config, reactor)
    self.reactor = reactor
    start_deferred = ensureDeferred(self.sygnal.make_pushkins_then_start())

    while not start_deferred.called:
        # we need to advance until the pushkins have started up
        self.sygnal.reactor.advance(1)
        self.sygnal.reactor.wait_for_work(lambda: start_deferred.called)

    # sygnal should have started a single (fake) tcp listener
    listeners = self.reactor.tcpServers
    self.assertEqual(len(listeners), 1)
    (port, site, _backlog, interface) = listeners[0]
    self.site = site
def setUp(self):
    """Boot a minimal Sygnal instance against an in-memory reactor.

    Uses a bare configuration (no apps, empty db section, version-only
    logging), starts the database and the v1 push-gateway API, then pumps
    the fake clock until pushkin startup has completed.
    """
    clock = ExtendedMemoryReactorClock()

    # Minimal configuration; defaults fill in everything else.
    config = {"apps": {}, "db": {}, "log": {"setup": {"version": 1}}}
    config = merge_left_with_defaults(CONFIG_DEFAULTS, config)
    # Subclass hook runs after the merge and mutates the merged config.
    self.config_setup(config)

    self.sygnal = Sygnal(config, clock)
    self.sygnal.database.start()
    self.v1api = PushGatewayApiServer(self.sygnal)

    start_deferred = ensureDeferred(
        self.sygnal._make_pushkins_then_start(0, [], None)
    )

    # Pump the fake clock until the pushkins have started up.
    while not start_deferred.called:
        self.sygnal.reactor.advance(1)
        self.sygnal.reactor.wait_for_work(lambda: start_deferred.called)
class TestCase(unittest.TestCase):
    """Base class for Sygnal push-gateway tests.

    Boots a Sygnal instance against an in-memory reactor, optionally
    backed by a scratch PostgreSQL database (when USE_POSTGRES is set),
    and provides helpers to build dummy notifications and POST them to
    the notify endpoint, singly or in parallel.
    """

    def config_setup(self, config):
        """Inject the database section into ``config``.

        Chooses a uniquely-named PostgreSQL database when USE_POSTGRES is
        set, otherwise an in-memory SQLite database.  Subclasses may
        override (calling super) to add apps/pushkins.

        Args:
            config (dict): configuration dict to mutate in place
        """
        # Unique name per run so repeated/parallel runs cannot collide.
        self.dbname = "_sygnal_%s" % (time_ns())
        if USE_POSTGRES:
            config["database"] = {
                "name": "psycopg2",
                "args": {
                    "user": POSTGRES_USER,
                    "password": POSTGRES_PASSWORD,
                    "database": self.dbname,
                    "host": POSTGRES_HOST,
                },
            }
        else:
            config["database"] = {"name": "sqlite3", "args": {"dbfile": ":memory:"}}

    def _set_up_database(self, dbname):
        """(Re)create the scratch PostgreSQL database ``dbname``."""
        conn = psycopg2.connect(
            database=POSTGRES_DBNAME_FOR_INITIAL_CREATE,
            user=POSTGRES_USER,
            password=POSTGRES_PASSWORD,
            host=POSTGRES_HOST,
        )
        # CREATE/DROP DATABASE cannot run inside a transaction block.
        conn.autocommit = True
        cur = conn.cursor()
        # NOTE: identifiers cannot be bound as query parameters; dbname is
        # generated internally by config_setup, never user input.
        cur.execute("DROP DATABASE IF EXISTS %s;" % (dbname,))
        cur.execute("CREATE DATABASE %s;" % (dbname,))
        cur.close()
        conn.close()

    def _tear_down_database(self, dbname):
        """Drop the scratch PostgreSQL database created by _set_up_database."""
        conn = psycopg2.connect(
            database=POSTGRES_DBNAME_FOR_INITIAL_CREATE,
            user=POSTGRES_USER,
            password=POSTGRES_PASSWORD,
            host=POSTGRES_HOST,
        )
        conn.autocommit = True
        cur = conn.cursor()
        cur.execute("DROP DATABASE %s;" % (dbname,))
        cur.close()
        conn.close()

    def setUp(self):
        """Boot a Sygnal instance against an in-memory reactor."""
        reactor = ExtendedMemoryReactorClock()

        logging_config = {
            "setup": {
                # FIX: was missing here although present in sibling test
                # harness variants; without it dictConfig silences loggers
                # created before setUp runs ("otherwise this breaks logging!").
                "disable_existing_loggers": False,
                "formatters": {
                    "normal": {
                        "format": "%(asctime)s [%(process)d] "
                        "%(levelname)-5s %(name)s %(message)s"
                    }
                },
                "handlers": {
                    "stderr": {
                        "class": "logging.StreamHandler",
                        "formatter": "normal",
                        "stream": "ext://sys.stderr",
                    },
                },
                "loggers": {
                    "sygnal": {"handlers": ["stderr"], "propagate": False},
                    "sygnal.access": {
                        "handlers": ["stderr"],
                        "level": "INFO",
                        "propagate": False,
                    },
                },
                "root": {"handlers": ["stderr"], "level": "DEBUG"},
                "version": 1,
            }
        }

        config = {"apps": {}, "log": logging_config}
        # Subclass hook: fills in the database section (and possibly apps).
        self.config_setup(config)

        config = merge_left_with_defaults(CONFIG_DEFAULTS, config)

        if USE_POSTGRES:
            self._set_up_database(self.dbname)

        self.sygnal = Sygnal(config, reactor)
        self.sygnal.database.start()
        self.v1api = PushGatewayApiServer(self.sygnal)

        start_deferred = ensureDeferred(
            self.sygnal._make_pushkins_then_start(0, [], None)
        )

        while not start_deferred.called:
            # we need to advance until the pushkins have started up
            self.sygnal.reactor.advance(1)
            self.sygnal.reactor.wait_for_work(lambda: start_deferred.called)

    def tearDown(self):
        """Close the database and drop the scratch PostgreSQL db if used."""
        super().tearDown()
        self.sygnal.database.close()
        if USE_POSTGRES:
            self._tear_down_database(self.dbname)

    def _make_dummy_notification(self, devices):
        """Return a full dummy notification payload targeting ``devices``."""
        return {
            "notification": {
                "id": "$3957tyerfgewrf384",
                "room_id": "!slw48wfj34rtnrf:example.com",
                "event_id": "$qTOWWTEL48yPm3uT-gdNhFcoHxfKbZuqRVnnWWSkGBs",
                "type": "m.room.message",
                "sender": "@exampleuser:matrix.org",
                "sender_display_name": "Major Tom",
                "room_name": "Mission Control",
                "room_alias": "#exampleroom:matrix.org",
                "prio": "high",
                "content": {
                    "msgtype": "m.text",
                    "body": "I'm floating in a most peculiar way.",
                },
                "counts": {"unread": 2, "missed_calls": 1},
                "devices": devices,
            }
        }

    def _make_dummy_notification_event_id_only(self, devices):
        """Return a minimal payload carrying only room/event ids and counts."""
        return {
            "notification": {
                "room_id": "!slw48wfj34rtnrf:example.com",
                "event_id": "$qTOWWTEL48yPm3uT-gdNhFcoHxfKbZuqRVnnWWSkGBs",
                "counts": {"unread": 2},
                "devices": devices,
            }
        }

    def _make_dummy_notification_badge_only(self, devices):
        """Return a badge-update-only payload (no event, empty sender/id)."""
        return {
            "notification": {
                "id": "",
                "type": None,
                "sender": "",
                "counts": {"unread": 2},
                "devices": devices,
            }
        }

    def _request(self, payload: Union[str, dict]) -> Union[dict, int]:
        """
        Make a dummy request to the notify endpoint with the specified payload

        Args:
            payload: payload to be JSON encoded

        Returns (dict or int):
            If successful (200 response received), the response is JSON
            decoded and the resultant dict is returned.
            If the response code is not 200, returns the response code.
        """
        if isinstance(payload, dict):
            payload = json.dumps(payload)
        content = BytesIO(payload.encode())

        channel = FakeChannel(self.v1api.site, self.sygnal.reactor)
        channel.process_request(b"POST", REQ_PATH, content)

        while not channel.done:
            # we need to advance until the request has been finished
            self.sygnal.reactor.advance(1)
            self.sygnal.reactor.wait_for_work(lambda: channel.done)

        assert channel.done
        assert channel.result is not None

        if channel.result.code != 200:
            return channel.result.code

        return json.loads(channel.response_body)

    def _multi_requests(
        self, payloads: List[Union[str, dict]]
    ) -> List[Union[dict, int]]:
        """
        Make multiple dummy requests to the notify endpoint with the specified
        payloads.  Acts like a listified version of `_request`.

        Args:
            payloads: list of payloads to be JSON encoded

        Returns (list of dicts and/or ints):
            For each payload: if successful (200 response received), the
            response is JSON decoded and the resultant dict is returned;
            otherwise the response code is returned.
        """

        def dump_if_needed(payload):
            # Accept both pre-encoded strings and dicts to be JSON encoded.
            if isinstance(payload, dict):
                payload = json.dumps(payload)
            return payload

        contents = [BytesIO(dump_if_needed(payload).encode()) for payload in payloads]
        channels = [FakeChannel(self.v1api.site, self.sygnal.reactor) for _ in contents]

        # Kick off all requests before pumping so they run concurrently.
        for channel, content in zip(channels, contents):
            channel.process_request(b"POST", REQ_PATH, content)

        def all_channels_done():
            # FIX: generator instead of a throwaway list inside the poll loop.
            return all(channel.done for channel in channels)

        while not all_channels_done():
            # we need to advance until the request has been finished
            self.sygnal.reactor.advance(1)
            self.sygnal.reactor.wait_for_work(all_channels_done)

        def channel_result(channel):
            if channel.result.code != 200:
                return channel.result.code
            else:
                return json.loads(channel.response_body)

        return [channel_result(channel) for channel in channels]
class TestCase(unittest.TestCase):
    """Base class for Sygnal push-gateway tests.

    Boots a Sygnal instance against an in-memory reactor, captures the fake
    TCP listener's site, and provides helpers to build dummy notifications
    and POST them to the notify endpoint, singly or in parallel.
    """

    def config_setup(self, config: dict) -> None:
        # Subclass hook to adjust the configuration (e.g. add apps/pushkins)
        # before defaults are merged in.  Default: no changes.
        pass

    def setUp(self) -> None:
        """Boot a Sygnal instance and capture its fake TCP listener site."""
        reactor = ExtendedMemoryReactorClock()

        logging_config = {
            "setup": {
                "disable_existing_loggers": False,  # otherwise this breaks logging!
                "formatters": {
                    "normal": {
                        "format": "%(asctime)s [%(process)d] "
                        "%(levelname)-5s %(name)s %(message)s"
                    }
                },
                "handlers": {
                    "stderr": {
                        "class": "logging.StreamHandler",
                        "formatter": "normal",
                        "stream": "ext://sys.stderr",
                    },
                },
                "loggers": {
                    "sygnal": {"handlers": ["stderr"], "propagate": False},
                    "sygnal.access": {
                        "handlers": ["stderr"],
                        "level": "INFO",
                        "propagate": False,
                    },
                },
                "root": {"handlers": ["stderr"], "level": "DEBUG"},
                "version": 1,
            }
        }

        config = {"apps": {}, "log": logging_config}
        # Subclass hook runs before defaults are merged in.
        self.config_setup(config)

        config = merge_left_with_defaults(CONFIG_DEFAULTS, config)

        self.sygnal = Sygnal(config, reactor)
        self.reactor = reactor
        start_deferred = ensureDeferred(self.sygnal.make_pushkins_then_start())

        while not start_deferred.called:
            # we need to advance until the pushkins have started up
            self.sygnal.reactor.advance(1)
            self.sygnal.reactor.wait_for_work(lambda: start_deferred.called)

        # sygnal should have started a single (fake) tcp listener
        listeners = self.reactor.tcpServers
        self.assertEqual(len(listeners), 1)
        (port, site, _backlog, interface) = listeners[0]
        self.site = site

    def _make_dummy_notification(self, devices: list) -> dict:
        """Return a full dummy notification payload targeting ``devices``."""
        return {
            "notification": {
                "id": "$3957tyerfgewrf384",
                "room_id": "!slw48wfj34rtnrf:example.com",
                "event_id": "$qTOWWTEL48yPm3uT-gdNhFcoHxfKbZuqRVnnWWSkGBs",
                "type": "m.room.message",
                "sender": "@exampleuser:matrix.org",
                "sender_display_name": "Major Tom",
                "room_name": "Mission Control",
                "room_alias": "#exampleroom:matrix.org",
                "prio": "high",
                "content": {
                    "msgtype": "m.text",
                    "body": "I'm floating in a most peculiar way.",
                },
                "counts": {"unread": 2, "missed_calls": 1},
                "devices": devices,
            }
        }

    def _make_dummy_notification_event_id_only(self, devices: list) -> dict:
        """Return a minimal payload carrying only room/event ids and counts."""
        return {
            "notification": {
                "room_id": "!slw48wfj34rtnrf:example.com",
                "event_id": "$qTOWWTEL48yPm3uT-gdNhFcoHxfKbZuqRVnnWWSkGBs",
                "counts": {"unread": 2},
                "devices": devices,
            }
        }

    def _make_dummy_notification_badge_only(self, devices: list) -> dict:
        """Return a badge-update-only payload (no event, empty sender/id)."""
        return {
            "notification": {
                "id": "",
                "type": None,
                "sender": "",
                "counts": {"unread": 2},
                "devices": devices,
            }
        }

    def _request(self, payload: Union[str, dict]) -> Union[dict, int]:
        """
        Make a dummy request to the notify endpoint with the specified payload

        Args:
            payload: payload to be JSON encoded

        Returns (dict or int):
            If successful (200 response received), the response is JSON
            decoded and the resultant dict is returned.
            If the response code is not 200, returns the response code.
        """
        if isinstance(payload, dict):
            payload = json.dumps(payload)
        content = BytesIO(payload.encode())

        channel = FakeChannel(self.site, self.sygnal.reactor)
        channel.process_request(b"POST", REQ_PATH, content)

        while not channel.done:
            # we need to advance until the request has been finished
            self.sygnal.reactor.advance(1)
            self.sygnal.reactor.wait_for_work(lambda: channel.done)

        assert channel.done
        assert channel.result is not None

        if channel.result.code != 200:
            return channel.result.code

        return json.loads(channel.response_body)

    def _multi_requests(
        self, payloads: List[Union[str, dict]]
    ) -> List[Union[dict, int]]:
        """
        Make multiple dummy requests to the notify endpoint with the specified
        payloads. Acts like a listified version of `_request`.

        Args:
            payloads: list of payloads to be JSON encoded

        Returns (lists of dicts and/or ints):
            If successful (200 response received), the response is JSON
            decoded and the resultant dict is returned.
            If the response code is not 200, returns the response code.
        """

        def dump_if_needed(payload):
            # Accept both pre-encoded strings and dicts to be JSON encoded.
            if isinstance(payload, dict):
                payload = json.dumps(payload)
            return payload

        contents = [BytesIO(dump_if_needed(payload).encode()) for payload in payloads]
        channels = [FakeChannel(self.site, self.sygnal.reactor) for _ in contents]

        # Kick off all requests before pumping so they run concurrently.
        for channel, content in zip(channels, contents):
            channel.process_request(b"POST", REQ_PATH, content)

        def all_channels_done():
            return all(channel.done for channel in channels)

        while not all_channels_done():
            # we need to advance until the request has been finished
            self.sygnal.reactor.advance(1)
            self.sygnal.reactor.wait_for_work(all_channels_done)

        def channel_result(channel):
            if channel.result.code != 200:
                return channel.result.code
            else:
                return json.loads(channel.response_body)

        return [channel_result(channel) for channel in channels]
class TestCase(unittest.TestCase):
    """Base class for Sygnal push-gateway tests.

    Boots a Sygnal instance against an in-memory reactor, optionally backed
    by a scratch PostgreSQL database (when USE_POSTGRES is set), and provides
    helpers to build a dummy notification and POST it to the notify endpoint.
    """

    def config_setup(self, config):
        """Inject the database section into ``config``.

        Chooses a uniquely-named PostgreSQL database when USE_POSTGRES is
        set, otherwise an in-memory SQLite database.  Subclasses may
        override (calling super) to add apps/pushkins.

        Args:
            config (dict): configuration dict to mutate in place
        """
        # Unique name per run so repeated/parallel runs cannot collide.
        self.dbname = "_sygnal_%s" % (time_ns())
        if USE_POSTGRES:
            config["database"] = {
                "name": "psycopg2",
                "args": {
                    "user": POSTGRES_USER,
                    "password": POSTGRES_PASSWORD,
                    "database": self.dbname,
                    "host": POSTGRES_HOST,
                },
            }
        else:
            config["database"] = {"name": "sqlite3", "args": {"dbfile": ":memory:"}}

    def _set_up_database(self, dbname):
        """(Re)create the scratch PostgreSQL database ``dbname``."""
        conn = psycopg2.connect(
            database=POSTGRES_DBNAME_FOR_INITIAL_CREATE,
            user=POSTGRES_USER,
            password=POSTGRES_PASSWORD,
            host=POSTGRES_HOST,
        )
        # CREATE/DROP DATABASE cannot run inside a transaction block.
        conn.autocommit = True
        cur = conn.cursor()
        # NOTE: identifiers cannot be bound as query parameters; dbname is
        # generated internally by config_setup, never user input.
        cur.execute("DROP DATABASE IF EXISTS %s;" % (dbname,))
        cur.execute("CREATE DATABASE %s;" % (dbname,))
        cur.close()
        conn.close()

    def _tear_down_database(self, dbname):
        """Drop the scratch PostgreSQL database created by _set_up_database."""
        conn = psycopg2.connect(
            database=POSTGRES_DBNAME_FOR_INITIAL_CREATE,
            user=POSTGRES_USER,
            password=POSTGRES_PASSWORD,
            host=POSTGRES_HOST,
        )
        conn.autocommit = True
        cur = conn.cursor()
        cur.execute("DROP DATABASE %s;" % (dbname,))
        cur.close()
        conn.close()

    def setUp(self):
        """Boot a Sygnal instance against an in-memory reactor."""
        reactor = ExtendedMemoryReactorClock()

        config = {"apps": {}, "log": {"setup": {"version": 1}}}
        # Subclass hook: fills in the database section (and possibly apps).
        self.config_setup(config)

        config = merge_left_with_defaults(CONFIG_DEFAULTS, config)

        if USE_POSTGRES:
            self._set_up_database(self.dbname)

        self.sygnal = Sygnal(config, reactor)
        self.sygnal.database.start()
        self.v1api = PushGatewayApiServer(self.sygnal)

        start_deferred = ensureDeferred(
            self.sygnal._make_pushkins_then_start(0, [], None)
        )

        while not start_deferred.called:
            # we need to advance until the pushkins have started up
            self.sygnal.reactor.advance(1)
            self.sygnal.reactor.wait_for_work(lambda: start_deferred.called)

    def tearDown(self):
        """Close the database and drop the scratch PostgreSQL db if used."""
        super().tearDown()
        self.sygnal.database.close()
        if USE_POSTGRES:
            self._tear_down_database(self.dbname)

    def _make_dummy_notification(self, devices):
        """Return a full dummy notification payload targeting ``devices``."""
        return {
            "notification": {
                "id": "$3957tyerfgewrf384",
                "room_id": "!slw48wfj34rtnrf:example.com",
                # FIX: event_id was missing here although present in the
                # sibling test-harness variants' dummy notifications.
                "event_id": "$qTOWWTEL48yPm3uT-gdNhFcoHxfKbZuqRVnnWWSkGBs",
                "type": "m.room.message",
                "sender": "@exampleuser:matrix.org",
                "sender_display_name": "Major Tom",
                "room_name": "Mission Control",
                "room_alias": "#exampleroom:matrix.org",
                "prio": "high",
                "content": {
                    "msgtype": "m.text",
                    "body": "I'm floating in a most peculiar way.",
                },
                "counts": {"unread": 2, "missed_calls": 1},
                "devices": devices,
            }
        }

    def _request(self, payload) -> Union[dict, int]:
        """
        Make a dummy request to the notify endpoint with the specified payload

        Args:
            payload: payload to be JSON encoded

        Returns (dict or int):
            If successful (200 response received), the response is JSON
            decoded and the resultant dict is returned.
            If the response code is not 200, returns the response code.
        """
        if isinstance(payload, dict):
            payload = json.dumps(payload)
        content = BytesIO(payload.encode())

        channel = FakeChannel(self.v1api.site, self.sygnal.reactor)
        channel.process_request(b"POST", REQ_PATH, content)

        while not channel.done:
            # we need to advance until the request has been finished
            self.sygnal.reactor.advance(1)
            self.sygnal.reactor.wait_for_work(lambda: channel.done)

        assert channel.done
        # FIX: guard against a missing result before reading .code, matching
        # the sibling test-harness variants.
        assert channel.result is not None

        if channel.result.code != 200:
            return channel.result.code

        return json.loads(channel.response_body)
class TestCase(unittest.TestCase):
    """Base class for Sygnal push-gateway tests.

    Boots a Sygnal instance (SQLite in-memory database) against an
    in-memory reactor and provides helpers to build, submit and collect
    dummy requests to the notify endpoint via Twisted's DummyRequest.
    """

    def config_setup(self, config: dict) -> None:
        # Use an in-memory SQLite database; mutates config in place.
        # Subclasses may override (calling super) to add apps/pushkins.
        config["db"]["dbfile"] = ":memory:"

    def setUp(self) -> None:
        """Boot a Sygnal instance against an in-memory reactor."""
        reactor = ExtendedMemoryReactorClock()

        config = {"apps": {}, "db": {}, "log": {"setup": {"version": 1}}}
        config = merge_left_with_defaults(CONFIG_DEFAULTS, config)
        # NOTE: the hook runs AFTER the defaults merge here, so its in-place
        # edits (e.g. the dbfile override) apply to the merged config directly.
        self.config_setup(config)

        self.sygnal = Sygnal(config, reactor)
        self.sygnal.database.start()
        self.v1api = PushGatewayApiServer(self.sygnal)

        start_deferred = ensureDeferred(
            self.sygnal._make_pushkins_then_start(0, [], None))

        while not start_deferred.called:
            # we need to advance until the pushkins have started up
            self.sygnal.reactor.advance(1)
            self.sygnal.reactor.wait_for_work(lambda: start_deferred.called)

    def tearDown(self) -> None:
        """Close the database opened in setUp."""
        super().tearDown()
        self.sygnal.database.close()

    def _make_dummy_notification(self, devices: list) -> dict:
        """Return a full dummy notification payload targeting ``devices``."""
        return {
            "notification": {
                "id": "$3957tyerfgewrf384",
                "room_id": "!slw48wfj34rtnrf:example.com",
                "type": "m.room.message",
                "sender": "@exampleuser:matrix.org",
                "sender_display_name": "Major Tom",
                "room_name": "Mission Control",
                "room_alias": "#exampleroom:matrix.org",
                "prio": "high",
                "content": {
                    "msgtype": "m.text",
                    "body": "I'm floating in a most peculiar way.",
                },
                "counts": {"unread": 2, "missed_calls": 1},
                "devices": devices,
            }
        }

    def _make_request(self, payload, headers=None):
        """
        Make a dummy request to the notify endpoint with the specified payload.

        Args:
            payload: payload to be JSON encoded
            headers (dict, optional): A L{dict} mapping header names as
                L{bytes} to L{list}s of header values as L{bytes}

        Returns (DummyRequest):
            A dummy request corresponding to the request arguments supplied.
        """
        # DummyRequest wants the path as segments, without the leading "".
        pathparts = REQ_PATH.split(b"/")
        if pathparts[0] == b"":
            pathparts = pathparts[1:]
        dreq = DummyRequest(pathparts)
        dreq.requestHeaders = Headers(headers or {})
        dreq.responseCode = 200  # default to 200

        if isinstance(payload, dict):
            payload = json.dumps(payload)

        dreq.content = BytesIO(payload.encode())
        dreq.method = "POST"

        return dreq

    def _collect_request(self, request):
        """
        Collects (waits until done and then returns the result of) the request.

        Args:
            request (Request): a request to collect

        Returns (dict or int):
            If successful (200 response received), the response is JSON
            decoded and the resultant dict is returned.
            If the response code is not 200, returns the response code.
        """
        resource = self.v1api.site.getResourceFor(request)
        rendered = resource.render(request)

        if request.responseCode != 200:
            return request.responseCode

        if isinstance(rendered, str):
            # Synchronous render: the body came back directly.
            return json.loads(rendered)
        elif rendered == NOT_DONE_YET:
            # Asynchronous render: pump the fake clock until finished.
            while not request.finished:
                # we need to advance until the request has been finished
                self.sygnal.reactor.advance(1)
                self.sygnal.reactor.wait_for_work(lambda: request.finished)

            assert request.finished > 0

            # The async handler may have set an error code while running.
            if request.responseCode != 200:
                return request.responseCode

            written_bytes = b"".join(request.written)
            return json.loads(written_bytes)
        else:
            raise RuntimeError(f"Can't collect: {rendered}")

    def _request(self, *args, **kwargs):
        """
        Makes and collects a request.

        See L{_make_request} and L{_collect_request}.
        """
        request = self._make_request(*args, **kwargs)
        return self._collect_request(request)