async def test_csrf_cases():
    """End-to-end check of the CSRF module: a GET request is skipped, and the
    three POST requests each yield their expected vulnerability report."""
    persister = AsyncMock()
    all_requests = []
    # Request 1: plain GET — must NOT be attacked (CSRF module is POST-only).
    request = Request("http://127.0.0.1:65086/")
    request.path_id = 1
    all_requests.append(request)
    # Request 2: POST with a token the backend does not actually validate.
    request = Request(
        "http://127.0.0.1:65086/",
        method="POST",
        post_params=[["email", "*****@*****.**"], ["xsrf_token", "weak"]],
    )
    request.path_id = 2
    all_requests.append(request)
    # Request 3: POST with a token weak enough to be predictable.
    request = Request(
        "http://127.0.0.1:65086/?check=true",
        method="POST",
        post_params=[["email", "*****@*****.**"], ["xsrf_token", "weak"]],
    )
    request.path_id = 3
    all_requests.append(request)
    # Request 4: POST with no anti-CSRF token at all.
    request = Request(
        "http://127.0.0.1:65086/?check=true",
        method="POST",
        post_params=[["name", "Obiwan"]],
    )
    request.path_id = 4
    all_requests.append(request)
    crawler = AsyncCrawler("http://127.0.0.1:65086/", timeout=1)
    options = {"timeout": 10, "level": 1}
    module = mod_csrf(crawler, persister, options, Event())
    module.do_post = True
    module.verbose = 2
    for request in all_requests:
        if await module.must_attack(request):
            await module.attack(request)
        else:
            # Not attacked because of GET verb
            assert request.path_id == 1
    # Collect (request_id, info) pairs recorded by the mocked persister.
    vulnerabilities = set()
    for call in persister.add_payload.call_args_list:
        vulnerabilities.add((call[1]["request_id"], call[1]["info"]))
    assert vulnerabilities == {
        (2, _("CSRF token '{}' is not properly checked in backend").format("xsrf_token")),
        (3, _("CSRF token '{}' might be easy to predict").format("xsrf_token")),
        (4, _("Lack of anti CSRF token"))
    }
    await crawler.close()
async def connect_async(self, *, action: SocketAction = None, method: SocketMethod = None,
                        path="", client_parameters=None, server_parameters=None):
    """Open the websocket connection and run until it closes.

    Exactly one of *action* (called with ``self``), *method* (called with no
    arguments) or the default ``self.run()`` is awaited while connected; its
    result is returned on a clean close.

    :param action: optional callable awaited as ``action(self)`` once connected.
    :param method: optional zero-argument coroutine awaited once connected.
    :param path: path appended to the endpoint URI via ``self.uri``.
    :param client_parameters: dict attached to the websocket for the session.
    :param server_parameters: dict used when building the URI.
    :raises: the exception decoded from the close reason when the server closes
        with code 1001, or ``ConnectionError`` for any close code other than 1000.
    """
    # Bug fix: the defaults were mutable dicts ({}), which are shared between
    # calls in Python; use None sentinels and create fresh dicts per call.
    if client_parameters is None:
        client_parameters = {}
    if server_parameters is None:
        server_parameters = {}
    uri = self.uri(path, server_parameters)
    print(f"before connecting to {uri}")
    # TODO: optimize that, currently we use exorbitant long timeouts for connection
    ws = websockets.connect(uri, timeout=2000, ping_timeout=1000, ping_interval=1000,
                            max_size=80 * 2**20)
    res: Any = None
    try:
        async with ws as self.websocket:
            self.websocket.client_parameters = client_parameters
            # Wake anyone waiting on the previous event, then arm a new one.
            self.connection_event.set()
            self.connection_event = Event()
            self.connection_status = "connected"
            self.exception = None
            logger.info("connected..")
            await self.on_connect()
            if action:
                res = await action(self)
            elif method:
                res = await method()
            else:
                res = await self.run()
    except Exception as e:
        logger.exception(f"FAIL: {e.__class__.__name__}:{e}")
        self.exception = e
        raise
    finally:
        if self.websocket and self.websocket.close_code == 1001:
            # Close code 1001 carries a serialized exception in the reason.
            ex = exception_from_dict(self.websocket.close_reason)
            raise ex
        elif self.websocket and self.websocket.close_code != 1000:
            errmsg = f"connection terminated abnormally [{self.websocket.close_code}], reason:{self.websocket.close_reason}"
            logger.error(errmsg)
            raise ConnectionError(errmsg)
        self.connection_event.set()
        await self.on_disconnect()
        # logger.warn is a deprecated alias; use logger.warning.
        logger.warning(
            f"connection closed with code: {self.websocket.close_code}, reason:{self.websocket.close_reason}"
        )
        self.connection_status = "disconnected"
        self.websocket = None
    return res
async def wait_conn_async(gen: PQGenConn[RV], timeout: Optional[float] = None) -> RV:
    """
    Coroutine waiting for a connection generator to complete.

    :param gen: a generator performing database operations and yielding
        (fd, `Ready`) pairs when it would block.
    :param timeout: timeout (in seconds) to check for other interrupt, e.g.
        to allow Ctrl-C. If zero or None, wait indefinitely.
    :return: whatever *gen* returns on completion.

    Behave like in `wait()`, but take the fileno to wait from the generator
    itself, which might change during processing.
    """
    # Use an event to block and restart after the fd state changes.
    # Not sure this is the best implementation but it's a start.
    ev = Event()
    loop = get_event_loop()
    ready: Ready
    s: Wait

    def wakeup(state: Ready) -> None:
        # Runs in the event loop when the fd becomes ready; record which
        # direction fired and release the waiter.
        nonlocal ready
        ready = state
        ev.set()

    # Normalise 0 to None so wait_for() waits indefinitely.
    timeout = timeout or None
    try:
        fileno, s = next(gen)
        while True:
            ev.clear()
            if s == Wait.R:
                loop.add_reader(fileno, wakeup, Ready.R)
                await wait_for(ev.wait(), timeout)
                loop.remove_reader(fileno)
            elif s == Wait.W:
                loop.add_writer(fileno, wakeup, Ready.W)
                await wait_for(ev.wait(), timeout)
                loop.remove_writer(fileno)
            elif s == Wait.RW:
                loop.add_reader(fileno, wakeup, Ready.R)
                loop.add_writer(fileno, wakeup, Ready.W)
                await wait_for(ev.wait(), timeout)
                loop.remove_reader(fileno)
                loop.remove_writer(fileno)
            else:
                # Bug fix: the original message was "bad poll status: %s" with
                # no interpolation, so the offending status was never shown.
                raise e.InternalError("bad poll status: %s" % s)
            fileno, s = gen.send(ready)
    except TimeoutError:
        raise e.OperationalError("timeout expired")
    except StopIteration as ex:
        # The generator's return value travels in StopIteration.args.
        rv: RV = ex.args[0] if ex.args else None
        return rv
async def test_events(rest_manager, notifier):
    """ Testing whether various events are coming through the events endpoints """
    global messages_to_wait_for
    connected_event = Event()
    events_up = Event()
    # await open_events_socket(rest_manager, connected_event, events_up)
    # Listen on the events endpoint in the background; `connected_event` fires
    # once the socket is open, `events_up` once all expected messages arrived.
    event_socket_task = create_task(
        open_events_socket(rest_manager, connected_event, events_up))
    await connected_event.wait()
    # Map each notification subject to the payload it is fired with; dict
    # payloads are passed as a single argument, tuples are star-expanded below.
    testdata = {
        NTFY.CHANNEL_ENTITY_UPDATED: {"state": "Complete"},
        NTFY.WATCH_FOLDER_CORRUPT_FILE: ("foo", ),
        NTFY.TRIBLER_NEW_VERSION: ("123", ),
        NTFY.CHANNEL_DISCOVERED: {"result": "bla"},
        NTFY.TORRENT_FINISHED: (b'a' * 10, None, False),
        NTFY.LOW_SPACE: ("", ),
        NTFY.TUNNEL_REMOVE: (Circuit(1234, None), 'test'),
        NTFY.REMOTE_QUERY_RESULTS: {"query": "test"},
    }
    # The socket reader (module-level global) checks off each value it sees.
    messages_to_wait_for = {k.value for k in testdata}
    messages_to_wait_for.add(NTFY.TRIBLER_EXCEPTION.value)
    for subject, data in testdata.items():
        if data:
            notifier.notify(subject, *data)
        else:
            notifier.notify(subject)
    # Also trigger the exception path directly on the endpoint.
    rest_manager.root_endpoint.endpoints['/events'].on_tribler_exception(
        ReportedError('', '', {}, False))
    await events_up.wait()
    event_socket_task.cancel()
    with suppress(CancelledError):
        await event_socket_task
def __init__(self, proc_fn=identity, inq=None, outq=None, auto_close=True,
             error_handler_fn=None, n_workers=5, auto_start=True, name=None) -> None:
    """Set up a queue processor that runs *proc_fn* over items from *inq*.

    :param proc_fn: callable applied to each input item (defaults to identity).
    :param inq: input queue; a fresh CloseableQueue is created if omitted.
    :param outq: output queue; a fresh CloseableQueue is created if omitted.
    :param auto_close: stored flag controlling close behavior downstream.
    :param error_handler_fn: optional callback invoked on processing errors.
    :param n_workers: number of worker tasks to run.
    :param auto_start: when True, start processing immediately.
    :param name: display name; defaults to proc_fn.__name__.
    """
    self.proc_fn = proc_fn
    self.error_handler_fn = error_handler_fn
    self.n_workers = n_workers
    self.name = name or proc_fn.__name__
    self.inq = inq or CloseableQueue()
    self.outq = outq or CloseableQueue()
    self.auto_close = auto_close
    # Events that signal the processing state of this queue processor.
    self.started_processing = Event()
    self.finished_processing = Event()
    # -- Asynchronous Tasks ---
    # Will be set to a list of worker tasks that
    # do the actual work of processing the queue.
    self.worker_tasks = None
    # Task that waits for the input queue to be closed
    # and fully processed. When that happens, it cancels
    # the worker tasks and sets `finished_processing`.
    self.listen_for_input_done = ensure_future(self.__on_input_done__())
    # -- / Asynchronous Tasks ---
    # Links for chaining processors into a pipeline (set externally).
    self.pipe_next = None
    self.pipe_prev = None
    if auto_start:
        self.start_processing()
def __init__(self, lang: defs.LangEnum = defs.LangEnum.ENG,
             log_handlers: [StreamHandler] = None):
    """Initialize the experiment/device manager.

    :param lang: UI language used to pick localized string tables.
    :param log_handlers: optional handlers attached to this class's logger
        and forwarded to the scanners/saver it creates.
    """
    self._logger = getLogger(__name__)
    if log_handlers:
        for h in log_handlers:
            self._logger.addHandler(h)
    self._logger.debug("Initializing")
    self._controllers = self.get_controllers()
    # Hardware discovery helpers and version checking.
    self._rs_dev_scanner = RSDeviceCommScanner(self.get_profiles(), log_handlers)
    self._cam_scanner = CamScanner(log_handlers)
    self._ver_check = VersionChecker(log_handlers)
    self._log_handlers = log_handlers
    # Flags used to coordinate view creation/removal and save completion.
    self._new_dev_view_flag = Event()
    self._remove_dev_view_flag = Event()
    self._done_saving_flag = Event()
    self._saving_flag = Event()
    self._current_lang = lang
    self._saver = RSSaver(lang, log_handlers)
    self._save_path = str()
    # Device bookkeeping: live devices, pending initializations, view queues.
    self._devs = dict()
    self._dev_inits = dict()
    self._new_dev_views = []
    self._remove_dev_views = []
    self._tasks = []
    self._saving = False
    self._running = True
    self._block_num = 0
    self._cond_name = str()
    self.exp_created = False
    self.exp_running = False
    # Output filenames for experiment artifacts.
    self._flag_filename = "flags.csv"
    self._note_filename = "notes.csv"
    self._events_filename = "events.csv"
    self._first_note = True
    self._first_flag = True
    # Localized string tables for the chosen language.
    self._main_strings = strings[lang]
    self._note_strings = note_strings[lang]
    self._flag_strings = flag_strings[lang]
    # Must be constructed inside a running event loop.
    self._loop = get_running_loop()
    self._logger.debug("Initialized")
async def on_ready(self):
    """Ensure every joined guild has a ready-lock, then flag the guilds that
    already expose the bot's text channel; guilds missing it are treated as
    fresh joins."""
    for guild in self.bot.guilds:
        # If it got added to a guild that keeps crashing, log the invite
        # print(guild.name, await guild.channels[0].create_invite())
        lock = self.ready_locks.setdefault(guild.id, Event())
        channel = get(guild.text_channels, name=self.bot.channel_name)
        if channel is not None:
            lock.set()
        else:
            # No bot channel yet: run the join handler to set one up.
            await self.on_guild_join(guild)
def __init__(self, topic_partitions: Iterable[TopicPartition]):
    """Record the partition assignment and create per-partition state.

    :param topic_partitions: the assigned partitions (list/set/tuple).
    """
    assert isinstance(topic_partitions, (list, set, tuple))
    self._topic_partitions = frozenset(topic_partitions)
    # One state object per assigned partition.
    self._tp_state = {
        tp: TopicPartitionState(self) for tp in self._topic_partitions
    }  # type: Dict[TopicPartition, TopicPartitionState]
    self.unassign_future = create_future()
    self.commit_refresh_needed = Event()
async def test_start(self):
    """A service registered with autostart=False stays stopped until
    explicitly started, and then actually runs."""
    service = Service()
    run = Event()

    # Register a coroutine service that signals `run` when executed.
    @service(name="good", autostart=False)
    async def good():
        run.set()

    # autostart=False: registration alone must not start it.
    assert service.services[0].status == Status.stopped
    service.start("good")
    await run.wait()
def __init__(self, loop: AbstractEventLoop, ip_addr: str, phone_id: str,
             device_id: str, device_password: str) -> None:
    """Initialize the Switcher V2 API.

    :param loop: event loop used by this API instance.
    :param ip_addr: IP address of the Switcher device.
    :param phone_id: phone id credential for the device protocol.
    :param device_id: id of the target device.
    :param device_password: password credential for the device protocol.
    """
    self._loop = loop
    self._ip_addr = ip_addr
    self._phone_id = phone_id
    self._device_id = device_id
    self._device_password = device_password
    # Stream pair populated when a connection is established.
    self._reader = None  # type: Optional[StreamReader]
    self._writer = None  # type: Optional[StreamWriter]
    self._connected_evt = Event()
def __init__(self, loop: AbstractEventLoop, phone_id: str,
             device_id: str, device_password: str) -> None:
    """Initialize the switcherv2 bridge.

    :param loop: event loop used by the bridge.
    :param phone_id: phone id credential for the device protocol.
    :param device_id: id of the target device.
    :param device_password: password credential for the device protocol.
    """
    self._loop = loop
    self._phone_id = phone_id
    self._device_id = device_id
    self._device_password = device_password
    # Last discovered device; set once the bridge sees the device.
    self._device = None  # type: Optional[SwitcherV2Device]
    self._running_evt = Event()
    # maxsize=1: only the most recent device state is kept pending.
    self._queue = Queue(maxsize=1)  # type: Queue
def __init__(self, reader, writer, chat_system: "ChatSystem"):
    """Create a chat user bound to one client connection.

    :param reader: stream reader for the client connection.
    :param writer: stream writer for the client connection.
    :param chat_system: owning chat system this user belongs to.
    """
    self.reader = reader
    self.writer = writer
    self.chat_system = chat_system
    # Peer address kept as a display string (e.g. for logs/user lists).
    self.addr = str(writer.get_extra_info("peername"))
    # Users this user has blocked.
    self.blocks: List[ChatUser] = []
    # No nickname until the client sets one.
    self._nick = None
    self.is_moderator = False
    # Set when the user is kicked, so waiters can react.
    self.is_kicked = Event()
    # Cleanup: removed the redundant bare `return` — __init__ returns None
    # implicitly, and the trailing return only added noise.
def __init__(self, model: Model, time: Time, config: Config,
             global_info: GlobalInfo, estimator: Estimator):
    """Initialize the sync coordinator and apply the startup backup cooldown.

    :param model: holds the backup source and destination.
    :param time: time abstraction used by the coordinator.
    :param config: configuration access (used for the startup delay here).
    :param global_info: shared state, including the backup cooldown.
    :param estimator: size/space estimator used during syncs.
    """
    super().__init__()
    self._model = model
    self._time = time
    self._config = config
    self._lock: Lock = Lock()
    self._global_info: GlobalInfo = global_info
    # Both endpoints indexed by name for uniform lookup.
    self._sources = {
        self._model.source.name(): self._model.source,
        self._model.dest.name(): self._model.dest
    }
    # Exponential backoff between failed attempts, capped at one hour.
    self._backoff = Backoff(initial=0, base=10, max=60 * 60)
    self._estimator = estimator
    self._busy = False
    self._sync_task: Task = None
    self._sync_start = Event()
    # Pre-set so the first sync does not wait.
    self._sync_wait = Event()
    self._sync_wait.set()
    # Delay backups right after startup by the configured number of minutes.
    self._global_info.triggerBackupCooldown(timedelta(minutes=self._config.get(Setting.BACKUP_STARTUP_DELAY_MINUTES)))
    self.trigger()
async def stop(self) -> Optional[Event]:
    """Stop the server.

    Callers should check that the server is running first: when the status
    is 0 or 3 (presumably not-running states — confirm against the status
    codes used elsewhere) no command is sent and None is returned.

    :return: an Event associated with the stop, or None if nothing was done.
    """
    if self.status in (0, 3):
        return None
    await self.send_command("stop")
    stop_event = Event()
    self._stop_event = stop_event
    return stop_event
def __init__(self, bot, guild_id):
    """Create a per-guild music player.

    :param bot: the bot instance (provides the event loop).
    :param guild_id: id of the guild this player serves.
    """
    self.bot = bot
    self.guild_id = guild_id
    self.channel = None
    # Signalled when the current track finishes and the next should play.
    self.next = Event()
    self.queue = Queue()
    # Default playback volume (percent).
    self.volume = 40
    # NOTE: starts the controller loop as a side effect of construction.
    self.bot.loop.create_task(self.controller_loop())
def __init__(self, model: Model, time: Time, config: Config,
             global_info: GlobalInfo, estimator: Estimator):
    """Initialize the sync coordinator.

    :param model: holds the backup source and destination.
    :param time: time abstraction used by the coordinator.
    :param config: configuration access.
    :param global_info: shared state object.
    :param estimator: size/space estimator used during syncs.
    """
    super().__init__()
    self._model = model
    self._time = time
    self._config = config
    self._lock: Lock = Lock()
    self._global_info: GlobalInfo = global_info
    # Both endpoints indexed by name for uniform lookup.
    self._sources = {
        self._model.source.name(): self._model.source,
        self._model.dest.name(): self._model.dest
    }
    # Exponential backoff between failed attempts, capped at one hour.
    self._backoff = Backoff(initial=0, base=10, max=60 * 60)
    self._estimator = estimator
    self._busy = False
    self._sync_task: Task = None
    self._sync_start = Event()
    # Pre-set so the first sync does not wait.
    self._sync_wait = Event()
    self._sync_wait.set()
    self.trigger()
def __init__(self, connection: Connection, h2_connection: H2Connection,
             transport: Transport, *, stream_id: Optional[int] = None,
             wrapper: Optional[Wrapper] = None) -> None:
    """Initialize an HTTP/2 stream wrapper.

    :param connection: owning connection object.
    :param h2_connection: the h2 protocol state machine.
    :param transport: transport used to send frames.
    :param stream_id: if given, the stream is initialized immediately;
        otherwise initialization is deferred to the caller.
    :param wrapper: optional wrapper associated with this stream.
    """
    self.connection = connection
    self._h2_connection = h2_connection
    self._transport = transport
    self.wrapper = wrapper
    if stream_id is not None:
        self.init_stream(stream_id, self.connection)
    # Flow-control and header/trailer arrival signals.
    self.window_updated = Event()
    self.headers: Optional['_Headers'] = None
    self.headers_received = Event()
    self.trailers: Optional['_Headers'] = None
    self.trailers_received = Event()
async def test_error_handler_called_if_error_raised(qtbot, task_runner):
    """The on_error handler must receive the exception raised by the task."""
    result = Result()
    event = Event()

    def _task():
        raise ValueError('value')

    # Run the failing task and wait for the taskErrored signal; the handler
    # stores the exception in `result` and sets `event` when it has run.
    with qtbot.waitSignal(task_runner.taskErrored, timeout=10000):
        task_runner.run_task(_task, on_error=_handler(event, result))
    await wait_for(event.wait(), 1)
    assert isinstance(result.value, ValueError)
def __init__(self, scheduler: 'Scheduler', task: Task, timestamp: float,
             instance: Any, args: Sequence[Any], kwargs: Mapping[str, Any]):
    """One scheduled invocation of *task* at *timestamp*.

    :param scheduler: owning scheduler.
    :param task: the task definition to run.
    :param timestamp: scheduled execution time.
    :param instance: object the task is bound to (if any).
    :param args: positional arguments for the call.
    :param kwargs: keyword arguments for the call.
    """
    self.scheduler = scheduler
    self.task = task
    self.instance = instance
    self.timestamp = timestamp
    self.async_task = None
    self.stopped_event = Event()
    # Copy the call arguments into owned containers; empty/None inputs
    # normalise to () and {} respectively.
    self.args = tuple(args or ())
    self.kwargs = dict(kwargs or {})
def __init__(self, merkle, source_func):
    """Initialise a cache of hashes taken from source_func:

       async def source_func(index, count): ...

    :param merkle: merkle-tree helper used to combine hashes.
    :param source_func: async callable returning `count` hashes from `index`.
    """
    self.merkle = merkle
    self.source_func = source_func
    # Cache state; populated on first initialization.
    self.length = 0
    self.depth_higher = 0
    self.initialized = Event()
def __init__(self, device_ids: dict, log_handlers: [StreamHandler]):
    """ Initialize scanner and prep for run.
    :param device_ids: The list of devices to look for.
    :param log_handlers: Handlers attached to this scanner's logger.
    """
    self._logger = getLogger(__name__)
    for h in log_handlers:
        self._logger.addHandler(h)
    self._logger.debug("Initializing")
    self._device_ids = device_ids
    # Signals for connection lifecycle: new device, lost device, error.
    self._connect_event = Event()
    self._disconnect_event = Event()
    self._connect_err_event = Event()
    # Discovered/lost comm lists and the ports already seen.
    self._new_coms = []
    self._lost_coms = []
    self._known_ports = []
    self._serials = {}
    self._tasks = []
    # Must be constructed inside a running event loop.
    self._loop = get_running_loop()
    self._logger.debug("Initialized")
def __init__(self, shard_id, client, loop: AbstractEventLoop):
    """
    Handles all Discord Shard related events.
    For more information on what sharding is and how it works:
    https://discord.com/developers/docs/topics/gateway#sharding.
    This simply represents a Discord Shard. The actual handling of events
    happens via the client's handlers.
    :param shard_id: The id for the shard.
    :param client: A speedcord.Client object which will manage the shards.
    :param loop: an AbstractEventLoop which is used to create callbacks.
    """
    self.id = shard_id
    self.client = client
    self.loop = loop
    self.ws = None
    self.gateway_url = None
    self.logger = getLogger(f"speedcord.shard.{self.id}")
    # Fix: asyncio.Event's `loop` argument was deprecated in Python 3.8 and
    # removed in 3.10; events bind to the running loop when first awaited.
    # Some bots might wanna know which shards is online at all times
    self.connected = Event()
    self.received_heartbeat_ack = True
    self.heartbeat_interval = None
    self.heartbeat_count = None
    self.failed_heartbeats = 0
    self.session_id = None
    self.last_event_id = None  # This gets modified by gateway.py
    self.is_closing = False
    self.is_initial_connect = True
    self.send_ratelimiter = TimesPer(120, 60)
    self.is_ready = Event()
    # Fix: removed the dead store `self.active = True` that was immediately
    # overwritten; the shard starts inactive. Will only handle core events.
    self.active = False

    # Default events
    self.client.opcode_dispatcher.register(10, self.handle_hello)
    self.client.opcode_dispatcher.register(11, self.handle_heartbeat_ack)
    self.client.opcode_dispatcher.register(9, self.handle_invalid_session)
    self.client.event_dispatcher.register("READY", self.handle_ready)
def __init__(self, remote_addr: str, local_discr: int, min_rx: Optional[int] = None,
             role: Optional[str] = None, logger: Optional[Logger] = None):
    """Initialize a session toward *remote_addr*.

    Attribute names appear to mirror RFC 5880 (BFD) state variables
    (LocalDiscr, DetectMult, DesiredMinTxInterval, ...) — confirm against
    the protocol implementation.

    :param remote_addr: address of the remote peer.
    :param local_discr: local discriminator; also used as the session name.
    :param min_rx: minimum receive interval; falls back to the class default.
    :param role: session role; defaults to the active role.
    :param logger: optional logger; root logger if omitted.
    """
    self._logger: Logger = logger or getLogger()
    self._loop: AbstractEventLoop = get_event_loop()
    self.counters: SessCounters = SessCounters()
    self._LocalDiscr: int = local_discr
    self.name: str = str(local_discr)
    self._min_rx_arg: int = min_rx or self._default_min_rx
    self._role_arg: str = role or SessionRole.Active
    self._Role: str = self._role_arg
    # Authentication sequence tracking.
    self.AuthSeqKnown: bool = False
    self.RcvAuthSeq: int = 0
    self.addr: str = remote_addr
    # Local and remote session state machines start down.
    self._SessionState: int = SessionState.AdminDown
    self._StateFutures: Dict[int, Set[Future]] = {}
    self._RemoteSessionState: int = SessionState.Down
    self._RemoteDiscr: int = 0
    self.LocalDiag: int = Diag.NoDiag
    # Timer negotiation values; remote values start at 1 per protocol.
    self._DesiredMinTxInterval: int = self._min_rx_arg
    self._RequiredMinRxInterval: int = self._min_rx_arg
    self._RemoteMinRxInterval: int = 1
    self._RemoteMinTxInterval: int = 1
    self._DemandMode: bool = False
    self._RemoteDemandMode: bool = False
    self._DetectMult: int = self._default_detect_mult
    self._RemoteDetectMult: int = 0
    # Bounded queue of received control packets (None = shutdown signal).
    self._recv_queue: Queue[Optional[CtlPacket]] = Queue(32)
    self.send_callable: Optional[CbSend] = None
    self._recv_task: Optional[Task] = None
    self._send_task: Optional[Task] = None
    self._poll_result: Optional[Future] = None
    self._local_send_intvl: int = self._DesiredMinTxInterval
    # Send triggers: immediate (poll/final) vs. periodic keepalive.
    self._immediate_send_event: Event = Event()
    self._periodical_send_event: Event = Event()
    self._DetectionTime: int = 1_000_000
    self.RequiredMinEchoInterval: int = 0  # TODO: Not implemented. should be 0
    self._tasks: Set[Task] = set()
async def test_tasks_queued_later(self):
    """Tasks queued against the throttler after the first completes are
    released on successive time slots."""
    loop = get_event_loop()  # NOTE(review): unused — presumably a leftover.

    async def func(throttle, event):
        await throttle
        event.set()

    # Warm-up sleeps before taking the baseline timestamp.
    for i in range(0, 10):
        await asyncio.sleep(0.1 * i)
    time_a = time()
    throttler = Throttler()
    event_a = Event()
    # Keep a reference to the task so it is not garbage collected.
    task_a = ensure_future(func(throttler(), event_a))
    await event_a.wait()
    # First acquisition should go through immediately.
    self.assertTrue(is_at_or_just_after(time(), time_a))
    await asyncio.sleep(0.5)
    now = time()
    # Next slot: the current second if it already advanced, else the next one.
    time_b = \
        now if int(now) > int(time_a) else \
        floor(time_a + 1.0)
    time_c = floor(time_b + 1.0)
    event_b = Event()
    task_b = ensure_future(func(throttler(), event_b))
    event_c = Event()
    task_c = ensure_future(func(throttler(), event_c))
    await event_b.wait()
    self.assertTrue(is_at_or_just_after(time(), time_b))
    await event_c.wait()
    self.assertTrue(is_at_or_just_after(time(), time_c))
def __init__(self, tasks=(), *, wait=all):
    """Create a task group managing *tasks*.

    :param tasks: initial tasks to add to the group.
    :param wait: completion policy sentinel — one of the builtins `any`,
        `all`, or `object` (used here as markers, not called).
    :raises ValueError: if *wait* is not one of the accepted sentinels.
    """
    if wait not in (any, all, object):
        raise ValueError('invalid wait argument')
    # Completed tasks queue and the set still running.
    self._done = deque()
    self._pending = set()
    self._wait = wait
    self._done_event = Event()
    self._logger = logging.getLogger(self.__class__.__name__)
    self._closed = False
    self.completed = None
    for task in tasks:
        self._add_task(task)
def __init__(self, topic_partitions: Set[TopicPartition], *, loop):
    """Record the partition assignment and create per-partition state.

    NOTE(review): the explicit `loop` argument (also forwarded to
    `Event(loop=...)`) follows the pre-3.10 asyncio API — the `loop`
    parameter is removed in Python 3.10; verify target Python version.

    :param topic_partitions: the assigned partitions (list/set/tuple).
    :param loop: event loop shared by the created futures/events.
    """
    assert isinstance(topic_partitions, (list, set, tuple))
    self._topic_partitions = frozenset(topic_partitions)
    self._tp_state = {}  # type: Dict[TopicPartition, TopicPartitionState]
    for tp in self._topic_partitions:
        self._tp_state[tp] = TopicPartitionState(self, loop=loop)
    self._loop = loop
    self.unassign_future = create_future(loop)
    self.commit_refresh_needed = Event(loop=loop)
def __init__(self, stream: StreamReader, log_handlers: [StreamHandler] = None):
    """Wrap *stream* for monitored reading.

    :param stream: the StreamReader to consume.
    :param log_handlers: optional handlers attached to this logger.
    """
    self._logger = getLogger(__name__)
    if log_handlers:
        for h in log_handlers:
            self._logger.addHandler(h)
    self._stream = stream
    # Set when processing finishes; cancel flag and status code for control.
    self._done_flag = Event()
    self._cancel_bool = False
    self._current_status = 0
    self.running = True
async def test_decorator(self):
    """An emitter used as a decorator must deliver emitted values to the
    decorated coroutine."""
    event: Emitter[str] = Emitter()
    done = Event()

    # Subscribing via decoration; the handler asserts the payload.
    @event
    async def greeter(value: str):
        assert value == 'hello'
        done.set()

    # Emit on the next loop iteration, then wait for the handler to run.
    get_event_loop().call_soon(lambda: event.emit('hello'))
    await done.wait()
async def test_blind_detection():
    """The SQL module must detect a blind (boolean-based) SQL injection
    against a deliberately vulnerable sqlite-backed endpoint."""
    with NamedTemporaryFile() as database_fd:
        # Build a throwaway database with one known user.
        conn = sqlite3.connect(database_fd.name)
        cursor = conn.cursor()
        cursor.execute("CREATE TABLE users (id INTEGER PRIMARY KEY, username TEXT, password TEXT)")
        conn.commit()
        cursor.execute("INSERT INTO users (id, username, password) VALUES (1, \"admin\", \"123456\")")
        conn.commit()
        cursor.close()
        conn.close()

        def process(http_request):
            # Simulated vulnerable endpoint: echoes the user for ?user_id=N.
            try:
                user_id = parse_qs(urlparse(str(http_request.url)).query)["user_id"][0]
            except (IndexError, KeyError):
                return httpx.Response(200, text="Unknown user")
            else:
                conn = sqlite3.connect(database_fd.name)
                cursor = conn.cursor()
                try:
                    # Will you spot the SQLi vulnerability? :D
                    cursor.execute("SELECT username FROM users WHERE id = {}".format(user_id))
                    row = cursor.fetchone()
                except sqlite3.OperationalError:
                    cursor.close()
                    conn.close()
                    return httpx.Response(200, text="Unknown user")
                else:
                    cursor.close()
                    conn.close()
                    if row:
                        return httpx.Response(200, text="Welcome {}".format(row[0]))
                    else:
                        return httpx.Response(200, text="Unknown user")

        # Route all matching GETs through the vulnerable handler.
        respx.get(url__regex=r"http://perdu\.com/\?user_id=.*").mock(side_effect=process)
        persister = AsyncMock()
        request = Request("http://perdu.com/?user_id=1")
        request.path_id = 1
        crawler = AsyncCrawler(Request("http://perdu.com/"), timeout=1)
        options = {"timeout": 10, "level": 1}
        module = ModuleSql(crawler, persister, options, Event())
        module.do_post = True
        await module.attack(request)
        assert persister.add_payload.call_count
        # One request for error-based, one to get normal response, four to test boolean-based attack
        assert respx.calls.call_count == 6
        await crawler.close()
async def pipe_factory_tcp(unused_tcp_port, client_arguments=None,
                           server_arguments=None, auto_connect_client=True):
    """Async-generator fixture: start an RSocket server and client over a
    local TCP pipe, yield them, and tear everything down afterwards.

    Yields (server, client) when auto_connect_client is True, otherwise
    (server_provider, client) where server_provider awaits server creation.
    """
    wait_for_server = Event()

    def session(*connection):
        # Server-side accept callback: wrap the connection and signal readiness.
        nonlocal server
        server = RSocketServer(TransportTCP(*connection), **(server_arguments or {}))
        wait_for_server.set()

    async def start():
        # Bring up the listening server, then dial it and build the client.
        nonlocal service, client
        service = await asyncio.start_server(session, host, port)
        connection = await asyncio.open_connection(host, port)
        nonlocal client_arguments
        # test_overrides = {'keep_alive_period': timedelta(minutes=20)}
        client_arguments = client_arguments or {}
        # client_arguments.update(test_overrides)
        client = RSocketClient(single_transport_provider(TransportTCP(*connection)),
                               **(client_arguments or {}))
        if auto_connect_client:
            await client.connect()

    async def finish():
        # Tear down client, server, and the listening service.
        if auto_connect_client:
            await client.close()
        await server.close()
        service.close()

    service: Optional[Server] = None
    server: Optional[RSocketServer] = None
    client: Optional[RSocketClient] = None
    port = unused_tcp_port
    host = 'localhost'
    await start()

    async def server_provider():
        # Lazily hand out the server once the accept callback has fired.
        await wait_for_server.wait()
        return server

    try:
        if auto_connect_client:
            await wait_for_server.wait()
            yield server, client
        else:
            yield server_provider, client
        # After the test body returns, no streams may remain open.
        assert_no_open_streams(client, server)
    finally:
        await finish()