def _run_doctest(self, *fnames):
    """Run the doctests embedded in each documentation file in ``fnames``.

    Files are resolved under ``doc/build`` relative to the repository
    root; the test is skipped when a file is absent.  Globals accumulate
    across files so later documents may reference names defined earlier.
    """
    base_dir = os.path.normpath(
        os.path.join(os.path.dirname(__file__), "..", "..")
    )
    flags = doctest.ELLIPSIS
    flags |= doctest.NORMALIZE_WHITESPACE
    flags |= doctest.IGNORE_EXCEPTION_DETAIL
    flags |= _get_allow_unicode_flag()
    runner = doctest.DocTestRunner(
        verbose=None,
        optionflags=flags,
        checker=_get_unicode_checker(),
    )
    doc_parser = doctest.DocTestParser()
    # shared namespace carried from one file's doctests into the next
    shared_globs = {"print_function": print_function}
    for fname in fnames:
        doc_path = os.path.join(base_dir, "doc/build", fname)
        if not os.path.exists(doc_path):
            config.skip_test("Can't find documentation file %r" % doc_path)
        with open(doc_path, encoding="utf-8") as file_:
            text = file_.read()
        # strip SQLAlchemy doc-markup tokens that are not real doctest output
        text = re.sub(r"{(?:stop|sql|opensql)}", "", text)
        parsed = doc_parser.get_doctest(text, shared_globs, fname, fname, 0)
        runner.run(parsed, clear_globs=False)
        runner.summarize()
        shared_globs.update(parsed.globs)
    assert not runner.failures
def _do_skips(cls):
    """Apply class-level skip rules and backend preferences for ``cls``.

    Skips the test class outright when a ``__skip_if__`` predicate fires
    or when no configured database supports it; otherwise prunes
    non-preferred backends and ensures the active config is usable.
    """
    skip_reasons = []
    usable_configs = _possible_configs_for_cls(cls, skip_reasons)
    skip_checks = getattr(cls, "__skip_if__", False)
    if skip_checks:
        for check in skip_checks:
            if check():
                config.skip_test(
                    "'%s' skipped by %s" % (cls.__name__, check.__name__)
                )
    if not usable_configs:
        backend_descriptions = ", ".join(
            "'%s(%s)+%s'"
            % (
                cfg.db.name,
                ".".join(
                    str(dig) for dig in exclusions._server_version(cfg.db)
                ),
                cfg.db.driver,
            )
            for cfg in config.Config.all_configs()
        )
        config.skip_test(
            "'%s' unsupported on any DB implementation %s%s"
            % (cls.__name__, backend_descriptions, ", ".join(skip_reasons))
        )
    elif hasattr(cls, "__prefer_backends__"):
        spec = exclusions.db_spec(*util.to_list(cls.__prefer_backends__))
        rejected = {cfg for cfg in usable_configs if not spec(cfg)}
        # only drop non-preferred backends when at least one preferred
        # backend would remain
        if usable_configs.difference(rejected):
            usable_configs.difference_update(rejected)
    if config._current not in usable_configs:
        _setup_config(usable_configs.pop(), cls)
def _run_doctest(self, fname):
    """Run the doctests in documentation file ``fname``.

    The file is resolved under ``doc/build`` relative to the repository
    root; the test is skipped when the file is not present.
    """
    here = os.path.dirname(__file__)
    sqla_base = os.path.normpath(os.path.join(here, "..", ".."))
    path = os.path.join(sqla_base, "doc/build", fname)
    if not os.path.exists(path):
        config.skip_test("Can't find documentation file %r" % path)
    # docs are UTF-8; don't depend on the locale's default encoding
    with open(path, encoding="utf-8") as file_:
        content = file_.read()
    # strip doc-markup tokens that are not part of real doctest output
    content = re.sub(r"{(?:stop|sql|opensql)}", "", content)
    self._run_doctest_for_content(fname, content)
def _run_doctest(self, fname):
    """Run the doctests in documentation file ``fname``.

    The file is resolved under ``doc/build`` relative to the repository
    root; the test is skipped when the file is not present.
    """
    here = os.path.dirname(__file__)
    sqla_base = os.path.normpath(os.path.join(here, "..", ".."))
    path = os.path.join(sqla_base, "doc/build", fname)
    if not os.path.exists(path):
        config.skip_test("Can't find documentation file %r" % path)
    # docs are UTF-8; don't depend on the locale's default encoding
    with open(path, encoding="utf-8") as file_:
        content = file_.read()
    # strip doc-markup tokens that are not part of real doctest output
    content = re.sub(r'{(?:stop|sql|opensql)}', '', content)
    self._run_doctest_for_content(fname, content)
def test_mypy(self, mypy_runner, path):
    """Run mypy on ``path`` and check its output against inline markers.

    Lines of the form ``# EXPECTED:`` / ``# EXPECTED_MYPY:`` declare
    messages that must appear in the mypy output (the latter for plain
    mypy errors, the former for SQLAlchemy-plugin-prefixed ones).
    ``# PYTHON_VERSION >= X.Y`` skips the test on older interpreters and
    ``# NOPLUGINS`` disables the SQLAlchemy plugin for the run.
    """
    use_plugin = True
    expected_errors = []
    expected_re = re.compile(r"\s*# EXPECTED(_MYPY)?: (.+)")
    py_ver_re = re.compile(r"^#\s*PYTHON_VERSION\s?>=\s?(\d+\.\d+)")
    with open(path) as file_:
        for num, line in enumerate(file_, 1):
            m = py_ver_re.match(line)
            if m:
                major, _, minor = m.group(1).partition(".")
                if sys.version_info < (int(major), int(minor)):
                    config.skip_test("Requires python >= %s" % (m.group(1)))
                continue
            if line.startswith("# NOPLUGINS"):
                use_plugin = False
                continue
            m = expected_re.match(line)
            if m:
                is_mypy = bool(m.group(1))
                # drop any trailing "# noqa" lint suppression from the
                # expected message text (was preceded by a dead
                # assignment of the raw group; removed)
                expected_msg = re.sub(r"# noqa ?.*", "", m.group(2))
                expected_errors.append((num, is_mypy, expected_msg.strip()))
    result = mypy_runner(path, use_plugin=use_plugin)
    if expected_errors:
        # mypy exit status 1 == errors were reported
        eq_(result[2], 1, msg=result)
        print(result[0])
        errors = []
        for e in result[0].split("\n"):
            if re.match(r".+\.py:\d+: error: .*", e):
                errors.append(e)
        for num, is_mypy, msg in expected_errors:
            msg = msg.replace("'", '"')
            prefix = "[SQLAlchemy Mypy plugin] " if not is_mypy else ""
            # the EXPECTED comment sits on the line above the erroring
            # statement, hence num + 1
            for idx, errmsg in enumerate(errors):
                if (
                    f"(unknown):{num + 1}: error: {prefix}{msg}"
                    in errmsg.replace("'", '"')
                ):
                    break
            else:
                continue
            # consume the matched error so leftovers can be reported
            del errors[idx]
        assert not errors, "errors remain: %s" % "\n".join(errors)
    else:
        eq_(result[2], 0, msg=result)
def _do_skips(cls):
    """Apply class-level skip rules and backend preferences for ``cls``.

    Skips the class when a ``__skip_if__`` predicate fires or when no
    configured database supports it (with a shorter message for
    backend-specific classes); otherwise prunes non-preferred backends
    and ensures the active config is one of the usable ones.
    """
    reasons = []
    remaining = _possible_configs_for_cls(cls, reasons)
    checks = getattr(cls, '__skip_if__', False)
    if checks:
        for check in checks:
            if check():
                config.skip_test("'%s' skipped by %s" % (
                    cls.__name__, check.__name__)
                )
    if not remaining:
        if getattr(cls, '__backend__', False):
            msg = "'%s' unsupported for implementation '%s'" % (
                cls.__name__,
                cls.__only_on__,
            )
        else:
            descriptions = ", ".join(
                "'%s(%s)+%s'" % (
                    cfg.db.name,
                    ".".join(
                        str(dig)
                        for dig in cfg.db.dialect.server_version_info
                    ),
                    cfg.db.driver,
                )
                for cfg in config.Config.all_configs()
            )
            msg = "'%s' unsupported on any DB implementation %s%s" % (
                cls.__name__,
                descriptions,
                ", ".join(reasons),
            )
        config.skip_test(msg)
    elif hasattr(cls, '__prefer_backends__'):
        spec = exclusions.db_spec(*util.to_list(cls.__prefer_backends__))
        unwanted = {cfg for cfg in remaining if not spec(cfg)}
        # only drop non-preferred backends when a preferred one remains
        if remaining.difference(unwanted):
            remaining.difference_update(unwanted)
    if config._current not in remaining:
        _setup_config(remaining.pop(), cls)
def test_files(self, mypy_runner, filename, path, use_plugin):
    """Run mypy on ``path`` and match its output against inline markers.

    ``# EXPECTED:`` / ``# EXPECTED_MYPY:`` / ``# EXPECTED_RE:`` /
    ``# EXPECTED_TYPE:`` comment lines declare messages that must appear
    in the mypy output for the following statement.  ``# PYTHON_VERSION
    >= X.Y`` skips on older interpreters; ``# NOPLUGINS`` disables the
    SQLAlchemy plugin for this run.
    """
    expected_messages = []
    # groups: 1=_MYPY (plain mypy msg), 2=_RE (regex match), 3=_TYPE
    # (pylance-style type expectation), 4=the message text itself
    expected_re = re.compile(r"\s*# EXPECTED(_MYPY)?(_RE)?(_TYPE)?: (.+)")
    py_ver_re = re.compile(r"^#\s*PYTHON_VERSION\s?>=\s?(\d+\.\d+)")
    with open(path) as file_:
        # expectations accumulate until the first non-EXPECTED line,
        # which is the statement they all apply to
        current_assert_messages = []
        for num, line in enumerate(file_, 1):
            m = py_ver_re.match(line)
            if m:
                major, _, minor = m.group(1).partition(".")
                if sys.version_info < (int(major), int(minor)):
                    config.skip_test("Requires python >= %s" % (m.group(1)))
                continue
            if line.startswith("# NOPLUGINS"):
                use_plugin = False
                continue
            m = expected_re.match(line)
            if m:
                is_mypy = bool(m.group(1))
                is_re = bool(m.group(2))
                is_type = bool(m.group(3))
                # strip any trailing "# noqa" suppression from the text
                expected_msg = re.sub(r"# noqa[:]? ?.*", "", m.group(4))
                if is_type:
                    if not is_re:
                        # the goal here is that we can cut-and-paste
                        # from vscode -> pylance into the
                        # EXPECTED_TYPE: line, then the test suite will
                        # validate that line against what mypy produces
                        expected_msg = re.sub(
                            r"([\[\]])",
                            lambda m: rf"\{m.group(0)}",
                            expected_msg,
                        )
                        # note making sure preceding text matches
                        # with a dot, so that an expect for "Select"
                        # does not match "TypedSelect"
                        expected_msg = re.sub(
                            r"([\w_]+)",
                            lambda m: rf"(?:.*\.)?{m.group(1)}\*?",
                            expected_msg,
                        )
                        expected_msg = re.sub(
                            "List", "builtins.list", expected_msg
                        )
                        expected_msg = re.sub(
                            r"\b(int|str|float|bool)\b",
                            lambda m: rf"builtins.{m.group(0)}\*?",
                            expected_msg,
                        )
                        # expected_msg = re.sub(
                        #     r"(Sequence|Tuple|List|Union)",
                        #     lambda m: fr"typing.{m.group(0)}\*?",
                        #     expected_msg,
                        # )
                    # type expectations compare against mypy's
                    # reveal_type output, which is always a regex match
                    is_mypy = is_re = True
                    expected_msg = f'Revealed type is "{expected_msg}"'
                current_assert_messages.append(
                    (is_mypy, is_re, expected_msg.strip())
                )
            elif current_assert_messages:
                # first non-EXPECTED line: bind the accumulated
                # expectations to this line number and reset
                expected_messages.extend(
                    (num, is_mypy, is_re, expected_msg)
                    for (
                        is_mypy,
                        is_re,
                        expected_msg,
                    ) in current_assert_messages
                )
                current_assert_messages[:] = []
    result = mypy_runner(path, use_plugin=use_plugin)
    if expected_messages:
        # exit status 1 == mypy reported errors
        eq_(result[2], 1, msg=result)
        # collect (type, message) pairs, filtering noise lines
        output = []
        raw_lines = result[0].split("\n")
        while raw_lines:
            e = raw_lines.pop(0)
            if re.match(r".+\.py:\d+: error: .*", e):
                output.append(("error", e))
            elif re.match(
                r".+\.py:\d+: note: +(?:Possible overload|def ).*", e
            ):
                # swallow the overload-candidate listing that follows
                while raw_lines:
                    ol = raw_lines.pop(0)
                    if not re.match(r".+\.py:\d+: note: +def \[.*", ol):
                        break
            elif re.match(
                r".+\.py:\d+: note: .*(?:perhaps|suggestion)", e, re.I
            ):
                # advisory hints are not matched against expectations
                pass
            elif re.match(r".+\.py:\d+: note: .*", e):
                output.append(("note", e))
        for num, is_mypy, is_re, msg in expected_messages:
            # normalize quote style before comparing
            msg = msg.replace("'", '"')
            prefix = "[SQLAlchemy Mypy plugin] " if not is_mypy else ""
            for idx, (typ, errmsg) in enumerate(output):
                if is_re:
                    # NOTE(review): "(unknown)" is a regex group here, so
                    # the literal parentheses in the message are not
                    # matched (the non-regex branch below compares them
                    # literally) — confirm this pattern actually matches
                    if re.match(
                        rf".*(unknown)\:{num}\: {typ}\: {prefix}{msg}",  # noqa: E501
                        errmsg,
                    ):
                        break
                elif (
                    f"(unknown):{num}: {typ}: {prefix}{msg}"
                    in errmsg.replace("'", '"')
                ):
                    break
            else:
                continue
            # consume the matched message so leftovers can be reported
            del output[idx]
        if output:
            print(f"{len(output)} messages from mypy were not consumed:")
            print("\n".join(msg for _, msg in output))
            assert False, "errors and/or notes remain, see stdout"
    else:
        # no expectations: the file must type-check cleanly
        if result[2] != 0:
            print(result[0])
        eq_(result[2], 0, msg=result)
def test_files(self, mypy_runner, filename, path, use_plugin):
    """Run mypy on ``path`` and match its output against inline markers.

    ``# EXPECTED:`` / ``# EXPECTED_MYPY:`` / ``# EXPECTED_RE:`` /
    ``# EXPECTED_TYPE:`` comment lines declare messages that must appear
    in the mypy output for the following statement.  ``# PYTHON_VERSION
    >= X.Y`` skips on older interpreters; ``# NOPLUGINS`` disables the
    SQLAlchemy plugin for this run.
    """
    expected_messages = []
    # groups: 1=_MYPY (plain mypy msg), 2=_RE (regex match), 3=_TYPE
    # (reveal_type expectation), 4=the message text itself
    expected_re = re.compile(r"\s*# EXPECTED(_MYPY)?(_RE)?(_TYPE)?: (.+)")
    py_ver_re = re.compile(r"^#\s*PYTHON_VERSION\s?>=\s?(\d+\.\d+)")
    with open(path) as file_:
        # expectations accumulate until the first non-EXPECTED line,
        # which is the statement they all apply to
        current_assert_messages = []
        for num, line in enumerate(file_, 1):
            m = py_ver_re.match(line)
            if m:
                major, _, minor = m.group(1).partition(".")
                if sys.version_info < (int(major), int(minor)):
                    config.skip_test(
                        "Requires python >= %s" % (m.group(1))
                    )
                continue
            if line.startswith("# NOPLUGINS"):
                use_plugin = False
                continue
            m = expected_re.match(line)
            if m:
                is_mypy = bool(m.group(1))
                is_re = bool(m.group(2))
                is_type = bool(m.group(3))
                # strip any trailing "# noqa" suppression from the text
                expected_msg = re.sub(r"# noqa ?.*", "", m.group(4))
                if is_type:
                    # type expectations compare against mypy's
                    # reveal_type output, always via regex match
                    is_mypy = is_re = True
                    expected_msg = f'Revealed type is "{expected_msg}"'
                current_assert_messages.append(
                    (is_mypy, is_re, expected_msg.strip())
                )
            elif current_assert_messages:
                # first non-EXPECTED line: bind the accumulated
                # expectations to this line number and reset
                expected_messages.extend(
                    (num, is_mypy, is_re, expected_msg)
                    for (
                        is_mypy,
                        is_re,
                        expected_msg,
                    ) in current_assert_messages
                )
                current_assert_messages[:] = []
    result = mypy_runner(path, use_plugin=use_plugin)
    if expected_messages:
        # exit status 1 == mypy reported errors
        eq_(result[2], 1, msg=result)
        # collect (type, message) pairs, filtering noise lines
        output = []
        raw_lines = result[0].split("\n")
        while raw_lines:
            e = raw_lines.pop(0)
            if re.match(r".+\.py:\d+: error: .*", e):
                output.append(("error", e))
            elif re.match(
                r".+\.py:\d+: note: +(?:Possible overload|def ).*", e
            ):
                # swallow the overload-candidate listing that follows
                while raw_lines:
                    ol = raw_lines.pop(0)
                    if not re.match(r".+\.py:\d+: note: +def \[.*", ol):
                        break
            elif re.match(
                r".+\.py:\d+: note: .*(?:perhaps|suggestion)", e, re.I
            ):
                # advisory hints are not matched against expectations
                pass
            elif re.match(r".+\.py:\d+: note: .*", e):
                output.append(("note", e))
        for num, is_mypy, is_re, msg in expected_messages:
            # normalize quote style before comparing
            msg = msg.replace("'", '"')
            prefix = "[SQLAlchemy Mypy plugin] " if not is_mypy else ""
            for idx, (typ, errmsg) in enumerate(output):
                if is_re:
                    # NOTE(review): "(unknown)" is a regex group here, so
                    # the literal parentheses in the message are not
                    # matched (the non-regex branch below compares them
                    # literally) — confirm this pattern actually matches
                    if re.match(
                        fr".*(unknown)\:{num}\: {typ}\: {prefix}{msg}",  # noqa E501
                        errmsg,
                    ):
                        break
                elif (
                    f"(unknown):{num}: {typ}: {prefix}{msg}"
                    in errmsg.replace("'", '"')
                ):
                    break
            else:
                continue
            # consume the matched message so leftovers can be reported
            del output[idx]
        if output:
            print("messages from mypy that were not consumed:")
            print("\n".join(msg for _, msg in output))
            assert False, "errors and/or notes remain, see stdout"
    else:
        # no expectations: the file must type-check cleanly
        if result[2] != 0:
            print(result[0])
        eq_(result[2], 0, msg=result)
async def run_test(subject, trans_on_subject, execute_on_subject):
    """Exercise async transaction context-manager behavior on ``subject``.

    Relies on enclosing-scope flags ``begin_nested``, ``rollback`` and
    ``run_second_execute`` plus table ``t`` to drive the scenario, then
    asserts the number of rows actually committed.

    :param subject: async engine/connection/session-like object under test
    :param trans_on_subject: commit/rollback on ``subject`` itself rather
        than on the transaction object
    :param execute_on_subject: issue SQL via ``subject`` rather than via
        the transaction object
    """
    async with subject.begin() as trans:
        if begin_nested:
            if not config.requirements.savepoints.enabled:
                config.skip_test("savepoints not enabled")
            if execute_on_subject:
                nested_trans = subject.begin_nested()
            else:
                nested_trans = trans.begin_nested()

            async with nested_trans:
                if execute_on_subject:
                    await subject.execute(t.insert(), {"data": 10})
                else:
                    await trans.execute(t.insert(), {"data": 10})

                # for nested trans, we always commit/rollback on the
                # "nested trans" object itself.
                # only Session(future=False) will affect savepoint
                # transaction for session.commit/rollback

                if rollback:
                    await nested_trans.rollback()
                else:
                    await nested_trans.commit()

                if run_second_execute:
                    # the savepoint context manager is closed; further
                    # SQL inside it must raise
                    with assertions.expect_raises_message(
                        exc.InvalidRequestError,
                        "Can't operate on closed transaction "
                        "inside context manager. Please complete the "
                        "context manager "
                        "before emitting further commands.",
                    ):
                        if execute_on_subject:
                            await subject.execute(t.insert(), {"data": 12})
                        else:
                            await trans.execute(t.insert(), {"data": 12})

            # outside the nested trans block, but still inside the
            # transaction block, we can run SQL, and it will be
            # committed
            if execute_on_subject:
                await subject.execute(t.insert(), {"data": 14})
            else:
                await trans.execute(t.insert(), {"data": 14})
        else:
            if execute_on_subject:
                await subject.execute(t.insert(), {"data": 10})
            else:
                await trans.execute(t.insert(), {"data": 10})

            if trans_on_subject:
                if rollback:
                    await subject.rollback()
                else:
                    await subject.commit()
            else:
                if rollback:
                    await trans.rollback()
                else:
                    await trans.commit()

            if run_second_execute:
                # the outer transaction is closed; further SQL inside
                # the context manager must raise
                with assertions.expect_raises_message(
                    exc.InvalidRequestError,
                    "Can't operate on closed transaction inside "
                    "context "
                    "manager. Please complete the context manager "
                    "before emitting further commands.",
                ):
                    if execute_on_subject:
                        await subject.execute(t.insert(), {"data": 12})
                    else:
                        await trans.execute(t.insert(), {"data": 12})

    expected_committed = 0
    if begin_nested:
        # begin_nested variant, we inserted a row after the nested
        # block
        expected_committed += 1

    if not rollback:
        # not rollback variant, our row inserted in the target
        # block itself would be committed
        expected_committed += 1

    if execute_on_subject:
        eq_(
            await subject.scalar(select(func.count()).select_from(t)),
            expected_committed,
        )
    else:
        # NOTE(review): plain "with" on what appears to be an async
        # connect() — confirm subject.connect() supports the sync
        # context-manager protocol here
        with subject.connect() as conn:
            eq_(
                await conn.scalar(select(func.count()).select_from(t)),
                expected_committed,
            )