def _get(self, path, preprocessor, is_archive=False):
    path = pathlib.Path(path)

    # Ask lib50 which file(s) in path should be included
    if path.is_file():
        # lib50.files operates on a directory
        # So create a tempdir if the path is just a file
        with tempfile.TemporaryDirectory() as dir:
            (pathlib.Path(dir) / path.name).touch()
            included, excluded = lib50.files(self.patterns, root=dir)
        path = path.parent
    else:
        included, excluded = lib50.files(self.patterns, require_tags=[], root=path)

    # Filter out any non utf8 files
    decodable, undecodable = partition(included, lambda fp: self._is_valid_utf8(path / fp))

    # Filter out any large files (>self.max_file_size)
    small, large = partition(decodable, lambda fp: (path / fp).stat().st_size <= self.max_file_size)

    return _data.Submission(
        path,
        sorted(small),
        large_files=sorted(large),
        undecodable_files=sorted(undecodable),
        preprocessor=preprocessor,
        is_archive=is_archive
    )
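# The helpers assumed by _get() above (self._is_valid_utf8 and partition) are not
# shown in this section. These are minimal sketches of what they might look like,
# an assumption rather than check50's actual implementations.


def partition(iterable, predicate):
    """Split iterable into (matching, non_matching) lists by predicate."""
    matching, non_matching = [], []
    for item in iterable:
        (matching if predicate(item) else non_matching).append(item)
    return matching, non_matching


def _is_valid_utf8(file_path):
    """Return True if the file at file_path decodes cleanly as UTF-8."""
    try:
        with open(file_path, encoding="utf-8") as f:
            f.read()
    except UnicodeDecodeError:
        return False
    return True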
def test_implicit_recursive(self):
    os.mkdir("foo")
    open("foo/bar.py", "w").close()
    open("qux.py", "w").close()

    content = \
        "check50:\n" \
        "  files:\n" \
        "    - !exclude \"*.py\"\n"
    config = self.loader.load(content)
    included, excluded = lib50.files(config.get("files"))
    self.assertEqual(set(included), set())
    self.assertEqual(set(excluded), {"qux.py", "foo/bar.py"})

    content = \
        "check50:\n" \
        "  files:\n" \
        "    - !exclude \"./*.py\"\n"
    config = self.loader.load(content)
    included, excluded = lib50.files(config.get("files"))
    self.assertEqual(set(included), {"foo/bar.py"})
    self.assertEqual(set(excluded), {"qux.py"})
def _get(self, path, preprocessor, is_archive=False):
    path = pathlib.Path(path)

    if path.is_file():
        with tempfile.TemporaryDirectory() as dir:
            (pathlib.Path(dir) / path.name).touch()
            included, excluded = lib50.files(self.patterns, root=dir)
        path = path.parent
    else:
        included, excluded = lib50.files(self.patterns, require_tags=[], root=path)

    decodable_files = []

    for file_path in included:
        try:
            with open(path / file_path) as f:
                f.read()
        except UnicodeDecodeError:
            pass
        else:
            decodable_files.append(file_path)

    if not decodable_files:
        raise _api.Error(f"Empty submission: {path}")

    decodable_files = sorted(decodable_files)

    return _data.Submission(path, decodable_files, preprocessor=preprocessor, is_archive=is_archive)
def test_requires_no_exclude(self):
    content = \
        "check50:\n" \
        "  files:\n" \
        "    - !require does_not_exist.py\n"
    config = self.loader.load(content)

    with self.assertRaises(lib50.MissingFilesError):
        lib50.files(config.get("files"))
def _get(self, path, preprocessor):
    path = pathlib.Path(path)

    if path.is_file():
        included, excluded = [path.name], []
        path = path.parent
    else:
        included, excluded = lib50.files(self.patterns, root=path, always_exclude=[])

    decodable_files = []

    for file_path in included:
        try:
            with open(path / file_path) as f:
                f.read()
        except UnicodeDecodeError:
            pass
        else:
            decodable_files.append(file_path)

    if not decodable_files:
        raise _api.Error(f"Empty submission: {path}")

    decodable_files = sorted(decodable_files)

    return _data.Submission(path, decodable_files, preprocessor=preprocessor)
def test_exclude_all(self):
    content = \
        "check50:\n" \
        "  files:\n" \
        "    - !exclude \"*\"\n"
    config = self.loader.load(content)

    included, excluded = lib50.files(config.get("files"))
    self.assertEqual(included, set())
    self.assertEqual(excluded, set())

    open("foo.py", "w").close()
    included, excluded = lib50.files(config.get("files"))
    self.assertEqual(set(included), set())
    self.assertEqual(set(excluded), {"foo.py"})
def test_invalid_utf8_filename(self):
    try:
        open(b"\xc3\x28", "w").close()
    except OSError:
        self.skipTest("can't create invalid utf8 filename")
    else:
        included, excluded = lib50.files({})
        self.assertEqual(included, set())
        self.assertEqual(excluded, {"?("})
def test_from_root(self):
    os.mkdir("foo")
    os.mkdir("foo/bar")
    os.mkdir("foo/bar/baz")
    open("foo/bar/baz/qux.py", "w").close()
    open("foo/hello.py", "w").close()

    included, excluded = lib50.files([], root="foo")

    self.assertEqual(included, {"bar/baz/qux.py", "hello.py"})
    self.assertEqual(excluded, set())
def test_required(self):
    content = \
        "check50:\n" \
        "  files:\n" \
        "    - !require foo.py\n"
    config = self.loader.load(content)

    open("foo.py", "w").close()
    included, excluded = lib50.files(config.get("files"))
    self.assertEqual(set(included), {"foo.py"})
    self.assertEqual(set(excluded), set())

    open("bar.c", "w").close()
    included, excluded = lib50.files(config.get("files"))
    self.assertEqual(set(included), {"foo.py", "bar.c"})
    self.assertEqual(set(excluded), set())
def test_non_file_require(self):
    open("foo.py", "w").close()

    content = \
        "check50:\n" \
        "  files:\n" \
        "    - !require \"*.py\"\n"
    config = self.loader.load(content)

    with self.assertRaises(lib50.MissingFilesError):
        included, excluded = lib50.files(config.get("files"))
def test_exclude_only_one(self):
    content = \
        "check50:\n" \
        "  files:\n" \
        "    - !exclude foo.py\n"
    config = self.loader.load(content)

    open("foo.py", "w").close()
    open("bar.py", "w").close()

    included, excluded = lib50.files(config.get("files"))
    self.assertEqual(set(included), {"bar.py"})
    self.assertEqual(set(excluded), {"foo.py"})
def test_exclude_everything_include_folder(self):
    content = \
        "check50:\n" \
        "  files:\n" \
        "    - !exclude \"*\"\n" \
        "    - !include foo\n"
    config = self.loader.load(content)

    os.mkdir("foo")
    open("foo/bar.py", "w").close()

    included, excluded = lib50.files(config.get("files"))
    self.assertEqual(set(included), {"foo/bar.py"})
    self.assertEqual(set(excluded), set())
def test_implicit_recursive_with_slash(self):
    content = \
        "check50:\n" \
        "  files:\n" \
        "    - !exclude \"*/*.py\"\n"
    config = self.loader.load(content)

    os.mkdir("foo")
    os.mkdir("foo/bar")
    open("foo/bar/baz.py", "w").close()
    open("foo/qux.py", "w").close()

    included, excluded = lib50.files(config.get("files"))
    self.assertEqual(set(included), {"foo/bar/baz.py"})
    self.assertEqual(set(excluded), {"foo/qux.py"})
def test_no_tags(self):
    open("foo.py", "w").close()
    open("bar.py", "w").close()
    open("baz.py", "w").close()

    content = \
        "check50:\n" \
        "  files:\n" \
        "    - !include \"foo.py\"\n" \
        "    - !exclude \"bar.py\"\n" \
        "    - !require \"baz.py\"\n"
    config = self.loader.load(content)

    included, excluded = lib50.files(config.get("files"), exclude_tags=[], include_tags=[], require_tags=[])
    self.assertEqual(included, {"foo.py", "bar.py", "baz.py"})
    self.assertEqual(excluded, set())
def test_lab50_tags(self):
    # Four dummy files
    open("foo.py", "w").close()
    open("bar.py", "w").close()
    open("baz.py", "w").close()
    open("qux.py", "w").close()

    # Dummy config file (.cs50.yml)
    content = \
        "lab50:\n" \
        "  files:\n" \
        "    - !open \"foo.py\"\n" \
        "    - !include \"bar.py\"\n" \
        "    - !exclude \"baz.py\"\n" \
        "    - \"qux.py\"\n"

    # Create a config Loader for a tool called lab50
    loader = lib50.config.Loader("lab50")

    # Scope the files section of lab50 with the tags: open, include and exclude
    loader.scope("files", "open", "include", "exclude", default="include")

    # Load the config
    config = loader.load(content)

    # Figure out which files have an open tag
    opened_files = [tagged_value.value for tagged_value in config["files"] if tagged_value.tag == "open"]

    # Have lib50.files figure out which files should be included and excluded
    # Simultaneously ensure all open files exist
    included, excluded = lib50.files(config["files"], require_tags=["open"])

    # Make sure that files tagged with open are also included
    opened_files = [file for file in opened_files if file in included]

    # Assert
    self.assertEqual(included, {"foo.py", "bar.py", "qux.py"})
    self.assertEqual(excluded, {"baz.py"})
    self.assertEqual(set(opened_files), {"foo.py"})
def test_custom_tags(self):
    open("foo.py", "w").close()
    open("bar.py", "w").close()
    open("baz.py", "w").close()

    content = \
        "foo50:\n" \
        "  files:\n" \
        "    - !open \"foo.py\"\n" \
        "    - !close \"bar.py\"\n" \
        "    - !exclude \"baz.py\"\n"

    loader = lib50.config.Loader("foo50")
    loader.scope("files", "open", "close", "exclude")
    config = loader.load(content)

    included, excluded = lib50.files(config.get("files"),
                                     exclude_tags=["exclude"],
                                     include_tags=[""],
                                     require_tags=["open", "close"])
    self.assertEqual(included, {"foo.py", "bar.py"})
    self.assertEqual(excluded, {"baz.py"})
def main():
    parser = argparse.ArgumentParser(prog="check50")

    parser.add_argument("slug", help=_("prescribed identifier of work to check"))
    parser.add_argument("-d", "--dev",
                        action="store_true",
                        help=_("run check50 in development mode (implies --offline and --verbose).\n"
                               "causes SLUG to be interpreted as a literal path to a checks package"))
    parser.add_argument("--offline",
                        action="store_true",
                        help=_("run checks completely offline (implies --local)"))
    parser.add_argument("-l", "--local",
                        action="store_true",
                        help=_("run checks locally instead of uploading to cs50 (enabled by default in beta version)"))
    parser.add_argument("--log",
                        action="store_true",
                        help=_("display more detailed information about check results"))
    parser.add_argument("-o", "--output",
                        action="store",
                        default="ansi",
                        choices=["ansi", "json"],
                        help=_("format of check results"))
    parser.add_argument("-v", "--verbose",
                        action="store_true",
                        help=_("display the full tracebacks of any errors (also implies --log)"))
    parser.add_argument("-V", "--version",
                        action="version",
                        version=f"%(prog)s {__version__}")
    parser.add_argument("--logout", action=LogoutAction)

    args = parser.parse_args()

    # TODO: remove this when submit.cs50.io API is stabilized
    args.local = True

    if args.dev:
        args.offline = True
        args.verbose = True

    if args.offline:
        args.local = True

    if args.verbose:
        # Show lib50 commands being run in verbose mode
        logging.basicConfig(level="INFO")
        lib50.ProgressBar.DISABLED = True
        args.log = True

    excepthook.verbose = args.verbose
    excepthook.output = args.output

    if args.local:
        # If developing, assume slug is a path to check_dir
        if args.dev:
            internal.check_dir = Path(args.slug).expanduser().resolve()
            if not internal.check_dir.is_dir():
                raise Error(_("{} is not a directory").format(internal.check_dir))
        else:
            # Otherwise have lib50 create a local copy of slug
            internal.check_dir = lib50.local(args.slug, "check50", offline=args.offline)

        config = internal.load_config(internal.check_dir)
        install_translations(config["translations"])

        if not args.offline:
            install_dependencies(config["dependencies"], verbose=args.verbose)

        checks_file = (internal.check_dir / config["checks"]).resolve()

        # Have lib50 decide which files to include
        included = lib50.files(config.get("files"))[0]

        if args.verbose:
            stdout = sys.stdout
            stderr = sys.stderr
        else:
            stdout = stderr = open(os.devnull, "w")

        # Create a working_area (temp dir) with all included student files named -
        with lib50.working_area(included, name='-') as working_area, \
                contextlib.redirect_stdout(stdout), \
                contextlib.redirect_stderr(stderr):
            results = CheckRunner(checks_file).run(included, working_area)
    else:
        # TODO: Remove this before we ship
        raise NotImplementedError("cannot run check50 remotely, until version 3.0.0 is shipped")
        username, commit_hash = lib50.push("check50", args.slug)
        results = await_results(f"https://cs50.me/check50/status/{username}/{commit_hash}")

    if args.output == "json":
        print_json(results)
    else:
        print_ansi(results, log=args.log)
def main():
    parser = argparse.ArgumentParser(prog="check50")

    parser.add_argument("slug", help=_("prescribed identifier of work to check"))
    parser.add_argument("-d", "--dev",
                        action="store_true",
                        help=_("run check50 in development mode (implies --offline and --verbose info).\n"
                               "causes SLUG to be interpreted as a literal path to a checks package"))
    parser.add_argument("--offline",
                        action="store_true",
                        help=_("run checks completely offline (implies --local, --no-download-checks and --no-install-dependencies)"))
    parser.add_argument("-l", "--local",
                        action="store_true",
                        help=_("run checks locally instead of uploading to cs50"))
    parser.add_argument("--log",
                        action="store_true",
                        help=_("display more detailed information about check results"))
    parser.add_argument("-o", "--output",
                        action="store",
                        nargs="+",
                        default=["ansi", "html"],
                        choices=["ansi", "json", "html"],
                        help=_("format of check results"))
    parser.add_argument("--target",
                        action="store",
                        nargs="+",
                        help=_("target specific checks to run"))
    parser.add_argument("--output-file",
                        action="store",
                        metavar="FILE",
                        help=_("file to write output to"))
    parser.add_argument("-v", "--verbose",
                        action="store",
                        nargs="?",
                        default="",
                        const="info",
                        choices=[attr for attr in dir(logging) if attr.isupper() and isinstance(getattr(logging, attr), int)],
                        type=str.upper,
                        help=_("sets the verbosity level."
                               ' "INFO" displays the full tracebacks of errors and shows all commands run.'
                               ' "DEBUG" adds the output of all commands run.'))
    parser.add_argument("--no-download-checks",
                        action="store_true",
                        help=_("do not download checks, but use previously downloaded checks instead (only works with --local)"))
    parser.add_argument("--no-install-dependencies",
                        action="store_true",
                        help=_("do not install dependencies (only works with --local)"))
    parser.add_argument("-V", "--version",
                        action="version",
                        version=f"%(prog)s {__version__}")
    parser.add_argument("--logout", action=LogoutAction)

    args = parser.parse_args()

    global SLUG
    SLUG = args.slug

    # dev implies offline and verbose "info" if not overwritten
    if args.dev:
        args.offline = True
        if not args.verbose:
            args.verbose = "info"

    # offline implies local
    if args.offline:
        args.no_install_dependencies = True
        args.no_download_checks = True
        args.local = True

    # Setup logging for lib50 depending on verbosity level
    setup_logging(args.verbose)

    # Warn in case of running remotely with no_download_checks or no_install_dependencies set
    if not args.local:
        useless_args = []
        if args.no_download_checks:
            useless_args.append("--no-download-checks")
        if args.no_install_dependencies:
            useless_args.append("--no-install-dependencies")

        if useless_args:
            termcolor.cprint(_("Warning: you should always use --local when using: {}".format(", ".join(useless_args))),
                             "yellow", attrs=["bold"])

    # Filter out any duplicates from args.output
    seen_output = set()
    args.output = [output for output in args.output if not (output in seen_output or seen_output.add(output))]

    # Set excepthook
    excepthook.verbose = bool(args.verbose)
    excepthook.outputs = args.output
    excepthook.output_file = args.output_file

    # If remote, push files to GitHub and await results
    if not args.local:
        commit_hash = lib50.push("check50", SLUG, internal.CONFIG_LOADER, data={"check50": True})[1]
        with lib50.ProgressBar("Waiting for results") if "ansi" in args.output else nullcontext():
            tag_hash, results = await_results(commit_hash, SLUG)

    # Otherwise run checks locally
    else:
        with lib50.ProgressBar("Checking") if "ansi" in args.output else nullcontext():

            # If developing, assume slug is a path to check_dir
            if args.dev:
                internal.check_dir = Path(SLUG).expanduser().resolve()
                if not internal.check_dir.is_dir():
                    raise internal.Error(_("{} is not a directory").format(internal.check_dir))

            # Otherwise have lib50 create a local copy of slug
            else:
                try:
                    internal.check_dir = lib50.local(SLUG, offline=args.no_download_checks)
                except lib50.ConnectionError:
                    raise internal.Error(_("check50 could not retrieve checks from GitHub. Try running check50 again with --offline.").format(SLUG))
                except lib50.InvalidSlugError:
                    raise_invalid_slug(SLUG, offline=args.no_download_checks)

            # Load config
            config = internal.load_config(internal.check_dir)

            # Compile local checks if necessary
            if isinstance(config["checks"], dict):
                config["checks"] = internal.compile_checks(config["checks"], prompt=args.dev)

            install_translations(config["translations"])

            if not args.no_install_dependencies:
                install_dependencies(config["dependencies"], verbose=args.verbose)

            checks_file = (internal.check_dir / config["checks"]).resolve()

            # Have lib50 decide which files to include
            included = lib50.files(config.get("files"))[0]

            with open(os.devnull, "w") if args.verbose else nullcontext() as devnull:
                # Redirect stdout to devnull if some verbosity level is set
                if args.verbose:
                    stdout = stderr = devnull
                else:
                    stdout = sys.stdout
                    stderr = sys.stderr

                # Create a working_area (temp dir) named - with all included student files
                with lib50.working_area(included, name='-') as working_area, \
                        contextlib.redirect_stdout(stdout), \
                        contextlib.redirect_stderr(stderr):
                    check_results = CheckRunner(checks_file).run(included, working_area, args.target)
                    results = {
                        "slug": SLUG,
                        "results": [attr.asdict(result) for result in check_results],
                        "version": __version__
                    }

    # Render output
    file_manager = open(args.output_file, "w") if args.output_file else nullcontext(sys.stdout)
    with file_manager as output_file:
        for output in args.output:
            if output == "json":
                output_file.write(renderer.to_json(**results))
                output_file.write("\n")
            elif output == "ansi":
                output_file.write(renderer.to_ansi(**results, log=args.log))
                output_file.write("\n")
            elif output == "html":
                if os.environ.get("CS50_IDE_TYPE") and args.local:
                    html = renderer.to_html(**results)
                    subprocess.check_call(["c9", "exec", "renderresults", "check50", html])
                else:
                    if args.local:
                        html = renderer.to_html(**results)
                        with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".html") as html_file:
                            html_file.write(html)
                        url = f"file://{html_file.name}"
                    else:
                        url = f"https://submit.cs50.io/check50/{tag_hash}"

                    termcolor.cprint(_("To see the results in your browser go to {}").format(url), "white", attrs=["bold"])
def main():
    parser = argparse.ArgumentParser(prog="check50", formatter_class=argparse.RawTextHelpFormatter)

    parser.add_argument("slug", help=_("prescribed identifier of work to check"))
    parser.add_argument(
        "-d", "--dev",
        action="store_true",
        help=_(
            "run check50 in development mode (implies --offline and --log-level info).\n"
            "causes slug to be interpreted as a literal path to a checks package."
        ),
    )
    parser.add_argument(
        "--offline",
        action="store_true",
        help=_(
            "run checks completely offline (implies --local, --no-download-checks and --no-install-dependencies)"
        ),
    )
    parser.add_argument(
        "-l", "--local",
        action="store_true",
        help=_("run checks locally instead of uploading to cs50"),
    )
    parser.add_argument(
        "-o", "--output",
        action="store",
        nargs="+",
        default=["ansi", "html"],
        choices=["ansi", "json", "html"],
        help=_("format of check results"),
    )
    parser.add_argument(
        "--target", action="store", nargs="+", help=_("target specific checks to run")
    )
    parser.add_argument(
        "--output-file",
        action="store",
        metavar="FILE",
        help=_("file to write output to"),
    )
    parser.add_argument(
        "--log-level",
        action="store",
        choices=[level.name.lower() for level in LogLevel],
        type=str.lower,
        help=_(
            "warning: displays usage warnings."
            "\ninfo: adds all commands run, any locally installed dependencies and print messages."
            "\ndebug: adds the output of all commands run."
        ),
    )
    parser.add_argument(
        "--ansi-log", action="store_true", help=_("display log in ansi output mode")
    )
    parser.add_argument(
        "--no-download-checks",
        action="store_true",
        help=_(
            "do not download checks, but use previously downloaded checks instead (only works with --local)"
        ),
    )
    parser.add_argument(
        "--no-install-dependencies",
        action="store_true",
        help=_("do not install dependencies (only works with --local)"),
    )
    parser.add_argument(
        "-V", "--version", action="version", version=f"%(prog)s {__version__}"
    )
    parser.add_argument("--logout", action=LogoutAction)

    args = parser.parse_args()

    internal.slug = args.slug

    # Validate arguments and apply defaults
    process_args(args)

    # Set excepthook
    _exceptions.ExceptHook.initialize(args.output, args.output_file)

    # If remote, push files to GitHub and await results
    if not args.local:
        commit_hash = lib50.push(
            "check50", internal.slug, internal.CONFIG_LOADER, data={"check50": True}
        )[1]
        with lib50.ProgressBar("Waiting for results") if "ansi" in args.output else nullcontext():
            tag_hash, results = await_results(commit_hash, internal.slug)

    # Otherwise run checks locally
    else:
        with lib50.ProgressBar("Checking") if "ansi" in args.output else nullcontext():

            # If developing, assume slug is a path to check_dir
            if args.dev:
                # Note: internal.check_dir is the location of the slug
                internal.check_dir = Path(internal.slug).expanduser().resolve()
                if not internal.check_dir.is_dir():
                    raise _exceptions.Error(
                        _("{} is not a directory").format(internal.check_dir)
                    )

            # Otherwise have lib50 create a local copy of slug
            else:
                try:
                    internal.check_dir = lib50.local(
                        internal.slug, offline=args.no_download_checks
                    )
                except lib50.ConnectionError:
                    raise _exceptions.Error(
                        _(
                            "check50 could not retrieve checks from GitHub. Try running check50 again with --offline."
                        ).format(internal.slug)
                    )
                except lib50.InvalidSlugError:
                    raise_invalid_slug(internal.slug, offline=args.no_download_checks)

            # Load config
            config = internal.load_config(internal.check_dir)

            # Compile local checks if necessary
            if isinstance(config["checks"], dict):
                config["checks"] = internal.compile_checks(
                    config["checks"], prompt=args.dev
                )

            install_translations(config["translations"])

            if not args.no_install_dependencies:
                install_dependencies(config["dependencies"])

            checks_file = (internal.check_dir / config["checks"]).resolve()

            # Have lib50 decide which files to include
            included_files = lib50.files(config.get("files"))[0]

            # Create a working_area (temp dir) named - with all included student files
            with CheckRunner(checks_file, included_files) as check_runner, \
                    contextlib.redirect_stdout(LoggerWriter(LOGGER, logging.INFO)), \
                    contextlib.redirect_stderr(LoggerWriter(LOGGER, logging.INFO)):
                check_results = check_runner.run(args.target)
                results = {
                    "slug": internal.slug,
                    "results": [attr.asdict(result) for result in check_results],
                    "version": __version__,
                }
                LOGGER.debug(results)

    # Render output
    file_manager = (
        open(args.output_file, "w") if args.output_file else nullcontext(sys.stdout)
    )
    with file_manager as output_file:
        for output in args.output:
            if output == "json":
                output_file.write(renderer.to_json(**results))
                output_file.write("\n")
            elif output == "ansi":
                output_file.write(renderer.to_ansi(**results, _log=args.ansi_log))
                output_file.write("\n")
            elif output == "html":
                if os.environ.get("CS50_IDE_TYPE") and args.local:
                    html = renderer.to_html(**results)
                    subprocess.check_call(
                        ["c9", "exec", "renderresults", "check50", html]
                    )
                else:
                    if args.local:
                        # The HTML text that needs to go into the file.
                        html = renderer.to_html(**results)
                        outputDir = f"{internal.check_dir}/outputs"
                        with open(
                            f"{outputDir}/test{datetime.now().isoformat()}.html", "x+"
                        ) as html_file:
                            html_file.write(html)
                        url = f"file://{html_file.name}"
                    else:
                        url = f"https://submit.cs50.io/check50/{tag_hash}"

                    termcolor.cprint(
                        _("To see the results in your browser go to {}").format(url),
                        "white",
                        attrs=["bold"],
                    )
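# A minimal sketch of the LoggerWriter used in the main() above; this is an
# assumption, not necessarily check50's actual implementation.
# contextlib.redirect_stdout()/redirect_stderr() only need a file-like object
# with write() and flush(), so forwarding writes to a logging.Logger at a
# fixed level is enough.


class LoggerWriter:
    def __init__(self, logger, level):
        self.logger = logger
        self.level = level

    def write(self, message):
        # Skip the bare newlines that print() emits between lines
        if message.strip():
            self.logger.log(self.level, message.strip())

    def flush(self):
        # Nothing to flush; logging handlers manage their own buffers
        pass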
def main():
    parser = argparse.ArgumentParser(prog="check50")

    parser.add_argument("slug", help=_("prescribed identifier of work to check"))
    parser.add_argument("-d", "--dev",
                        action="store_true",
                        help=_("run check50 in development mode (implies --offline and --verbose).\n"
                               "causes SLUG to be interpreted as a literal path to a checks package"))
    parser.add_argument("--offline",
                        action="store_true",
                        help=_("run checks completely offline (implies --local)"))
    parser.add_argument("-l", "--local",
                        action="store_true",
                        help=_("run checks locally instead of uploading to cs50"))
    parser.add_argument("--log",
                        action="store_true",
                        help=_("display more detailed information about check results"))
    parser.add_argument("-o", "--output",
                        action="store",
                        nargs="+",
                        default=["ansi", "html"],
                        choices=["ansi", "json", "html"],
                        help=_("format of check results"))
    parser.add_argument("--target",
                        action="store",
                        nargs="+",
                        help=_("target specific checks to run"))
    parser.add_argument("--output-file",
                        action="store",
                        metavar="FILE",
                        help=_("file to write output to"))
    parser.add_argument("-v", "--verbose",
                        action="store_true",
                        help=_("display the full tracebacks of any errors (also implies --log)"))
    parser.add_argument("-V", "--version",
                        action="version",
                        version=f"%(prog)s {__version__}")
    parser.add_argument("--logout", action=LogoutAction)

    args = parser.parse_args()

    global SLUG
    SLUG = args.slug

    if args.dev:
        args.offline = True
        args.verbose = True

    if args.offline:
        args.local = True

    if args.verbose:
        # Show lib50 commands being run in verbose mode
        logging.basicConfig(level=os.environ.get("CHECK50_LOGLEVEL", "INFO"))
        lib50.ProgressBar.DISABLED = True
        args.log = True

    # Filter out any duplicates from args.output
    seen_output = set()
    args.output = [output for output in args.output if not (output in seen_output or seen_output.add(output))]

    # Set excepthook
    excepthook.verbose = args.verbose
    excepthook.outputs = args.output
    excepthook.output_file = args.output_file

    if not args.local:
        commit_hash = lib50.push("check50", SLUG, internal.CONFIG_LOADER, data={"check50": True})[1]
        with lib50.ProgressBar("Waiting for results") if "ansi" in args.output else nullcontext():
            tag_hash, results = await_results(commit_hash, SLUG)
    else:
        with lib50.ProgressBar("Checking") if not args.verbose and "ansi" in args.output else nullcontext():

            # If developing, assume slug is a path to check_dir
            if args.dev:
                internal.check_dir = Path(SLUG).expanduser().resolve()
                if not internal.check_dir.is_dir():
                    raise internal.Error(_("{} is not a directory").format(internal.check_dir))
            else:
                # Otherwise have lib50 create a local copy of slug
                try:
                    internal.check_dir = lib50.local(SLUG, offline=args.offline)
                except lib50.ConnectionError:
                    raise internal.Error(_("check50 could not retrieve checks from GitHub. Try running check50 again with --offline.").format(SLUG))
                except lib50.InvalidSlugError:
                    raise_invalid_slug(SLUG, offline=args.offline)

            # Load config
            config = internal.load_config(internal.check_dir)

            # Compile local checks if necessary
            if isinstance(config["checks"], dict):
                config["checks"] = internal.compile_checks(config["checks"], prompt=args.dev)

            install_translations(config["translations"])

            if not args.offline:
                install_dependencies(config["dependencies"], verbose=args.verbose)

            checks_file = (internal.check_dir / config["checks"]).resolve()

            # Have lib50 decide which files to include
            included = lib50.files(config.get("files"))[0]

            # Only open devnull conditionally
            ctxmanager = open(os.devnull, "w") if not args.verbose else nullcontext()
            with ctxmanager as devnull:
                if args.verbose:
                    stdout = sys.stdout
                    stderr = sys.stderr
                else:
                    stdout = stderr = devnull

                # Create a working_area (temp dir) with all included student files named -
                with lib50.working_area(included, name='-') as working_area, \
                        contextlib.redirect_stdout(stdout), \
                        contextlib.redirect_stderr(stderr):
                    runner = CheckRunner(checks_file)

                    # Run checks
                    if args.target:
                        check_results = runner.run(args.target, included, working_area)
                    else:
                        check_results = runner.run_all(included, working_area)

                    results = {
                        "slug": SLUG,
                        "results": [attr.asdict(result) for result in check_results],
                        "version": __version__
                    }

    # Render output
    file_manager = open(args.output_file, "w") if args.output_file else nullcontext(sys.stdout)
    with file_manager as output_file:
        for output in args.output:
            if output == "json":
                output_file.write(renderer.to_json(**results))
                output_file.write("\n")
            elif output == "ansi":
                output_file.write(renderer.to_ansi(**results, log=args.log))
                output_file.write("\n")
            elif output == "html":
                if os.environ.get("CS50_IDE_TYPE") and args.local:
                    html = renderer.to_html(**results)
                    subprocess.check_call(["c9", "exec", "renderresults", "check50", html])
                else:
                    if args.local:
                        html = renderer.to_html(**results)
                        with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".html") as html_file:
                            html_file.write(html)
                        url = f"file://{html_file.name}"
                    else:
                        url = f"https://submit.cs50.io/check50/{tag_hash}"

                    termcolor.cprint(_("To see the results in your browser go to {}").format(url), "white", attrs=["bold"])
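# Example invocation of the CLI defined above (the slug is a placeholder, not a
# real problem identifier):
#
#   check50 --local --output json --output-file results.json cs50/problems/20xx/x/hello
#
# --local runs the checks on this machine instead of pushing to GitHub, and
# because "json" is the only --output format given, the results are written to
# results.json as JSON rather than rendered as ansi or html.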